tcp: annotate sk->sk_rcvbuf lockless reads
[linux-2.6-block.git] net/core/sock.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/unaligned.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <linux/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>
#include <linux/sock_diag.h>

#include <linux/filter.h>
#include <net/sock_reuseport.h>
#include <net/bpf_sk_storage.h>

#include <trace/events/sock.h>

#include <net/tcp.h>
#include <net/busy_poll.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

static void sock_inuse_add(struct net *net, int val);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap in the
 * user namespace @user_ns when the socket was created, and that the
 * current process still has it.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap in all
 * user namespaces when the socket was created, and that the current
 * process still has it.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap over the
 * network namespace the socket is a member of when the socket was created,
 * and that the current process still has it.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);
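
/*
 * Illustrative use of the three helpers above (an assumed caller, not
 * code from this file): a protocol gating a privileged option would
 * typically write
 *
 *	if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *		return -EPERM;
 *
 * which succeeds only if the socket's opener had CAP_NET_ADMIN over the
 * socket's network namespace and the current task still has it.
 */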

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family and separate keys for internal and
 * userspace sockets.
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_kern_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];
static struct lock_class_key af_family_kern_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */

#define _sock_locks(x)						  \
  x "AF_UNSPEC",	x "AF_UNIX"     ,	x "AF_INET"     , \
  x "AF_AX25"  ,	x "AF_IPX"      ,	x "AF_APPLETALK", \
  x "AF_NETROM",	x "AF_BRIDGE"   ,	x "AF_ATMPVC"   , \
  x "AF_X25"   ,	x "AF_INET6"    ,	x "AF_ROSE"     , \
  x "AF_DECnet",	x "AF_NETBEUI"  ,	x "AF_SECURITY" , \
  x "AF_KEY"   ,	x "AF_NETLINK"  ,	x "AF_PACKET"   , \
  x "AF_ASH"   ,	x "AF_ECONET"   ,	x "AF_ATMSVC"   , \
  x "AF_RDS"   ,	x "AF_SNA"      ,	x "AF_IRDA"     , \
  x "AF_PPPOX" ,	x "AF_WANPIPE"  ,	x "AF_LLC"      , \
  x "27"       ,	x "28"          ,	x "AF_CAN"      , \
  x "AF_TIPC"  ,	x "AF_BLUETOOTH",	x "IUCV"        , \
  x "AF_RXRPC" ,	x "AF_ISDN"     ,	x "AF_PHONET"   , \
  x "AF_IEEE802154",	x "AF_CAIF" ,	x "AF_ALG"      , \
  x "AF_NFC"   ,	x "AF_VSOCK"    ,	x "AF_KCM"      , \
  x "AF_QIPCRTR",	x "AF_SMC"      ,	x "AF_XDP"      , \
  x "AF_MAX"

static const char *const af_family_key_strings[AF_MAX+1] = {
	_sock_locks("sk_lock-")
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	_sock_locks("slock-")
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	_sock_locks("clock-")
};

static const char *const af_family_kern_key_strings[AF_MAX+1] = {
	_sock_locks("k-sk_lock-")
};
static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
	_sock_locks("k-slock-")
};
static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
	_sock_locks("k-clock-")
};
static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
	_sock_locks("rlock-")
};
static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
	_sock_locks("wlock-")
};
static const char *const af_family_elock_key_strings[AF_MAX+1] = {
	_sock_locks("elock-")
};

/*
 * sk_callback_lock and sk queues locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];
static struct lock_class_key af_rlock_keys[AF_MAX];
static struct lock_class_key af_wlock_keys[AF_MAX];
static struct lock_class_key af_elock_keys[AF_MAX];
static struct lock_class_key af_kern_callback_keys[AF_MAX];

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

int sysctl_tstamp_allow_data __read_mostly = 1;

DEFINE_STATIC_KEY_FALSE(memalloc_socks_key);
EXPORT_SYMBOL_GPL(memalloc_socks_key);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_branch_inc(&memalloc_socks_key);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_branch_dec(&memalloc_socks_key);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. SOCK_MEMALLOC may be cleared while
	 * it has rmem allocations due to the last swapfile being deactivated
	 * but there is a risk that the socket is unusable due to exceeding
	 * the rmem limits. Reclaim the reserves and obey rmem limits again.
	 */
	sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);
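
/*
 * Illustrative use (an assumed caller, not code from this file): a
 * transport that backs swap, e.g. the socket of a swap-over-NFS mount or
 * a network block device, marks its socket so transmits may dip into the
 * emergency reserves and clears it on teardown:
 *
 *	sk_set_memalloc(sock->sk);	// may use __GFP_MEMALLOC reserves
 *	...
 *	sk_clear_memalloc(sock->sk);	// reclaims, obeys rmem limits again
 */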

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned int noreclaim_flag;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	noreclaim_flag = memalloc_noreclaim_save();
	ret = sk->sk_backlog_rcv(sk, skb);
	memalloc_noreclaim_restore(noreclaim_flag);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
{
	struct __kernel_sock_timeval tv;
	int size;

	if (timeo == MAX_SCHEDULE_TIMEOUT) {
		tv.tv_sec = 0;
		tv.tv_usec = 0;
	} else {
		tv.tv_sec = timeo / HZ;
		tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
	}

	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
		struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
		*(struct old_timeval32 *)optval = tv32;
		return sizeof(tv32);
	}

	if (old_timeval) {
		struct __kernel_old_timeval old_tv;
		old_tv.tv_sec = tv.tv_sec;
		old_tv.tv_usec = tv.tv_usec;
		*(struct __kernel_old_timeval *)optval = old_tv;
		size = sizeof(old_tv);
	} else {
		*(struct __kernel_sock_timeval *)optval = tv;
		size = sizeof(tv);
	}

	return size;
}

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen, bool old_timeval)
{
	struct __kernel_sock_timeval tv;

	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
		struct old_timeval32 tv32;

		if (optlen < sizeof(tv32))
			return -EINVAL;

		if (copy_from_user(&tv32, optval, sizeof(tv32)))
			return -EFAULT;
		tv.tv_sec = tv32.tv_sec;
		tv.tv_usec = tv32.tv_usec;
	} else if (old_timeval) {
		struct __kernel_old_timeval old_tv;

		if (optlen < sizeof(old_tv))
			return -EINVAL;
		if (copy_from_user(&old_tv, optval, sizeof(old_tv)))
			return -EFAULT;
		tv.tv_sec = old_tv.tv_sec;
		tv.tv_usec = old_tv.tv_usec;
	} else {
		if (optlen < sizeof(tv))
			return -EINVAL;
		if (copy_from_user(&tv, optval, sizeof(tv)))
			return -EFAULT;
	}
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1))
		*timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec, USEC_PER_SEC / HZ);
	return 0;
}
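
/*
 * Worked example for sock_set_timeout() above (illustrative, with an
 * assumed HZ=1000): a user timeout of { .tv_sec = 1, .tv_usec = 500000 }
 * is converted as
 *
 *	*timeo_p = 1 * HZ + DIV_ROUND_UP(500000, USEC_PER_SEC / HZ)
 *		 = 1000 + 500 = 1500 jiffies
 *
 * while { 0, 0 } means "no timeout" and is stored as MAX_SCHEDULE_TIMEOUT.
 */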

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

static bool sock_needs_netstamp(const struct sock *sk)
{
	switch (sk->sk_family) {
	case AF_UNSPEC:
	case AF_UNIX:
		return false;
	default:
		return true;
	}
}

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (sock_needs_netstamp(sk) &&
		    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* we escape from rcu protected region, make sure we don't leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}
EXPORT_SYMBOL(__sock_queue_rcv_skb);

int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sk_filter(sk, skb);
	if (err)
		return err;

	return __sock_queue_rcv_skb(sk, skb);
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
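
/*
 * Illustrative caller pattern (an assumed protocol handler, not code
 * from this file): datagram protocols typically drop the skb themselves
 * when queueing fails, e.g.
 *
 *	if (sock_queue_rcv_skb(sk, skb) < 0) {
 *		kfree_skb(skb);		// queueing failed, drop
 *		return NET_RX_DROP;
 *	}
 *
 * sock_queue_rcv_skb() runs the socket filter first and then charges the
 * skb to sk_rmem_alloc via __sock_queue_rcv_skb().
 */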

int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
		     const int nested, unsigned int trim_cap, bool refcounted)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter_trim_cap(sk, skb, trim_cap))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	if (refcounted)
		sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(__sk_receive_skb);
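
/*
 * Note on the READ_ONCE() above (the point of this commit's annotations):
 * sk->sk_rcvbuf can be changed at any time by setsockopt(SO_RCVBUF),
 * which publishes the new value with WRITE_ONCE(). Lockless readers pair
 * with that store, e.g.
 *
 *	limit = READ_ONCE(sk->sk_rcvbuf);	// single, untorn load
 *
 * so the compiler cannot reload or tear the value and KCSAN does not
 * flag the benign data race.
 */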

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		sk->sk_dst_pending_confirm = 0;
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_setbindtodevice_locked(struct sock *sk, int ifindex)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);

	/* Sorry... */
	ret = -EPERM;
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (ifindex < 0)
		goto out;

	sk->sk_bound_dev_if = ifindex;
	if (sk->sk_prot->rehash)
		sk->sk_prot->rehash(sk);
	sk_dst_reset(sk);

	ret = 0;

out:
#endif

	return ret;
}

static int sock_setbindtodevice(struct sock *sk, char __user *optval,
				int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	ret = sock_setbindtodevice_locked(sk, index);
	release_sock(sk);

out:
#endif

	return ret;
}

static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}

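/*
 * Illustrative userspace usage (not part of this file): binding a socket
 * to one interface by name; CAP_NET_RAW is required, per the check in
 * sock_setbindtodevice_locked():
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0"));
 *
 * Passing an empty name with length 0 removes the binding, and
 * SO_BINDTOIFINDEX offers the same control by ifindex instead of name.
 */
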
static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

bool sk_mc_loop(struct sock *sk)
{
	if (dev_recursion_level())
		return false;
	if (!sk)
		return true;
	switch (sk->sk_family) {
	case AF_INET:
		return inet_sk(sk)->mc_loop;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		return inet6_sk(sk)->mc_loop;
#endif
	}
	WARN_ON(1);
	return true;
}
EXPORT_SYMBOL(sk_mc_loop);

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock_txtime sk_txtime;
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		sk_dst_reset(sk);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this. BSD doesn't, and if you think
		 * about it, this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		/* Ensure val * 2 fits into an int, to prevent max_t()
		 * from treating it as a negative value.
		 */
		val = min_t(int, val, INT_MAX / 2);
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		/* No negative values (to prevent underflow, as val will be
		 * multiplied by 2).
		 */
		if (val < 0)
			val = 0;
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this. BSD doesn't, and if you think
		 * about it, this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		/* Ensure val * 2 fits into an int, to prevent max_t()
		 * from treating it as a negative value.
		 */
		val = min_t(int, val, INT_MAX / 2);
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead. Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		WRITE_ONCE(sk->sk_rcvbuf,
			   max_t(int, val * 2, SOCK_MIN_RCVBUF));
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		/* No negative values (to prevent underflow, as val will be
		 * multiplied by 2).
		 */
		if (val < 0)
			val = 0;
		goto set_rcvbuf;

	case SO_KEEPALIVE:
		if (sk->sk_prot->keepalive)
			sk->sk_prot->keepalive(sk, valbool);
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check_tx = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP_OLD:
	case SO_TIMESTAMP_NEW:
	case SO_TIMESTAMPNS_OLD:
	case SO_TIMESTAMPNS_NEW:
		if (valbool) {
			if (optname == SO_TIMESTAMP_NEW || optname == SO_TIMESTAMPNS_NEW)
				sock_set_flag(sk, SOCK_TSTAMP_NEW);
			else
				sock_reset_flag(sk, SOCK_TSTAMP_NEW);

			if (optname == SO_TIMESTAMP_OLD || optname == SO_TIMESTAMP_NEW)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			sock_reset_flag(sk, SOCK_TSTAMP_NEW);
		}
		break;

	case SO_TIMESTAMPING_NEW:
		sock_set_flag(sk, SOCK_TSTAMP_NEW);
		/* fall through */
	case SO_TIMESTAMPING_OLD:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}

		if (val & SOF_TIMESTAMPING_OPT_ID &&
		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
			if (sk->sk_protocol == IPPROTO_TCP &&
			    sk->sk_type == SOCK_STREAM) {
				if ((1 << sk->sk_state) &
				    (TCPF_CLOSE | TCPF_LISTEN)) {
					ret = -EINVAL;
					break;
				}
				sk->sk_tskey = tcp_sk(sk)->snd_una;
			} else {
				sk->sk_tskey = 0;
			}
		}

		if (val & SOF_TIMESTAMPING_OPT_STATS &&
		    !(val & SOF_TIMESTAMPING_OPT_TSONLY)) {
			ret = -EINVAL;
			break;
		}

		sk->sk_tsflags = val;
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else {
			if (optname == SO_TIMESTAMPING_NEW)
				sock_reset_flag(sk, SOCK_TSTAMP_NEW);

			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		}
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		if (sock->ops->set_rcvlowat)
			ret = sock->ops->set_rcvlowat(sk, val);
		else
			WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
		break;

	case SO_RCVTIMEO_OLD:
	case SO_RCVTIMEO_NEW:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen, optname == SO_RCVTIMEO_OLD);
		break;

	case SO_SNDTIMEO_OLD:
	case SO_SNDTIMEO_NEW:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen, optname == SO_SNDTIMEO_OLD);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_BPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_attach_bpf(ufd, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_CBPF:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_reuseport_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_EBPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_reuseport_attach_bpf(ufd, sk);
		}
		break;

	case SO_DETACH_REUSEPORT_BPF:
		ret = reuseport_detach_prog(sk);
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
			ret = -EPERM;
		} else if (val != sk->sk_mark) {
			sk->sk_mark = val;
			sk_dst_reset(sk);
		}
		break;

	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			ret = sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				sk->sk_ll_usec = val;
		}
		break;
#endif

	case SO_MAX_PACING_RATE:
		{
		unsigned long ulval = (val == ~0U) ? ~0UL : val;

		if (sizeof(ulval) != sizeof(val) &&
		    optlen >= sizeof(ulval) &&
		    get_user(ulval, (unsigned long __user *)optval)) {
			ret = -EFAULT;
			break;
		}
		if (ulval != ~0UL)
			cmpxchg(&sk->sk_pacing_status,
				SK_PACING_NONE,
				SK_PACING_NEEDED);
		sk->sk_max_pacing_rate = ulval;
		sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);
		break;
		}
	case SO_INCOMING_CPU:
		sk->sk_incoming_cpu = val;
		break;

	case SO_CNX_ADVICE:
		if (val == 1)
			dst_negative_advice(sk);
		break;

	case SO_ZEROCOPY:
		if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
			if (!((sk->sk_type == SOCK_STREAM &&
			       sk->sk_protocol == IPPROTO_TCP) ||
			      (sk->sk_type == SOCK_DGRAM &&
			       sk->sk_protocol == IPPROTO_UDP)))
				ret = -ENOTSUPP;
		} else if (sk->sk_family != PF_RDS) {
			ret = -ENOTSUPP;
		}
		if (!ret) {
			if (val < 0 || val > 1)
				ret = -EINVAL;
			else
				sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool);
		}
		break;

	case SO_TXTIME:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
			ret = -EPERM;
		} else if (optlen != sizeof(struct sock_txtime)) {
			ret = -EINVAL;
		} else if (copy_from_user(&sk_txtime, optval,
			   sizeof(struct sock_txtime))) {
			ret = -EFAULT;
		} else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) {
			ret = -EINVAL;
		} else {
			sock_valbool_flag(sk, SOCK_TXTIME, true);
			sk->sk_clockid = sk_txtime.clockid;
			sk->sk_txtime_deadline_mode =
				!!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE);
			sk->sk_txtime_report_errors =
				!!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS);
		}
		break;

	case SO_BINDTOIFINDEX:
		ret = sock_setbindtodevice_locked(sk, val);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
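
/*
 * Illustrative userspace view of the SO_RCVBUF doubling above (not part
 * of this file): the kernel stores twice the requested value, so a later
 * getsockopt() returns the doubled amount:
 *
 *	int val = 65536;
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	socklen_t len = sizeof(val);
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len);
 *	// val is now 131072, assuming sysctl_rmem_max allowed 65536
 */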


static void cred_to_ucred(struct pid *pid, const struct cred *cred,
			  struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}

static int groups_to_user(gid_t __user *dst, const struct group_info *src)
{
	struct user_namespace *user_ns = current_user_ns();
	int i;

	for (i = 0; i < src->ngroups; i++)
		if (put_user(from_kgid_munged(user_ns, src->gid[i]), dst + i))
			return -EFAULT;

	return 0;
}

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		u64 val64;
		unsigned long ulval;
		struct linger ling;
		struct old_timeval32 tm32;
		struct __kernel_old_timeval tm;
		struct __kernel_sock_timeval stm;
		struct sock_txtime txtime;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check_tx;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP_OLD:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_TSTAMP_NEW) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS_OLD:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMP_NEW:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMPNS_NEW:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMPING_OLD:
		v.val = sk->sk_tsflags;
		break;

	case SO_RCVTIMEO_OLD:
	case SO_RCVTIMEO_NEW:
		lv = sock_get_timeout(sk->sk_rcvtimeo, &v, SO_RCVTIMEO_OLD == optname);
		break;

	case SO_SNDTIMEO_OLD:
	case SO_SNDTIMEO_NEW:
		lv = sock_get_timeout(sk->sk_sndtimeo, &v, SO_SNDTIMEO_OLD == optname);
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERGROUPS:
	{
		int ret, n;

		if (!sk->sk_peer_cred)
			return -ENODATA;

		n = sk->sk_peer_cred->group_info->ngroups;
		if (len < n * sizeof(gid_t)) {
			len = n * sizeof(gid_t);
			return put_user(len, optlen) ? -EFAULT : -ERANGE;
		}
		len = n * sizeof(gid_t);

		ret = groups_to_user((gid_t __user *)optval,
				     sk->sk_peer_cred->group_info);
		if (ret)
			return ret;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		lv = sock->ops->getname(sock, (struct sockaddr *)address, 2);
		if (lv < 0)
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_BPF_EXTENSIONS:
		v.val = bpf_tell_extensions();
		break;

	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		v.val = sk->sk_ll_usec;
		break;
#endif

	case SO_MAX_PACING_RATE:
		if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) {
			lv = sizeof(v.ulval);
			v.ulval = sk->sk_max_pacing_rate;
		} else {
			/* 32bit version */
			v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
		}
		break;

	case SO_INCOMING_CPU:
		v.val = sk->sk_incoming_cpu;
		break;

	case SO_MEMINFO:
	{
		u32 meminfo[SK_MEMINFO_VARS];

		sk_get_meminfo(sk, meminfo);

		len = min_t(unsigned int, len, sizeof(meminfo));
		if (copy_to_user(optval, &meminfo, len))
			return -EFAULT;

		goto lenout;
	}

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_INCOMING_NAPI_ID:
		v.val = READ_ONCE(sk->sk_napi_id);

		/* aggregate non-NAPI IDs down to 0 */
		if (v.val < MIN_NAPI_ID)
			v.val = 0;

		break;
#endif

	case SO_COOKIE:
		lv = sizeof(u64);
		if (len < lv)
			return -EINVAL;
		v.val64 = sock_gen_cookie(sk);
		break;

	case SO_ZEROCOPY:
		v.val = sock_flag(sk, SOCK_ZEROCOPY);
		break;

	case SO_TXTIME:
		lv = sizeof(v.txtime);
		v.txtime.clockid = sk->sk_clockid;
		v.txtime.flags |= sk->sk_txtime_deadline_mode ?
				  SOF_TXTIME_DEADLINE_MODE : 0;
		v.txtime.flags |= sk->sk_txtime_report_errors ?
				  SOF_TXTIME_REPORT_ERRORS : 0;
		break;

	case SO_BINDTOIFINDEX:
		v.val = sk->sk_bound_dev_if;
		break;

	default:
		/* We implement the SO_SNDLOWAT etc to not be settable
		 * (1003.1g 7).
		 */
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

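/*
 * Illustrative userspace read of one of the options above (not part of
 * this file), here SO_MEMINFO, which fills an array of u32 counters:
 *
 *	__u32 mem[SK_MEMINFO_VARS];
 *	socklen_t len = sizeof(mem);
 *	getsockopt(fd, SOL_SOCKET, SO_MEMINFO, mem, &len);
 *	// mem[SK_MEMINFO_RMEM_ALLOC] etc. now hold the socket's counters
 */
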
/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	if (sk->sk_kern_sock)
		sock_lock_init_class_and_name(
			sk,
			af_family_kern_slock_key_strings[sk->sk_family],
			af_family_kern_slock_keys + sk->sk_family,
			af_family_kern_key_strings[sk->sk_family],
			af_family_kern_keys + sk->sk_family);
	else
		sock_lock_init_class_and_name(
			sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (want_init_on_alloc(priority))
			sk_prot_clear_nulls(sk, prot->obj_size);
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	cgroup_sk_free(&sk->sk_cgrp_data);
	mem_cgroup_sk_free(sk);
	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 *	@kern: is this to be a kernel socket?
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot, int kern)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sk->sk_kern_sock = kern;
		sock_lock_init(sk);
		sk->sk_net_refcnt = kern ? 0 : 1;
		if (likely(sk->sk_net_refcnt)) {
			get_net(net);
			sock_inuse_add(net, 1);
		}

		sock_net_set(sk, net);
		refcount_set(&sk->sk_wmem_alloc, 1);

		mem_cgroup_sk_alloc(sk);
		cgroup_sk_alloc(&sk->sk_cgrp_data);
		sock_update_classid(&sk->sk_cgrp_data);
		sock_update_netprioidx(&sk->sk_cgrp_data);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);

/* Sockets having SOCK_RCU_FREE will call this function after one RCU
 * grace period. This is the case for UDP sockets and TCP listeners.
 */
static void __sk_destruct(struct rcu_head *head)
{
	struct sock *sk = container_of(head, struct sock, sk_rcu);
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       refcount_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

#ifdef CONFIG_BPF_SYSCALL
	bpf_sk_storage_free(sk);
#endif

	if (atomic_read(&sk->sk_omem_alloc))
		pr_debug("%s: optmem leakage (%d bytes) detected\n",
			 __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_frag.page) {
		put_page(sk->sk_frag.page);
		sk->sk_frag.page = NULL;
	}

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	if (likely(sk->sk_net_refcnt))
		put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_destruct(struct sock *sk)
{
	bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);

	if (rcu_access_pointer(sk->sk_reuseport_cb)) {
		reuseport_detach_sock(sk);
		use_call_rcu = true;
	}

	if (use_call_rcu)
		call_rcu(&sk->sk_rcu, __sk_destruct);
	else
		__sk_destruct(&sk->sk_rcu);
}

static void __sk_free(struct sock *sk)
{
	if (likely(sk->sk_net_refcnt))
		sock_inuse_add(sock_net(sk), -1);

	if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
		sock_diag_broadcast_destroy(sk);
	else
		sk_destruct(sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc and can know if
	 * some packets are still in some tx queue.
	 * If not null, sock_wfree() will call __sk_free(sk) later
	 */
	if (refcount_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);

static void sk_init_common(struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);

	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
			af_rlock_keys + sk->sk_family,
			af_family_rlock_key_strings[sk->sk_family]);
	lockdep_set_class_and_name(&sk->sk_write_queue.lock,
			af_wlock_keys + sk->sk_family,
			af_family_wlock_key_strings[sk->sk_family]);
	lockdep_set_class_and_name(&sk->sk_error_queue.lock,
			af_elock_keys + sk->sk_family,
			af_family_elock_key_strings[sk->sk_family]);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
			af_callback_keys + sk->sk_family,
			af_family_clock_key_strings[sk->sk_family]);
}

e56c57d0
ED
1787/**
1788 * sk_clone_lock - clone a socket, and lock its clone
1789 * @sk: the socket to clone
1790 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1791 *
1792 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1793 */
1794struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
87d11ceb 1795{
8fd1d178 1796 struct sock *newsk;
278571ba 1797 bool is_charged = true;
87d11ceb 1798
8fd1d178 1799 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
87d11ceb
ACM
1800 if (newsk != NULL) {
1801 struct sk_filter *filter;
1802
892c141e 1803 sock_copy(newsk, sk);
87d11ceb 1804
9d538fa6
CP
1805 newsk->sk_prot_creator = sk->sk_prot;
1806
87d11ceb 1807 /* SANITY */
8a681736
SV
1808 if (likely(newsk->sk_net_refcnt))
1809 get_net(sock_net(newsk));
87d11ceb
ACM
1810 sk_node_init(&newsk->sk_node);
1811 sock_lock_init(newsk);
1812 bh_lock_sock(newsk);
fa438ccf 1813 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
8eae939f 1814 newsk->sk_backlog.len = 0;
87d11ceb
ACM
1815
1816 atomic_set(&newsk->sk_rmem_alloc, 0);
2b85a34e
ED
1817 /*
1818 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1819 */
14afee4b 1820 refcount_set(&newsk->sk_wmem_alloc, 1);
87d11ceb 1821 atomic_set(&newsk->sk_omem_alloc, 0);
581319c5 1822 sk_init_common(newsk);
87d11ceb
ACM
1823
1824 newsk->sk_dst_cache = NULL;
9b8805a3 1825 newsk->sk_dst_pending_confirm = 0;
87d11ceb
ACM
1826 newsk->sk_wmem_queued = 0;
1827 newsk->sk_forward_alloc = 0;
9caad864 1828 atomic_set(&newsk->sk_drops, 0);
87d11ceb 1829 newsk->sk_send_head = NULL;
87d11ceb 1830 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
52267790 1831 atomic_set(&newsk->sk_zckey, 0);
87d11ceb
ACM
1832
1833 sock_reset_flag(newsk, SOCK_DONE);
edbe69ef 1834 mem_cgroup_sk_alloc(newsk);
c0576e39 1835 cgroup_sk_alloc(&newsk->sk_cgrp_data);
87d11ceb 1836
eefca20e
ED
1837 rcu_read_lock();
1838 filter = rcu_dereference(sk->sk_filter);
87d11ceb 1839 if (filter != NULL)
278571ba
AS
1840 /* though it's an empty new sock, the charging may fail
1841 * if sysctl_optmem_max was changed between creation of
1842 * original socket and cloning
1843 */
1844 is_charged = sk_filter_charge(newsk, filter);
eefca20e
ED
1845 RCU_INIT_POINTER(newsk->sk_filter, filter);
1846 rcu_read_unlock();
87d11ceb 1847
d188ba86 1848 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
a97e50cc
DB
1849 /* We need to make sure that we don't uncharge the new
1850 * socket if we couldn't charge it in the first place
1851 * as otherwise we uncharge the parent's filter.
1852 */
1853 if (!is_charged)
1854 RCU_INIT_POINTER(newsk->sk_filter, NULL);
94352d45 1855 sk_free_unlock_clone(newsk);
87d11ceb
ACM
1856 newsk = NULL;
1857 goto out;
1858 }
fa463497 1859 RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
8f51dfc7
SF
1860
1861 if (bpf_sk_storage_clone(sk, newsk)) {
1862 sk_free_unlock_clone(newsk);
1863 newsk = NULL;
1864 goto out;
1865 }
87d11ceb
ACM
1866
1867 newsk->sk_err = 0;
e551c32d 1868 newsk->sk_err_soft = 0;
87d11ceb 1869 newsk->sk_priority = 0;
2c8c56e1 1870 newsk->sk_incoming_cpu = raw_smp_processor_id();
648845ab
TZ
1871 if (likely(newsk->sk_net_refcnt))
1872 sock_inuse_add(sock_net(newsk), 1);
d979a39d 1873
4dc6dc71
ED
1874 /*
1875 * Before updating sk_refcnt, we must commit prior changes to memory
1876 * (Documentation/RCU/rculist_nulls.txt for details)
1877 */
1878 smp_wmb();
41c6d650 1879 refcount_set(&newsk->sk_refcnt, 2);
87d11ceb
ACM
1880
1881		/*
1882		 * Increment the counter in the same struct proto as the master
1883		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, which
1884		 * is the same as sk->sk_prot->socks, as this field was copied
1885		 * with memcpy).
1886		 *
1887		 * This _changes_ the previous behaviour, where
1888		 * tcp_create_openreq_child always incremented the equivalent
1889		 * of tcp_prot->socks (inet_sock_nr), so this has to be taken
1890		 * into account in all callers. -acme
1891		 */
1892 sk_refcnt_debug_inc(newsk);
972692e0 1893 sk_set_socket(newsk, NULL);
c2f26e8f 1894 RCU_INIT_POINTER(newsk->sk_wq, NULL);
87d11ceb
ACM
1895
1896 if (newsk->sk_prot->sockets_allocated)
180d8cd9 1897 sk_sockets_allocated_inc(newsk);
704da560 1898
080a270f
HFS
1899 if (sock_needs_netstamp(sk) &&
1900 newsk->sk_flags & SK_FLAGS_TIMESTAMP)
704da560 1901 net_enable_timestamp();
87d11ceb
ACM
1902 }
1903out:
1904 return newsk;
1905}
e56c57d0 1906EXPORT_SYMBOL_GPL(sk_clone_lock);
87d11ceb 1907
94352d45
ACM
1908void sk_free_unlock_clone(struct sock *sk)
1909{
1910	/* It is still a raw copy of the parent, so invalidate
1911	 * the destructor and do a plain sk_free() */
1912 sk->sk_destruct = NULL;
1913 bh_unlock_sock(sk);
1914 sk_free(sk);
1915}
1916EXPORT_SYMBOL_GPL(sk_free_unlock_clone);
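
/* Editor's sketch, not part of the original file: typical use of
 * sk_clone_lock() by a protocol creating a child socket. The clone is
 * returned locked and must be unlocked on every path, as the kernel-doc
 * above notes. example_clone_child() is a hypothetical name.
 */
static struct sock *example_clone_child(const struct sock *parent)
{
	struct sock *child = sk_clone_lock(parent, GFP_ATOMIC);

	if (!child)
		return NULL;
	/* ... protocol-specific initialisation of the child here ... */
	bh_unlock_sock(child);
	return child;
}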
1917
9958089a
AK
1918void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1919{
d6a4e26a
ED
1920 u32 max_segs = 1;
1921
6bd4f355 1922 sk_dst_set(sk, dst);
0a6b2a1d 1923 sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps;
9958089a 1924 if (sk->sk_route_caps & NETIF_F_GSO)
4fcd6b99 1925 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
a465419b 1926 sk->sk_route_caps &= ~sk->sk_route_nocaps;
9958089a 1927 if (sk_can_gso(sk)) {
f70f250a 1928 if (dst->header_len && !xfrm_dst_offload_ok(dst)) {
9958089a 1929 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
82cc1a7a 1930 } else {
9958089a 1931 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
82cc1a7a 1932 sk->sk_gso_max_size = dst->dev->gso_max_size;
d6a4e26a 1933 max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
82cc1a7a 1934 }
9958089a 1935 }
d6a4e26a 1936 sk->sk_gso_max_segs = max_segs;
9958089a
AK
1937}
1938EXPORT_SYMBOL_GPL(sk_setup_caps);
1939
1da177e4
LT
1940/*
1941 * Simple resource managers for sockets.
1942 */
1943
1944
4ec93edb
YH
1945/*
1946 * Write buffer destructor automatically called from kfree_skb.
1da177e4
LT
1947 */
1948void sock_wfree(struct sk_buff *skb)
1949{
1950 struct sock *sk = skb->sk;
d99927f4 1951 unsigned int len = skb->truesize;
1da177e4 1952
d99927f4
ED
1953 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1954		/*
1955		 * Keep a reference on sk_wmem_alloc; it will be released
1956		 * after the sk_write_space() call.
1957		 */
14afee4b 1958 WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
1da177e4 1959 sk->sk_write_space(sk);
d99927f4
ED
1960 len = 1;
1961 }
2b85a34e 1962	/*
d99927f4
ED
1963	 * If sk_wmem_alloc reaches 0, we must finish what sk_free()
1964	 * could not do because of in-flight packets.
2b85a34e 1965	 */
14afee4b 1966 if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
2b85a34e 1967 __sk_free(sk);
1da177e4 1968}
2a91525c 1969EXPORT_SYMBOL(sock_wfree);
1da177e4 1970
1d2077ac
ED
1971/* This variant of sock_wfree() is used by TCP,
1972 * since it sets SOCK_USE_WRITE_QUEUE.
1973 */
1974void __sock_wfree(struct sk_buff *skb)
1975{
1976 struct sock *sk = skb->sk;
1977
14afee4b 1978 if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
1d2077ac
ED
1979 __sk_free(sk);
1980}
1981
9e17f8a4
ED
1982void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1983{
1984 skb_orphan(skb);
1985 skb->sk = sk;
1986#ifdef CONFIG_INET
1987 if (unlikely(!sk_fullsock(sk))) {
1988 skb->destructor = sock_edemux;
1989 sock_hold(sk);
1990 return;
1991 }
1992#endif
1993 skb->destructor = sock_wfree;
1994 skb_set_hash_from_sk(skb, sk);
1995	/*
1996	 * We used to take a refcount on sk, but the following operation
1997	 * is enough to guarantee sk_free() won't free this sock until
1998	 * all in-flight packets are completed.
1999	 */
14afee4b 2000 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
9e17f8a4
ED
2001}
2002EXPORT_SYMBOL(skb_set_owner_w);
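
/* Editor's sketch, not part of the original file: how the write-buffer
 * charge taken by skb_set_owner_w() is paired with sock_wfree(). The
 * function name is hypothetical; the calls are real.
 */
static int example_charge_tx_skb(struct sock *sk)
{
	struct sk_buff *skb = alloc_skb(128, GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	skb_set_owner_w(skb, sk);	/* adds skb->truesize to sk_wmem_alloc */
	kfree_skb(skb);			/* sock_wfree() returns the charge */
	return 0;
}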
2003
41477662
JK
2004static bool can_skb_orphan_partial(const struct sk_buff *skb)
2005{
2006#ifdef CONFIG_TLS_DEVICE
2007 /* Drivers depend on in-order delivery for crypto offload,
2008 * partial orphan breaks out-of-order-OK logic.
2009 */
2010 if (skb->decrypted)
2011 return false;
2012#endif
2013 return (skb->destructor == sock_wfree ||
2014 (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
2015}
2016
1d2077ac
ED
2017/* This helper is used by netem, as it can hold packets in its
2018 * delay queue. We want to allow the owner socket to send more
2019 * packets, as if they were already TX completed by a typical driver.
2020 * But we also want to keep skb->sk set because some packet schedulers
f6ba8d33 2021 * rely on it (sch_fq for example).
1d2077ac 2022 */
f2f872f9
ED
2023void skb_orphan_partial(struct sk_buff *skb)
2024{
f6ba8d33 2025 if (skb_is_tcp_pure_ack(skb))
1d2077ac
ED
2026 return;
2027
41477662 2028 if (can_skb_orphan_partial(skb)) {
f6ba8d33
ED
2029 struct sock *sk = skb->sk;
2030
41c6d650 2031 if (refcount_inc_not_zero(&sk->sk_refcnt)) {
14afee4b 2032 WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
f6ba8d33
ED
2033 skb->destructor = sock_efree;
2034 }
f2f872f9
ED
2035 } else {
2036 skb_orphan(skb);
2037 }
2038}
2039EXPORT_SYMBOL(skb_orphan_partial);
2040
4ec93edb
YH
2041/*
2042 * Read buffer destructor automatically called from kfree_skb.
1da177e4
LT
2043 */
2044void sock_rfree(struct sk_buff *skb)
2045{
2046 struct sock *sk = skb->sk;
d361fd59 2047 unsigned int len = skb->truesize;
1da177e4 2048
d361fd59
ED
2049 atomic_sub(len, &sk->sk_rmem_alloc);
2050 sk_mem_uncharge(sk, len);
1da177e4 2051}
2a91525c 2052EXPORT_SYMBOL(sock_rfree);
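
/* Editor's sketch, not part of the original file: sock_rfree() is the
 * destructor installed by skb_set_owner_r(), mirroring the write side.
 * example_queue_rx() is a hypothetical name; a real receive path would
 * also check sk_rcvbuf before queueing.
 */
static void example_queue_rx(struct sock *sk, struct sk_buff *skb)
{
	skb_set_owner_r(skb, sk);	/* charges sk_rmem_alloc and fwd alloc */
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);
}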
1da177e4 2053
7768eed8
OH
2054/*
2055 * Buffer destructor for skbs that are not used directly in read or write
2056 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
2057 */
62bccb8c
AD
2058void sock_efree(struct sk_buff *skb)
2059{
2060 sock_put(skb->sk);
2061}
2062EXPORT_SYMBOL(sock_efree);
2063
976d0201 2064kuid_t sock_i_uid(struct sock *sk)
1da177e4 2065{
976d0201 2066 kuid_t uid;
1da177e4 2067
f064af1e 2068 read_lock_bh(&sk->sk_callback_lock);
976d0201 2069 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
f064af1e 2070 read_unlock_bh(&sk->sk_callback_lock);
1da177e4
LT
2071 return uid;
2072}
2a91525c 2073EXPORT_SYMBOL(sock_i_uid);
1da177e4
LT
2074
2075unsigned long sock_i_ino(struct sock *sk)
2076{
2077 unsigned long ino;
2078
f064af1e 2079 read_lock_bh(&sk->sk_callback_lock);
1da177e4 2080 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
f064af1e 2081 read_unlock_bh(&sk->sk_callback_lock);
1da177e4
LT
2082 return ino;
2083}
2a91525c 2084EXPORT_SYMBOL(sock_i_ino);
1da177e4
LT
2085
2086/*
2087 * Allocate a skb from the socket's send buffer.
2088 */
86a76caf 2089struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
dd0fc66f 2090 gfp_t priority)
1da177e4 2091{
14afee4b 2092 if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
2a91525c 2093 struct sk_buff *skb = alloc_skb(size, priority);
1da177e4
LT
2094 if (skb) {
2095 skb_set_owner_w(skb, sk);
2096 return skb;
2097 }
2098 }
2099 return NULL;
2100}
2a91525c 2101EXPORT_SYMBOL(sock_wmalloc);
1da177e4 2102
98ba0bd5
WB
2103static void sock_ofree(struct sk_buff *skb)
2104{
2105 struct sock *sk = skb->sk;
2106
2107 atomic_sub(skb->truesize, &sk->sk_omem_alloc);
2108}
2109
2110struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
2111 gfp_t priority)
2112{
2113 struct sk_buff *skb;
2114
2115 /* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
2116 if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
2117 sysctl_optmem_max)
2118 return NULL;
2119
2120 skb = alloc_skb(size, priority);
2121 if (!skb)
2122 return NULL;
2123
2124 atomic_add(skb->truesize, &sk->sk_omem_alloc);
2125 skb->sk = sk;
2126 skb->destructor = sock_ofree;
2127 return skb;
2128}
2129
4ec93edb 2130/*
1da177e4 2131 * Allocate a memory block from the socket's option memory buffer.
4ec93edb 2132 */
dd0fc66f 2133void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1da177e4 2134{
95c96174 2135 if ((unsigned int)size <= sysctl_optmem_max &&
1da177e4
LT
2136 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
2137 void *mem;
2138 /* First do the add, to avoid the race if kmalloc
4ec93edb 2139 * might sleep.
1da177e4
LT
2140 */
2141 atomic_add(size, &sk->sk_omem_alloc);
2142 mem = kmalloc(size, priority);
2143 if (mem)
2144 return mem;
2145 atomic_sub(size, &sk->sk_omem_alloc);
2146 }
2147 return NULL;
2148}
2a91525c 2149EXPORT_SYMBOL(sock_kmalloc);
1da177e4 2150
79e88659
DB
2151/* Free an option memory block. Note, we actually want the inline
2152 * here as this allows gcc to detect the nullify and fold away the
2153 * condition entirely.
1da177e4 2154 */
79e88659
DB
2155static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
2156 const bool nullify)
1da177e4 2157{
e53da5fb
DM
2158 if (WARN_ON_ONCE(!mem))
2159 return;
79e88659
DB
2160 if (nullify)
2161 kzfree(mem);
2162 else
2163 kfree(mem);
1da177e4
LT
2164 atomic_sub(size, &sk->sk_omem_alloc);
2165}
79e88659
DB
2166
2167void sock_kfree_s(struct sock *sk, void *mem, int size)
2168{
2169 __sock_kfree_s(sk, mem, size, false);
2170}
2a91525c 2171EXPORT_SYMBOL(sock_kfree_s);
1da177e4 2172
79e88659
DB
2173void sock_kzfree_s(struct sock *sk, void *mem, int size)
2174{
2175 __sock_kfree_s(sk, mem, size, true);
2176}
2177EXPORT_SYMBOL(sock_kzfree_s);
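
/* Editor's sketch, not part of the original file: the omem charge taken
 * by sock_kmalloc() must be returned with sock_kfree_s() (or
 * sock_kzfree_s() for key material), passing the same size. The helper
 * name is hypothetical.
 */
static int example_option_blob(struct sock *sk)
{
	void *buf = sock_kmalloc(sk, 256, GFP_KERNEL);

	if (!buf)
		return -ENOBUFS;
	/* ... use buf for option processing ... */
	sock_kfree_s(sk, buf, 256);	/* uncharges sk_omem_alloc */
	return 0;
}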
2178
1da177e4
LT
2179/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
2180   I think these locks should be removed for datagram sockets.
2181 */
2a91525c 2182static long sock_wait_for_wmem(struct sock *sk, long timeo)
1da177e4
LT
2183{
2184 DEFINE_WAIT(wait);
2185
9cd3e072 2186 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1da177e4
LT
2187 for (;;) {
2188 if (!timeo)
2189 break;
2190 if (signal_pending(current))
2191 break;
2192 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
aa395145 2193 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
14afee4b 2194 if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1da177e4
LT
2195 break;
2196 if (sk->sk_shutdown & SEND_SHUTDOWN)
2197 break;
2198 if (sk->sk_err)
2199 break;
2200 timeo = schedule_timeout(timeo);
2201 }
aa395145 2202 finish_wait(sk_sleep(sk), &wait);
1da177e4
LT
2203 return timeo;
2204}
2205
2206
2207/*
2208 * Generic send/receive buffer handlers
2209 */
2210
4cc7f68d
HX
2211struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
2212 unsigned long data_len, int noblock,
28d64271 2213 int *errcode, int max_page_order)
1da177e4 2214{
2e4e4410 2215 struct sk_buff *skb;
1da177e4
LT
2216 long timeo;
2217 int err;
2218
1da177e4 2219 timeo = sock_sndtimeo(sk, noblock);
2e4e4410 2220 for (;;) {
1da177e4
LT
2221 err = sock_error(sk);
2222 if (err != 0)
2223 goto failure;
2224
2225 err = -EPIPE;
2226 if (sk->sk_shutdown & SEND_SHUTDOWN)
2227 goto failure;
2228
2e4e4410
ED
2229 if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
2230 break;
28d64271 2231
9cd3e072 2232 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2e4e4410
ED
2233 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2234 err = -EAGAIN;
2235 if (!timeo)
1da177e4 2236 goto failure;
2e4e4410
ED
2237 if (signal_pending(current))
2238 goto interrupted;
2239 timeo = sock_wait_for_wmem(sk, timeo);
1da177e4 2240 }
2e4e4410
ED
2241 skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
2242 errcode, sk->sk_allocation);
2243 if (skb)
2244 skb_set_owner_w(skb, sk);
1da177e4
LT
2245 return skb;
2246
2247interrupted:
2248 err = sock_intr_errno(timeo);
2249failure:
2250 *errcode = err;
2251 return NULL;
2252}
4cc7f68d 2253EXPORT_SYMBOL(sock_alloc_send_pskb);
1da177e4 2254
4ec93edb 2255struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1da177e4
LT
2256 int noblock, int *errcode)
2257{
28d64271 2258 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
1da177e4 2259}
2a91525c 2260EXPORT_SYMBOL(sock_alloc_send_skb);
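
/* Editor's sketch, not part of the original file: a datagram sendmsg()
 * implementation allocating from the send buffer, blocking unless
 * MSG_DONTWAIT was passed. The function name is hypothetical.
 */
static struct sk_buff *example_alloc_for_send(struct sock *sk,
					      unsigned int len,
					      int flags, int *err)
{
	struct sk_buff *skb;

	skb = sock_alloc_send_skb(sk, len, flags & MSG_DONTWAIT, err);
	if (skb)
		skb_put(skb, len);	/* make room for the payload */
	return skb;
}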
1da177e4 2261
39771b12
WB
2262int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
2263 struct sockcm_cookie *sockc)
2264{
3dd17e63
SHY
2265 u32 tsflags;
2266
39771b12
WB
2267 switch (cmsg->cmsg_type) {
2268 case SO_MARK:
2269 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2270 return -EPERM;
2271 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2272 return -EINVAL;
2273 sockc->mark = *(u32 *)CMSG_DATA(cmsg);
2274 break;
7f1bc6e9 2275 case SO_TIMESTAMPING_OLD:
3dd17e63
SHY
2276 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2277 return -EINVAL;
2278
2279 tsflags = *(u32 *)CMSG_DATA(cmsg);
2280 if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
2281 return -EINVAL;
2282
2283 sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
2284 sockc->tsflags |= tsflags;
2285 break;
80b14dee
RC
2286 case SCM_TXTIME:
2287 if (!sock_flag(sk, SOCK_TXTIME))
2288 return -EINVAL;
2289 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64)))
2290 return -EINVAL;
2291 sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg));
2292 break;
779f1ede
SHY
2293 /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
2294 case SCM_RIGHTS:
2295 case SCM_CREDENTIALS:
2296 break;
39771b12
WB
2297 default:
2298 return -EINVAL;
2299 }
2300 return 0;
2301}
2302EXPORT_SYMBOL(__sock_cmsg_send);
2303
f28ea365
EJ
2304int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
2305 struct sockcm_cookie *sockc)
2306{
2307 struct cmsghdr *cmsg;
39771b12 2308 int ret;
f28ea365
EJ
2309
2310 for_each_cmsghdr(cmsg, msg) {
2311 if (!CMSG_OK(msg, cmsg))
2312 return -EINVAL;
2313 if (cmsg->cmsg_level != SOL_SOCKET)
2314 continue;
39771b12
WB
2315 ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
2316 if (ret)
2317 return ret;
f28ea365
EJ
2318 }
2319 return 0;
2320}
2321EXPORT_SYMBOL(sock_cmsg_send);
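
/* Editor's sketch, not part of the original file: how a protocol
 * sendmsg() typically consumes SOL_SOCKET control messages. The cookie
 * initializer seeds tsflags from the socket, as callers of this helper
 * do; the function name is hypothetical.
 */
static int example_parse_cmsgs(struct sock *sk, struct msghdr *msg)
{
	struct sockcm_cookie sockc = { .tsflags = sk->sk_tsflags };

	if (msg->msg_controllen) {
		int err = sock_cmsg_send(sk, msg, &sockc);

		if (err)
			return err;
	}
	/* sockc.mark, sockc.tsflags and sockc.transmit_time are now set */
	return 0;
}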
2322
06044751
ED
2323static void sk_enter_memory_pressure(struct sock *sk)
2324{
2325 if (!sk->sk_prot->enter_memory_pressure)
2326 return;
2327
2328 sk->sk_prot->enter_memory_pressure(sk);
2329}
2330
2331static void sk_leave_memory_pressure(struct sock *sk)
2332{
2333 if (sk->sk_prot->leave_memory_pressure) {
2334 sk->sk_prot->leave_memory_pressure(sk);
2335 } else {
2336 unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
2337
503978ac
ED
2338 if (memory_pressure && READ_ONCE(*memory_pressure))
2339 WRITE_ONCE(*memory_pressure, 0);
06044751
ED
2340 }
2341}
2342
5640f768
ED
2343/* On 32bit arches, an skb frag is limited to 2^15 bytes */
2344#define SKB_FRAG_PAGE_ORDER get_order(32768)
ce27ec60 2345DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
5640f768 2346
400dfd3a
ED
2347/**
2348 * skb_page_frag_refill - check that a page_frag contains enough room
2349 * @sz: minimum size of the fragment we want to get
2350 * @pfrag: pointer to page_frag
82d5e2b8 2351 * @gfp: priority for memory allocation
400dfd3a
ED
2352 *
2353 * Note: While this allocator tries to use high order pages, there is
2354 * no guarantee that allocations succeed. Therefore, @sz MUST be
2355 * less than or equal to PAGE_SIZE.
2356 */
d9b2938a 2357bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
5640f768 2358{
5640f768 2359 if (pfrag->page) {
fe896d18 2360 if (page_ref_count(pfrag->page) == 1) {
5640f768
ED
2361 pfrag->offset = 0;
2362 return true;
2363 }
400dfd3a 2364 if (pfrag->offset + sz <= pfrag->size)
5640f768
ED
2365 return true;
2366 put_page(pfrag->page);
2367 }
2368
d9b2938a 2369 pfrag->offset = 0;
ce27ec60
ED
2370 if (SKB_FRAG_PAGE_ORDER &&
2371 !static_branch_unlikely(&net_high_order_alloc_disable_key)) {
d0164adc
MG
2372 /* Avoid direct reclaim but allow kswapd to wake */
2373 pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
2374 __GFP_COMP | __GFP_NOWARN |
2375 __GFP_NORETRY,
d9b2938a 2376 SKB_FRAG_PAGE_ORDER);
5640f768 2377 if (likely(pfrag->page)) {
d9b2938a 2378 pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
5640f768
ED
2379 return true;
2380 }
d9b2938a
ED
2381 }
2382 pfrag->page = alloc_page(gfp);
2383 if (likely(pfrag->page)) {
2384 pfrag->size = PAGE_SIZE;
2385 return true;
2386 }
400dfd3a
ED
2387 return false;
2388}
2389EXPORT_SYMBOL(skb_page_frag_refill);
2390
2391bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
2392{
2393 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
2394 return true;
2395
5640f768
ED
2396 sk_enter_memory_pressure(sk);
2397 sk_stream_moderate_sndbuf(sk);
2398 return false;
2399}
2400EXPORT_SYMBOL(sk_page_frag_refill);
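
/* Editor's sketch, not part of the original file: filling the per-socket
 * page fragment before copying payload into it, as stream protocols do.
 * The helper name is hypothetical; sk_page_frag() is the real accessor.
 */
static int example_append_to_frag(struct sock *sk, unsigned int len)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;	/* under memory pressure */
	/* Copy up to min(len, pfrag->size - pfrag->offset) bytes at
	 * page_address(pfrag->page) + pfrag->offset, then advance
	 * pfrag->offset by the amount copied.
	 */
	return 0;
}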
2401
1da177e4 2402static void __lock_sock(struct sock *sk)
f39234d6
NK
2403 __releases(&sk->sk_lock.slock)
2404 __acquires(&sk->sk_lock.slock)
1da177e4
LT
2405{
2406 DEFINE_WAIT(wait);
2407
e71a4783 2408 for (;;) {
1da177e4
LT
2409 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
2410 TASK_UNINTERRUPTIBLE);
2411 spin_unlock_bh(&sk->sk_lock.slock);
2412 schedule();
2413 spin_lock_bh(&sk->sk_lock.slock);
e71a4783 2414 if (!sock_owned_by_user(sk))
1da177e4
LT
2415 break;
2416 }
2417 finish_wait(&sk->sk_lock.wq, &wait);
2418}
2419
8873c064 2420void __release_sock(struct sock *sk)
f39234d6
NK
2421 __releases(&sk->sk_lock.slock)
2422 __acquires(&sk->sk_lock.slock)
1da177e4 2423{
5413d1ba 2424 struct sk_buff *skb, *next;
1da177e4 2425
5413d1ba 2426 while ((skb = sk->sk_backlog.head) != NULL) {
1da177e4 2427 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1da177e4 2428
5413d1ba 2429 spin_unlock_bh(&sk->sk_lock.slock);
1da177e4 2430
5413d1ba
ED
2431 do {
2432 next = skb->next;
e4cbb02a 2433 prefetch(next);
7fee226a 2434 WARN_ON_ONCE(skb_dst_is_noref(skb));
a8305bff 2435 skb_mark_not_on_list(skb);
c57943a1 2436 sk_backlog_rcv(sk, skb);
1da177e4 2437
5413d1ba 2438 cond_resched();
1da177e4
LT
2439
2440 skb = next;
2441 } while (skb != NULL);
2442
5413d1ba
ED
2443 spin_lock_bh(&sk->sk_lock.slock);
2444 }
8eae939f
ZY
2445
2446 /*
2447	 * Doing the zeroing here guarantees we cannot loop forever
2448 * while a wild producer attempts to flood us.
2449 */
2450 sk->sk_backlog.len = 0;
1da177e4
LT
2451}
2452
d41a69f1
ED
2453void __sk_flush_backlog(struct sock *sk)
2454{
2455 spin_lock_bh(&sk->sk_lock.slock);
2456 __release_sock(sk);
2457 spin_unlock_bh(&sk->sk_lock.slock);
2458}
2459
1da177e4
LT
2460/**
2461 * sk_wait_data - wait for data to arrive at sk_receive_queue
4dc3b16b
PP
2462 * @sk: sock to wait on
2463 * @timeo: for how long
dfbafc99 2464 * @skb: last skb seen on sk_receive_queue
1da177e4
LT
2465 *
2466 * Now socket state including sk->sk_err is changed only under the lock,
2467 * hence we may omit checks after joining the wait queue.
2468 * We check the receive queue before schedule() only as an optimization;
2469 * it is very likely that release_sock() added new data.
2470 */
dfbafc99 2471int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
1da177e4 2472{
d9dc8b0f 2473 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1da177e4 2474 int rc;
1da177e4 2475
d9dc8b0f 2476 add_wait_queue(sk_sleep(sk), &wait);
9cd3e072 2477 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
d9dc8b0f 2478 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
9cd3e072 2479 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
d9dc8b0f 2480 remove_wait_queue(sk_sleep(sk), &wait);
1da177e4
LT
2481 return rc;
2482}
1da177e4
LT
2483EXPORT_SYMBOL(sk_wait_data);
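
/* Editor's sketch, not part of the original file: a recvmsg()-style wait
 * loop built on sk_wait_data(). The caller holds the socket lock, which
 * sk_wait_data() drops and retakes while sleeping. Hypothetical name.
 */
static struct sk_buff *example_wait_for_skb(struct sock *sk, long timeo)
{
	struct sk_buff *skb;

	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
		if (!timeo || signal_pending(current))
			break;
		sk_wait_data(sk, &timeo, NULL);
	}
	return skb;
}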
2484
3ab224be 2485/**
f8c3bf00 2486 * __sk_mem_raise_allocated - increase memory_allocated
3ab224be
HA
2487 * @sk: socket
2488 * @size: memory size to allocate
f8c3bf00 2489 * @amt: pages to allocate
3ab224be
HA
2490 * @kind: allocation type
2491 *
f8c3bf00 2492 * Similar to __sk_mem_schedule(), but does not update sk_forward_alloc
3ab224be 2493 */
f8c3bf00 2494int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
3ab224be
HA
2495{
2496 struct proto *prot = sk->sk_prot;
f8c3bf00 2497 long allocated = sk_memory_allocated_add(sk, amt);
d6f19938 2498 bool charged = true;
e805605c 2499
baac50bb 2500 if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
d6f19938 2501 !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt)))
e805605c 2502 goto suppress_allocation;
3ab224be
HA
2503
2504 /* Under limit. */
e805605c 2505 if (allocated <= sk_prot_mem_limits(sk, 0)) {
180d8cd9 2506 sk_leave_memory_pressure(sk);
3ab224be
HA
2507 return 1;
2508 }
2509
e805605c
JW
2510 /* Under pressure. */
2511 if (allocated > sk_prot_mem_limits(sk, 1))
180d8cd9 2512 sk_enter_memory_pressure(sk);
3ab224be 2513
e805605c
JW
2514 /* Over hard limit. */
2515 if (allocated > sk_prot_mem_limits(sk, 2))
3ab224be
HA
2516 goto suppress_allocation;
2517
2518 /* guarantee minimum buffer size under pressure */
2519 if (kind == SK_MEM_RECV) {
a3dcaf17 2520 if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
3ab224be 2521 return 1;
180d8cd9 2522
3ab224be 2523 } else { /* SK_MEM_SEND */
a3dcaf17
ED
2524 int wmem0 = sk_get_wmem0(sk, prot);
2525
3ab224be 2526 if (sk->sk_type == SOCK_STREAM) {
a3dcaf17 2527 if (sk->sk_wmem_queued < wmem0)
3ab224be 2528 return 1;
a3dcaf17 2529 } else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
3ab224be 2530 return 1;
a3dcaf17 2531 }
3ab224be
HA
2532 }
2533
180d8cd9 2534 if (sk_has_memory_pressure(sk)) {
5bf325a5 2535 u64 alloc;
1748376b 2536
180d8cd9 2537 if (!sk_under_memory_pressure(sk))
1748376b 2538 return 1;
180d8cd9
GC
2539 alloc = sk_sockets_allocated_read_positive(sk);
2540 if (sk_prot_mem_limits(sk, 2) > alloc *
3ab224be
HA
2541 sk_mem_pages(sk->sk_wmem_queued +
2542 atomic_read(&sk->sk_rmem_alloc) +
2543 sk->sk_forward_alloc))
2544 return 1;
2545 }
2546
2547suppress_allocation:
2548
2549 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2550 sk_stream_moderate_sndbuf(sk);
2551
2552		/* Fail only if the socket is _under_ its sndbuf.
2553		 * In this case we cannot block, so we have to fail.
2554		 */
2555 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2556 return 1;
2557 }
2558
d6f19938
YS
2559 if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged))
2560 trace_sock_exceed_buf_limit(sk, prot, allocated, kind);
3847ce32 2561
0e90b31f 2562 sk_memory_allocated_sub(sk, amt);
180d8cd9 2563
baac50bb
JW
2564 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2565 mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
e805605c 2566
3ab224be
HA
2567 return 0;
2568}
f8c3bf00
PA
2569EXPORT_SYMBOL(__sk_mem_raise_allocated);
2570
2571/**
2572 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2573 * @sk: socket
2574 * @size: memory size to allocate
2575 * @kind: allocation type
2576 *
2577 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2578 * rmem allocation. This function assumes that protocols which have
2579 * memory_pressure use sk_wmem_queued as write buffer accounting.
2580 */
2581int __sk_mem_schedule(struct sock *sk, int size, int kind)
2582{
2583 int ret, amt = sk_mem_pages(size);
2584
2585 sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT;
2586 ret = __sk_mem_raise_allocated(sk, size, amt, kind);
2587 if (!ret)
2588 sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT;
2589 return ret;
2590}
3ab224be
HA
2591EXPORT_SYMBOL(__sk_mem_schedule);
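
/* Editor's sketch, not part of the original file: a worked example of the
 * accounting above. With SK_MEM_QUANTUM == 4096, charging an skb with
 * truesize 6000 schedules sk_mem_pages(6000) == 2 quanta, i.e. 8192 bytes
 * of sk_forward_alloc. sk_rmem_schedule() ends up in
 * __sk_mem_schedule(..., SK_MEM_RECV) when sk_forward_alloc is too small;
 * the helper name is hypothetical.
 */
static int example_charge_recv(struct sock *sk, struct sk_buff *skb)
{
	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return -ENOBUFS;
	skb_set_owner_r(skb, sk);
	return 0;
}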
2592
2593/**
f8c3bf00 2594 * __sk_mem_reduce_allocated - reclaim memory_allocated
3ab224be 2595 * @sk: socket
f8c3bf00
PA
2596 * @amount: number of quanta
2597 *
2598 * Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc
3ab224be 2599 */
f8c3bf00 2600void __sk_mem_reduce_allocated(struct sock *sk, int amount)
3ab224be 2601{
1a24e04e 2602 sk_memory_allocated_sub(sk, amount);
3ab224be 2603
baac50bb
JW
2604 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2605 mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
e805605c 2606
180d8cd9
GC
2607 if (sk_under_memory_pressure(sk) &&
2608 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2609 sk_leave_memory_pressure(sk);
3ab224be 2610}
f8c3bf00
PA
2611EXPORT_SYMBOL(__sk_mem_reduce_allocated);
2612
2613/**
2614 * __sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated
2615 * @sk: socket
2616 * @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
2617 */
2618void __sk_mem_reclaim(struct sock *sk, int amount)
2619{
2620 amount >>= SK_MEM_QUANTUM_SHIFT;
2621 sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
2622 __sk_mem_reduce_allocated(sk, amount);
2623}
3ab224be
HA
2624EXPORT_SYMBOL(__sk_mem_reclaim);
2625
627d2d6b 2626int sk_set_peek_off(struct sock *sk, int val)
2627{
627d2d6b 2628 sk->sk_peek_off = val;
2629 return 0;
2630}
2631EXPORT_SYMBOL_GPL(sk_set_peek_off);
3ab224be 2632
1da177e4
LT
2633/*
2634 * Set of default routines for initialising struct proto_ops when
2635 * the protocol does not support a particular function. In certain
2636 * cases where it makes no sense for a protocol to have a "do nothing"
2637 * function, some default processing is provided.
2638 */
2639
2640int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2641{
2642 return -EOPNOTSUPP;
2643}
2a91525c 2644EXPORT_SYMBOL(sock_no_bind);
1da177e4 2645
4ec93edb 2646int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
1da177e4
LT
2647 int len, int flags)
2648{
2649 return -EOPNOTSUPP;
2650}
2a91525c 2651EXPORT_SYMBOL(sock_no_connect);
1da177e4
LT
2652
2653int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2654{
2655 return -EOPNOTSUPP;
2656}
2a91525c 2657EXPORT_SYMBOL(sock_no_socketpair);
1da177e4 2658
cdfbabfb
DH
2659int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
2660 bool kern)
1da177e4
LT
2661{
2662 return -EOPNOTSUPP;
2663}
2a91525c 2664EXPORT_SYMBOL(sock_no_accept);
1da177e4 2665
4ec93edb 2666int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
9b2c45d4 2667 int peer)
1da177e4
LT
2668{
2669 return -EOPNOTSUPP;
2670}
2a91525c 2671EXPORT_SYMBOL(sock_no_getname);
1da177e4 2672
1da177e4
LT
2673int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2674{
2675 return -EOPNOTSUPP;
2676}
2a91525c 2677EXPORT_SYMBOL(sock_no_ioctl);
1da177e4
LT
2678
2679int sock_no_listen(struct socket *sock, int backlog)
2680{
2681 return -EOPNOTSUPP;
2682}
2a91525c 2683EXPORT_SYMBOL(sock_no_listen);
1da177e4
LT
2684
2685int sock_no_shutdown(struct socket *sock, int how)
2686{
2687 return -EOPNOTSUPP;
2688}
2a91525c 2689EXPORT_SYMBOL(sock_no_shutdown);
1da177e4
LT
2690
2691int sock_no_setsockopt(struct socket *sock, int level, int optname,
b7058842 2692 char __user *optval, unsigned int optlen)
1da177e4
LT
2693{
2694 return -EOPNOTSUPP;
2695}
2a91525c 2696EXPORT_SYMBOL(sock_no_setsockopt);
1da177e4
LT
2697
2698int sock_no_getsockopt(struct socket *sock, int level, int optname,
2699 char __user *optval, int __user *optlen)
2700{
2701 return -EOPNOTSUPP;
2702}
2a91525c 2703EXPORT_SYMBOL(sock_no_getsockopt);
1da177e4 2704
1b784140 2705int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
1da177e4
LT
2706{
2707 return -EOPNOTSUPP;
2708}
2a91525c 2709EXPORT_SYMBOL(sock_no_sendmsg);
1da177e4 2710
306b13eb
TH
2711int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len)
2712{
2713 return -EOPNOTSUPP;
2714}
2715EXPORT_SYMBOL(sock_no_sendmsg_locked);
2716
1b784140
YX
2717int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
2718 int flags)
1da177e4
LT
2719{
2720 return -EOPNOTSUPP;
2721}
2a91525c 2722EXPORT_SYMBOL(sock_no_recvmsg);
1da177e4
LT
2723
2724int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2725{
2726 /* Mirror missing mmap method error code */
2727 return -ENODEV;
2728}
2a91525c 2729EXPORT_SYMBOL(sock_no_mmap);
1da177e4
LT
2730
2731ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2732{
2733 ssize_t res;
2734 struct msghdr msg = {.msg_flags = flags};
2735 struct kvec iov;
2736 char *kaddr = kmap(page);
2737 iov.iov_base = kaddr + offset;
2738 iov.iov_len = size;
2739 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2740 kunmap(page);
2741 return res;
2742}
2a91525c 2743EXPORT_SYMBOL(sock_no_sendpage);
1da177e4 2744
306b13eb
TH
2745ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
2746 int offset, size_t size, int flags)
2747{
2748 ssize_t res;
2749 struct msghdr msg = {.msg_flags = flags};
2750 struct kvec iov;
2751 char *kaddr = kmap(page);
2752
2753 iov.iov_base = kaddr + offset;
2754 iov.iov_len = size;
2755 res = kernel_sendmsg_locked(sk, &msg, &iov, 1, size);
2756 kunmap(page);
2757 return res;
2758}
2759EXPORT_SYMBOL(sock_no_sendpage_locked);
2760
1da177e4
LT
2761/*
2762 * Default Socket Callbacks
2763 */
2764
2765static void sock_def_wakeup(struct sock *sk)
2766{
43815482
ED
2767 struct socket_wq *wq;
2768
2769 rcu_read_lock();
2770 wq = rcu_dereference(sk->sk_wq);
1ce0bf50 2771 if (skwq_has_sleeper(wq))
43815482
ED
2772 wake_up_interruptible_all(&wq->wait);
2773 rcu_read_unlock();
1da177e4
LT
2774}
2775
2776static void sock_def_error_report(struct sock *sk)
2777{
43815482
ED
2778 struct socket_wq *wq;
2779
2780 rcu_read_lock();
2781 wq = rcu_dereference(sk->sk_wq);
1ce0bf50 2782 if (skwq_has_sleeper(wq))
a9a08845 2783 wake_up_interruptible_poll(&wq->wait, EPOLLERR);
8d8ad9d7 2784 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
43815482 2785 rcu_read_unlock();
1da177e4
LT
2786}
2787
676d2369 2788static void sock_def_readable(struct sock *sk)
1da177e4 2789{
43815482
ED
2790 struct socket_wq *wq;
2791
2792 rcu_read_lock();
2793 wq = rcu_dereference(sk->sk_wq);
1ce0bf50 2794 if (skwq_has_sleeper(wq))
a9a08845
LT
2795 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
2796 EPOLLRDNORM | EPOLLRDBAND);
8d8ad9d7 2797 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
43815482 2798 rcu_read_unlock();
1da177e4
LT
2799}
2800
2801static void sock_def_write_space(struct sock *sk)
2802{
43815482
ED
2803 struct socket_wq *wq;
2804
2805 rcu_read_lock();
1da177e4
LT
2806
2807 /* Do not wake up a writer until he can make "significant"
2808 * progress. --DaveM
2809 */
14afee4b 2810 if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
43815482 2811 wq = rcu_dereference(sk->sk_wq);
1ce0bf50 2812 if (skwq_has_sleeper(wq))
a9a08845
LT
2813 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
2814 EPOLLWRNORM | EPOLLWRBAND);
1da177e4
LT
2815
2816 /* Should agree with poll, otherwise some programs break */
2817 if (sock_writeable(sk))
8d8ad9d7 2818 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
1da177e4
LT
2819 }
2820
43815482 2821 rcu_read_unlock();
1da177e4
LT
2822}
2823
2824static void sock_def_destruct(struct sock *sk)
2825{
1da177e4
LT
2826}
2827
2828void sk_send_sigurg(struct sock *sk)
2829{
2830 if (sk->sk_socket && sk->sk_socket->file)
2831 if (send_sigurg(&sk->sk_socket->file->f_owner))
8d8ad9d7 2832 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
1da177e4 2833}
2a91525c 2834EXPORT_SYMBOL(sk_send_sigurg);
1da177e4
LT
2835
2836void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2837 unsigned long expires)
2838{
2839 if (!mod_timer(timer, expires))
2840 sock_hold(sk);
2841}
1da177e4
LT
2842EXPORT_SYMBOL(sk_reset_timer);
2843
2844void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2845{
25cc4ae9 2846 if (del_timer(timer))
1da177e4
LT
2847 __sock_put(sk);
2848}
1da177e4
LT
2849EXPORT_SYMBOL(sk_stop_timer);
2850
2851void sock_init_data(struct socket *sock, struct sock *sk)
2852{
581319c5 2853 sk_init_common(sk);
1da177e4
LT
2854 sk->sk_send_head = NULL;
2855
99767f27 2856 timer_setup(&sk->sk_timer, NULL, 0);
4ec93edb 2857
1da177e4
LT
2858 sk->sk_allocation = GFP_KERNEL;
2859 sk->sk_rcvbuf = sysctl_rmem_default;
2860 sk->sk_sndbuf = sysctl_wmem_default;
2861 sk->sk_state = TCP_CLOSE;
972692e0 2862 sk_set_socket(sk, sock);
1da177e4
LT
2863
2864 sock_set_flag(sk, SOCK_ZAPPED);
2865
e71a4783 2866 if (sock) {
1da177e4 2867 sk->sk_type = sock->type;
333f7909 2868 RCU_INIT_POINTER(sk->sk_wq, &sock->wq);
1da177e4 2869 sock->sk = sk;
86741ec2
LC
2870 sk->sk_uid = SOCK_INODE(sock)->i_uid;
2871 } else {
c2f26e8f 2872 RCU_INIT_POINTER(sk->sk_wq, NULL);
86741ec2
LC
2873 sk->sk_uid = make_kuid(sock_net(sk)->user_ns, 0);
2874 }
1da177e4 2875
1da177e4 2876 rwlock_init(&sk->sk_callback_lock);
cdfbabfb
DH
2877 if (sk->sk_kern_sock)
2878 lockdep_set_class_and_name(
2879 &sk->sk_callback_lock,
2880 af_kern_callback_keys + sk->sk_family,
2881 af_family_kern_clock_key_strings[sk->sk_family]);
2882 else
2883 lockdep_set_class_and_name(
2884 &sk->sk_callback_lock,
443aef0e
PZ
2885 af_callback_keys + sk->sk_family,
2886 af_family_clock_key_strings[sk->sk_family]);
1da177e4
LT
2887
2888 sk->sk_state_change = sock_def_wakeup;
2889 sk->sk_data_ready = sock_def_readable;
2890 sk->sk_write_space = sock_def_write_space;
2891 sk->sk_error_report = sock_def_error_report;
2892 sk->sk_destruct = sock_def_destruct;
2893
5640f768
ED
2894 sk->sk_frag.page = NULL;
2895 sk->sk_frag.offset = 0;
ef64a54f 2896 sk->sk_peek_off = -1;
1da177e4 2897
109f6e39
EB
2898 sk->sk_peer_pid = NULL;
2899 sk->sk_peer_cred = NULL;
1da177e4
LT
2900 sk->sk_write_pending = 0;
2901 sk->sk_rcvlowat = 1;
2902 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2903 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2904
6c7c98ba 2905 sk->sk_stamp = SK_DEFAULT_STAMP;
3a0ed3e9
DD
2906#if BITS_PER_LONG==32
2907 seqlock_init(&sk->sk_stamp_seq);
2908#endif
52267790 2909 atomic_set(&sk->sk_zckey, 0);
1da177e4 2910
e0d1095a 2911#ifdef CONFIG_NET_RX_BUSY_POLL
06021292 2912 sk->sk_napi_id = 0;
64b0dc51 2913 sk->sk_ll_usec = sysctl_net_busy_read;
06021292
ET
2914#endif
2915
76a9ebe8
ED
2916 sk->sk_max_pacing_rate = ~0UL;
2917 sk->sk_pacing_rate = ~0UL;
3a9b76fd 2918 sk->sk_pacing_shift = 10;
70da268b 2919 sk->sk_incoming_cpu = -1;
c6345ce7
AN
2920
2921 sk_rx_queue_clear(sk);
4dc6dc71
ED
2922 /*
2923 * Before updating sk_refcnt, we must commit prior changes to memory
2924 * (Documentation/RCU/rculist_nulls.txt for details)
2925 */
2926 smp_wmb();
41c6d650 2927 refcount_set(&sk->sk_refcnt, 1);
33c732c3 2928 atomic_set(&sk->sk_drops, 0);
1da177e4 2929}
2a91525c 2930EXPORT_SYMBOL(sock_init_data);
1da177e4 2931
b5606c2d 2932void lock_sock_nested(struct sock *sk, int subclass)
1da177e4
LT
2933{
2934 might_sleep();
a5b5bb9a 2935 spin_lock_bh(&sk->sk_lock.slock);
d2e9117c 2936 if (sk->sk_lock.owned)
1da177e4 2937 __lock_sock(sk);
d2e9117c 2938 sk->sk_lock.owned = 1;
a5b5bb9a
IM
2939 spin_unlock(&sk->sk_lock.slock);
2940 /*
2941 * The sk_lock has mutex_lock() semantics here:
2942 */
fcc70d5f 2943 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
a5b5bb9a 2944 local_bh_enable();
1da177e4 2945}
fcc70d5f 2946EXPORT_SYMBOL(lock_sock_nested);
1da177e4 2947
b5606c2d 2948void release_sock(struct sock *sk)
1da177e4 2949{
a5b5bb9a 2950 spin_lock_bh(&sk->sk_lock.slock);
1da177e4
LT
2951 if (sk->sk_backlog.tail)
2952 __release_sock(sk);
46d3ceab 2953
c3f9b018
ED
2954 /* Warning : release_cb() might need to release sk ownership,
2955 * ie call sock_release_ownership(sk) before us.
2956 */
46d3ceab
ED
2957 if (sk->sk_prot->release_cb)
2958 sk->sk_prot->release_cb(sk);
2959
c3f9b018 2960 sock_release_ownership(sk);
a5b5bb9a
IM
2961 if (waitqueue_active(&sk->sk_lock.wq))
2962 wake_up(&sk->sk_lock.wq);
2963 spin_unlock_bh(&sk->sk_lock.slock);
1da177e4
LT
2964}
2965EXPORT_SYMBOL(release_sock);
2966
8a74ad60
ED
2967/**
2968 * lock_sock_fast - fast version of lock_sock
2969 * @sk: socket
2970 *
2971 * This version should be used for very small sections, where the process won't block.
d651983d
MCC
2972 * Return false if the fast path is taken:
2973 *
8a74ad60 2974 *   sk_lock.slock locked, owned = 0, BH disabled
d651983d
MCC
2975 *
2976 * Return true if the slow path is taken:
2977 *
8a74ad60
ED
2978 * sk_lock.slock unlocked, owned = 1, BH enabled
2979 */
2980bool lock_sock_fast(struct sock *sk)
2981{
2982 might_sleep();
2983 spin_lock_bh(&sk->sk_lock.slock);
2984
2985 if (!sk->sk_lock.owned)
2986 /*
2987		 * Note: the fast path returns with slock held and BH disabled.
2988 */
2989 return false;
2990
2991 __lock_sock(sk);
2992 sk->sk_lock.owned = 1;
2993 spin_unlock(&sk->sk_lock.slock);
2994 /*
2995 * The sk_lock has mutex_lock() semantics here:
2996 */
2997 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2998 local_bh_enable();
2999 return true;
3000}
3001EXPORT_SYMBOL(lock_sock_fast);
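
/* Editor's sketch, not part of the original file: the canonical
 * lock_sock_fast()/unlock_sock_fast() pairing; the returned bool must be
 * carried to the unlock. The function name is hypothetical.
 */
static void example_short_section(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);

	/* short, non-blocking critical section */
	unlock_sock_fast(sk, slow);
}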
3002
c7cbdbf2
AB
3003int sock_gettstamp(struct socket *sock, void __user *userstamp,
3004 bool timeval, bool time32)
4ec93edb 3005{
c7cbdbf2
AB
3006 struct sock *sk = sock->sk;
3007 struct timespec64 ts;
9dae3497
YS
3008
3009 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
c7cbdbf2
AB
3010 ts = ktime_to_timespec64(sock_read_timestamp(sk));
3011 if (ts.tv_sec == -1)
1da177e4 3012 return -ENOENT;
c7cbdbf2 3013 if (ts.tv_sec == 0) {
3a0ed3e9 3014 ktime_t kt = ktime_get_real();
c7cbdbf2
AB
3015		sock_write_timestamp(sk, kt);
3016 ts = ktime_to_timespec64(kt);
b7aa0bf7 3017 }
1da177e4 3018
c7cbdbf2
AB
3019 if (timeval)
3020 ts.tv_nsec /= 1000;
9dae3497 3021
c7cbdbf2
AB
3022#ifdef CONFIG_COMPAT_32BIT_TIME
3023 if (time32)
3024 return put_old_timespec32(&ts, userstamp);
3025#endif
3026#ifdef CONFIG_SPARC64
3027 /* beware of padding in sparc64 timeval */
3028 if (timeval && !in_compat_syscall()) {
3029 struct __kernel_old_timeval __user tv = {
c98f4822
SR
3030 .tv_sec = ts.tv_sec,
3031 .tv_usec = ts.tv_nsec,
c7cbdbf2 3032 };
c98f4822 3033 if (copy_to_user(userstamp, &tv, sizeof(tv)))
c7cbdbf2
AB
3034 return -EFAULT;
3035 return 0;
ae40eb1e 3036 }
c7cbdbf2
AB
3037#endif
3038 return put_timespec64(&ts, userstamp);
ae40eb1e 3039}
c7cbdbf2 3040EXPORT_SYMBOL(sock_gettstamp);
ae40eb1e 3041
20d49473 3042void sock_enable_timestamp(struct sock *sk, int flag)
4ec93edb 3043{
20d49473 3044 if (!sock_flag(sk, flag)) {
08e29af3
ED
3045 unsigned long previous_flags = sk->sk_flags;
3046
20d49473
PO
3047 sock_set_flag(sk, flag);
3048 /*
3049 * we just set one of the two flags which require net
3050 * time stamping, but time stamping might have been on
3051 * already because of the other one
3052 */
080a270f
HFS
3053 if (sock_needs_netstamp(sk) &&
3054 !(previous_flags & SK_FLAGS_TIMESTAMP))
20d49473 3055 net_enable_timestamp();
1da177e4
LT
3056 }
3057}
1da177e4 3058
cb820f8e
RC
3059int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
3060 int level, int type)
3061{
3062 struct sock_exterr_skb *serr;
364a9e93 3063 struct sk_buff *skb;
cb820f8e
RC
3064 int copied, err;
3065
3066 err = -EAGAIN;
364a9e93 3067 skb = sock_dequeue_err_skb(sk);
cb820f8e
RC
3068 if (skb == NULL)
3069 goto out;
3070
3071 copied = skb->len;
3072 if (copied > len) {
3073 msg->msg_flags |= MSG_TRUNC;
3074 copied = len;
3075 }
51f3d02b 3076 err = skb_copy_datagram_msg(skb, 0, msg, copied);
cb820f8e
RC
3077 if (err)
3078 goto out_free_skb;
3079
3080 sock_recv_timestamp(msg, sk, skb);
3081
3082 serr = SKB_EXT_ERR(skb);
3083 put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
3084
3085 msg->msg_flags |= MSG_ERRQUEUE;
3086 err = copied;
3087
cb820f8e
RC
3088out_free_skb:
3089 kfree_skb(skb);
3090out:
3091 return err;
3092}
3093EXPORT_SYMBOL(sock_recv_errqueue);
3094
1da177e4
LT
3095/*
3096 * Get a socket option on a socket.
3097 *
3098 * FIX: POSIX 1003.1g is very ambiguous here. It states that
3099 * asynchronous errors should be reported by getsockopt. We assume
3100 * this means if you specify SO_ERROR (otherwise what's the point of it).
3101 */
3102int sock_common_getsockopt(struct socket *sock, int level, int optname,
3103 char __user *optval, int __user *optlen)
3104{
3105 struct sock *sk = sock->sk;
3106
3107 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
3108}
1da177e4
LT
3109EXPORT_SYMBOL(sock_common_getsockopt);
3110
3fdadf7d 3111#ifdef CONFIG_COMPAT
543d9cfe
ACM
3112int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
3113 char __user *optval, int __user *optlen)
3fdadf7d
DM
3114{
3115 struct sock *sk = sock->sk;
3116
1e51f951 3117 if (sk->sk_prot->compat_getsockopt != NULL)
543d9cfe
ACM
3118 return sk->sk_prot->compat_getsockopt(sk, level, optname,
3119 optval, optlen);
3fdadf7d
DM
3120 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
3121}
3122EXPORT_SYMBOL(compat_sock_common_getsockopt);
3123#endif
3124
1b784140
YX
3125int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
3126 int flags)
1da177e4
LT
3127{
3128 struct sock *sk = sock->sk;
3129 int addr_len = 0;
3130 int err;
3131
1b784140 3132 err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
1da177e4
LT
3133 flags & ~MSG_DONTWAIT, &addr_len);
3134 if (err >= 0)
3135 msg->msg_namelen = addr_len;
3136 return err;
3137}
1da177e4
LT
3138EXPORT_SYMBOL(sock_common_recvmsg);
3139
3140/*
3141 * Set socket options on an inet socket.
3142 */
3143int sock_common_setsockopt(struct socket *sock, int level, int optname,
b7058842 3144 char __user *optval, unsigned int optlen)
1da177e4
LT
3145{
3146 struct sock *sk = sock->sk;
3147
3148 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
3149}
1da177e4
LT
3150EXPORT_SYMBOL(sock_common_setsockopt);
3151
3fdadf7d 3152#ifdef CONFIG_COMPAT
543d9cfe 3153int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
b7058842 3154 char __user *optval, unsigned int optlen)
3fdadf7d
DM
3155{
3156 struct sock *sk = sock->sk;
3157
543d9cfe
ACM
3158 if (sk->sk_prot->compat_setsockopt != NULL)
3159 return sk->sk_prot->compat_setsockopt(sk, level, optname,
3160 optval, optlen);
3fdadf7d
DM
3161 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
3162}
3163EXPORT_SYMBOL(compat_sock_common_setsockopt);
3164#endif
3165
1da177e4
LT
3166void sk_common_release(struct sock *sk)
3167{
3168 if (sk->sk_prot->destroy)
3169 sk->sk_prot->destroy(sk);
3170
3171 /*
3172	 * Observation: when sock_common_release is called, processes have
3173	 * no access to the socket, but the net stack still does.
3174 * Step one, detach it from networking:
3175 *
3176 * A. Remove from hash tables.
3177 */
3178
3179 sk->sk_prot->unhash(sk);
3180
3181 /*
3182	 * At this point the socket cannot receive new packets, but it is possible
3183	 * that some packets are in flight because some CPU runs the receiver and
3184	 * did a hash table lookup before we unhashed the socket. They will reach
3185	 * the receive queue and be purged by the socket destructor.
3186	 *
3187	 * Also, we still have packets pending on the receive queue and, probably,
3188	 * our own packets waiting in device queues. sock_destroy will drain the
3189	 * receive queue, but transmitted packets will delay socket destruction
3190	 * until the last reference is released.
3191 */
3192
3193 sock_orphan(sk);
3194
3195 xfrm_sk_free_policy(sk);
3196
e6848976 3197 sk_refcnt_debug_release(sk);
5640f768 3198
1da177e4
LT
3199 sock_put(sk);
3200}
1da177e4
LT
3201EXPORT_SYMBOL(sk_common_release);
3202
a2d133b1
JH
3203void sk_get_meminfo(const struct sock *sk, u32 *mem)
3204{
3205 memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);
3206
3207 mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
ebb3b78d 3208 mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
a2d133b1
JH
3209 mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
3210 mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
3211 mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
3212 mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
3213 mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
70c26558 3214 mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
a2d133b1
JH
3215 mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
3216}
3217
13ff3d6f
PE
3218#ifdef CONFIG_PROC_FS
3219#define PROTO_INUSE_NR 64 /* should be enough for the first time */
1338d466
PE
3220struct prot_inuse {
3221 int val[PROTO_INUSE_NR];
3222};
13ff3d6f
PE
3223
3224static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
70ee1159 3225
70ee1159
PE
3226void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
3227{
08fc7f81 3228 __this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
70ee1159
PE
3229}
3230EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
3231
3232int sock_prot_inuse_get(struct net *net, struct proto *prot)
3233{
3234 int cpu, idx = prot->inuse_idx;
3235 int res = 0;
3236
3237 for_each_possible_cpu(cpu)
08fc7f81 3238 res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];
70ee1159
PE
3239
3240 return res >= 0 ? res : 0;
3241}
3242EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
3243
648845ab
TZ
3244static void sock_inuse_add(struct net *net, int val)
3245{
3246 this_cpu_add(*net->core.sock_inuse, val);
3247}
3248
3249int sock_inuse_get(struct net *net)
3250{
3251 int cpu, res = 0;
3252
3253 for_each_possible_cpu(cpu)
3254 res += *per_cpu_ptr(net->core.sock_inuse, cpu);
3255
3256 return res;
3257}
3258
3259EXPORT_SYMBOL_GPL(sock_inuse_get);
3260
2c8c1e72 3261static int __net_init sock_inuse_init_net(struct net *net)
70ee1159 3262{
08fc7f81 3263 net->core.prot_inuse = alloc_percpu(struct prot_inuse);
648845ab
TZ
3264 if (net->core.prot_inuse == NULL)
3265 return -ENOMEM;
3266
3267 net->core.sock_inuse = alloc_percpu(int);
3268 if (net->core.sock_inuse == NULL)
3269 goto out;
3270
3271 return 0;
3272
3273out:
3274 free_percpu(net->core.prot_inuse);
3275 return -ENOMEM;
70ee1159
PE
3276}
3277
2c8c1e72 3278static void __net_exit sock_inuse_exit_net(struct net *net)
70ee1159 3279{
08fc7f81 3280 free_percpu(net->core.prot_inuse);
648845ab 3281 free_percpu(net->core.sock_inuse);
70ee1159
PE
3282}
3283
3284static struct pernet_operations net_inuse_ops = {
3285 .init = sock_inuse_init_net,
3286 .exit = sock_inuse_exit_net,
3287};
3288
3289static __init int net_inuse_init(void)
3290{
3291 if (register_pernet_subsys(&net_inuse_ops))
3292 panic("Cannot initialize net inuse counters");
3293
3294 return 0;
3295}
3296
3297core_initcall(net_inuse_init);
13ff3d6f 3298
b45ce321 3299static int assign_proto_idx(struct proto *prot)
13ff3d6f
PE
3300{
3301 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
3302
3303 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
e005d193 3304 pr_err("PROTO_INUSE_NR exhausted\n");
b45ce321 3305 return -ENOSPC;
13ff3d6f
PE
3306 }
3307
3308 set_bit(prot->inuse_idx, proto_inuse_idx);
b45ce321 3309 return 0;
13ff3d6f
PE
3310}
3311
3312static void release_proto_idx(struct proto *prot)
3313{
3314 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
3315 clear_bit(prot->inuse_idx, proto_inuse_idx);
3316}
3317#else
b45ce321 3318static inline int assign_proto_idx(struct proto *prot)
13ff3d6f 3319{
b45ce321 3320 return 0;
13ff3d6f
PE
3321}
3322
3323static inline void release_proto_idx(struct proto *prot)
3324{
3325}
648845ab
TZ
3326
3327static void sock_inuse_add(struct net *net, int val)
3328{
3329}
13ff3d6f
PE
3330#endif
3331
0159dfd3
ED
3332static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
3333{
3334 if (!rsk_prot)
3335 return;
3336 kfree(rsk_prot->slab_name);
3337 rsk_prot->slab_name = NULL;
adf78eda
JL
3338 kmem_cache_destroy(rsk_prot->slab);
3339 rsk_prot->slab = NULL;
0159dfd3
ED
3340}
3341
3342static int req_prot_init(const struct proto *prot)
3343{
3344 struct request_sock_ops *rsk_prot = prot->rsk_prot;
3345
3346 if (!rsk_prot)
3347 return 0;
3348
3349 rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
3350 prot->name);
3351 if (!rsk_prot->slab_name)
3352 return -ENOMEM;
3353
3354 rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
3355 rsk_prot->obj_size, 0,
e699e2c6
SB
3356 SLAB_ACCOUNT | prot->slab_flags,
3357 NULL);
0159dfd3
ED
3358
3359 if (!rsk_prot->slab) {
3360 pr_crit("%s: Can't create request sock SLAB cache!\n",
3361 prot->name);
3362 return -ENOMEM;
3363 }
3364 return 0;
3365}
3366
b733c007
PE
3367int proto_register(struct proto *prot, int alloc_slab)
3368{
b45ce321 3369 int ret = -ENOBUFS;
3370
1da177e4 3371 if (alloc_slab) {
30c2c9f1
DW
3372 prot->slab = kmem_cache_create_usercopy(prot->name,
3373 prot->obj_size, 0,
e699e2c6
SB
3374 SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
3375 prot->slab_flags,
289a4860 3376 prot->useroffset, prot->usersize,
271b72c7 3377 NULL);
1da177e4
LT
3378
3379 if (prot->slab == NULL) {
e005d193
JP
3380 pr_crit("%s: Can't create sock SLAB cache!\n",
3381 prot->name);
60e7663d 3382 goto out;
1da177e4 3383 }
2e6599cb 3384
0159dfd3
ED
3385 if (req_prot_init(prot))
3386 goto out_free_request_sock_slab;
8feaf0c0 3387
6d6ee43e 3388 if (prot->twsk_prot != NULL) {
faf23422 3389 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
8feaf0c0 3390
7e56b5d6 3391 if (prot->twsk_prot->twsk_slab_name == NULL)
8feaf0c0
ACM
3392 goto out_free_request_sock_slab;
3393
6d6ee43e 3394 prot->twsk_prot->twsk_slab =
7e56b5d6 3395 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
6d6ee43e 3396 prot->twsk_prot->twsk_obj_size,
3ab5aee7 3397 0,
e699e2c6 3398 SLAB_ACCOUNT |
52db70dc 3399 prot->slab_flags,
20c2df83 3400 NULL);
6d6ee43e 3401 if (prot->twsk_prot->twsk_slab == NULL)
8feaf0c0
ACM
3402 goto out_free_timewait_sock_slab_name;
3403 }
1da177e4
LT
3404 }
3405
36b77a52 3406 mutex_lock(&proto_list_mutex);
b45ce321 3407 ret = assign_proto_idx(prot);
3408 if (ret) {
3409 mutex_unlock(&proto_list_mutex);
3410 goto out_free_timewait_sock_slab_name;
3411 }
1da177e4 3412 list_add(&prot->node, &proto_list);
36b77a52 3413 mutex_unlock(&proto_list_mutex);
b45ce321 3414 return ret;
b733c007 3415
8feaf0c0 3416out_free_timewait_sock_slab_name:
b45ce321 3417 if (alloc_slab && prot->twsk_prot)
3418 kfree(prot->twsk_prot->twsk_slab_name);
8feaf0c0 3419out_free_request_sock_slab:
b45ce321 3420 if (alloc_slab) {
3421 req_prot_cleanup(prot->rsk_prot);
0159dfd3 3422
b45ce321 3423 kmem_cache_destroy(prot->slab);
3424 prot->slab = NULL;
3425 }
b733c007 3426out:
b45ce321 3427 return ret;
1da177e4 3428}
1da177e4
LT
3429EXPORT_SYMBOL(proto_register);
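
/* Editor's sketch, not part of the original file: minimal registration of
 * a protocol with its own slab cache, as a module would do it. All
 * "example_*" names are hypothetical.
 */
static struct proto example_prot = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),
};

static int __init example_proto_init(void)
{
	return proto_register(&example_prot, 1);	/* 1: allocate slab */
}

static void __exit example_proto_exit(void)
{
	proto_unregister(&example_prot);
}

module_init(example_proto_init);
module_exit(example_proto_exit);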
3430
3431void proto_unregister(struct proto *prot)
3432{
36b77a52 3433 mutex_lock(&proto_list_mutex);
13ff3d6f 3434 release_proto_idx(prot);
0a3f4358 3435 list_del(&prot->node);
36b77a52 3436 mutex_unlock(&proto_list_mutex);
1da177e4 3437
adf78eda
JL
3438 kmem_cache_destroy(prot->slab);
3439 prot->slab = NULL;
1da177e4 3440
0159dfd3 3441 req_prot_cleanup(prot->rsk_prot);
2e6599cb 3442
6d6ee43e 3443 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
6d6ee43e 3444 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
7e56b5d6 3445 kfree(prot->twsk_prot->twsk_slab_name);
6d6ee43e 3446 prot->twsk_prot->twsk_slab = NULL;
8feaf0c0 3447 }
1da177e4 3448}
1da177e4
LT
3449EXPORT_SYMBOL(proto_unregister);
3450
bf2ae2e4
XL
3451int sock_load_diag_module(int family, int protocol)
3452{
3453 if (!protocol) {
3454 if (!sock_is_registered(family))
3455 return -ENOENT;
3456
3457 return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
3458 NETLINK_SOCK_DIAG, family);
3459 }
3460
3461#ifdef CONFIG_INET
3462 if (family == AF_INET &&
c34c1287 3463 protocol != IPPROTO_RAW &&
bf2ae2e4
XL
3464 !rcu_access_pointer(inet_protos[protocol]))
3465 return -ENOENT;
3466#endif
3467
3468 return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
3469 NETLINK_SOCK_DIAG, family, protocol);
3470}
3471EXPORT_SYMBOL(sock_load_diag_module);
3472
1da177e4 3473#ifdef CONFIG_PROC_FS
1da177e4 3474static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
36b77a52 3475 __acquires(proto_list_mutex)
1da177e4 3476{
36b77a52 3477 mutex_lock(&proto_list_mutex);
60f0438a 3478 return seq_list_start_head(&proto_list, *pos);
1da177e4
LT
3479}
3480
3481static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3482{
60f0438a 3483 return seq_list_next(v, &proto_list, pos);
1da177e4
LT
3484}
3485
3486static void proto_seq_stop(struct seq_file *seq, void *v)
36b77a52 3487 __releases(proto_list_mutex)
1da177e4 3488{
36b77a52 3489 mutex_unlock(&proto_list_mutex);
1da177e4
LT
3490}
3491
3492static char proto_method_implemented(const void *method)
3493{
3494 return method == NULL ? 'n' : 'y';
3495}
180d8cd9
GC
3496static long sock_prot_memory_allocated(struct proto *proto)
3497{
cb75a36c 3498 return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
180d8cd9
GC
3499}
3500
7a512eb8 3501static const char *sock_prot_memory_pressure(struct proto *proto)
180d8cd9
GC
3502{
3503 return proto->memory_pressure != NULL ?
3504 proto_memory_pressure(proto) ? "yes" : "no" : "NI";
3505}
1da177e4
LT
3506
3507static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
3508{
180d8cd9 3509
8d987e5c 3510 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
1da177e4
LT
3511 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
3512 proto->name,
3513 proto->obj_size,
14e943db 3514 sock_prot_inuse_get(seq_file_net(seq), proto),
180d8cd9
GC
3515 sock_prot_memory_allocated(proto),
3516 sock_prot_memory_pressure(proto),
1da177e4
LT
3517 proto->max_header,
3518 proto->slab == NULL ? "no" : "yes",
3519 module_name(proto->owner),
3520 proto_method_implemented(proto->close),
3521 proto_method_implemented(proto->connect),
3522 proto_method_implemented(proto->disconnect),
3523 proto_method_implemented(proto->accept),
3524 proto_method_implemented(proto->ioctl),
3525 proto_method_implemented(proto->init),
3526 proto_method_implemented(proto->destroy),
3527 proto_method_implemented(proto->shutdown),
3528 proto_method_implemented(proto->setsockopt),
3529 proto_method_implemented(proto->getsockopt),
3530 proto_method_implemented(proto->sendmsg),
3531 proto_method_implemented(proto->recvmsg),
3532 proto_method_implemented(proto->sendpage),
3533 proto_method_implemented(proto->bind),
3534 proto_method_implemented(proto->backlog_rcv),
3535 proto_method_implemented(proto->hash),
3536 proto_method_implemented(proto->unhash),
3537 proto_method_implemented(proto->get_port),
3538 proto_method_implemented(proto->enter_memory_pressure));
3539}
3540
3541static int proto_seq_show(struct seq_file *seq, void *v)
3542{
60f0438a 3543 if (v == &proto_list)
1da177e4
LT
3544 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
3545 "protocol",
3546 "size",
3547 "sockets",
3548 "memory",
3549 "press",
3550 "maxhdr",
3551 "slab",
3552 "module",
3553 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
3554 else
60f0438a 3555 proto_seq_printf(seq, list_entry(v, struct proto, node));
1da177e4
LT
3556 return 0;
3557}
3558
f690808e 3559static const struct seq_operations proto_seq_ops = {
1da177e4
LT
3560 .start = proto_seq_start,
3561 .next = proto_seq_next,
3562 .stop = proto_seq_stop,
3563 .show = proto_seq_show,
3564};
3565
14e943db
ED
3566static __net_init int proto_init_net(struct net *net)
3567{
c3506372
CH
3568 if (!proc_create_net("protocols", 0444, net->proc_net, &proto_seq_ops,
3569 sizeof(struct seq_net_private)))
14e943db
ED
3570 return -ENOMEM;
3571
3572 return 0;
3573}
3574
3575static __net_exit void proto_exit_net(struct net *net)
3576{
ece31ffd 3577 remove_proc_entry("protocols", net->proc_net);
14e943db
ED
3578}
3579
3580
3581static __net_initdata struct pernet_operations proto_net_ops = {
3582 .init = proto_init_net,
3583 .exit = proto_exit_net,
1da177e4
LT
3584};
3585
3586static int __init proto_init(void)
3587{
14e943db 3588 return register_pernet_subsys(&proto_net_ops);
1da177e4
LT
3589}
3590
3591subsys_initcall(proto_init);
3592
3593#endif /* PROC_FS */
7db6b048
SS
3594
3595#ifdef CONFIG_NET_RX_BUSY_POLL
3596bool sk_busy_loop_end(void *p, unsigned long start_time)
3597{
3598 struct sock *sk = p;
3599
3600 return !skb_queue_empty(&sk->sk_receive_queue) ||
3601 sk_busy_loop_timeout(sk, start_time);
3602}
3603EXPORT_SYMBOL(sk_busy_loop_end);
3604#endif /* CONFIG_NET_RX_BUSY_POLL */