// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/fcntl.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched/task.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pipe_fs_i.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/memfd.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/rw_hint.h>

#include <linux/poll.h>
#include <asm/siginfo.h>
#include <linux/uaccess.h>

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)

static int setfl(int fd, struct file * filp, unsigned int arg)
{
	struct inode * inode = file_inode(filp);
	int error = 0;

	/*
	 * O_APPEND cannot be cleared if the file is marked as append-only
	 * and the file is open for write.
	 */
	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (!inode_owner_or_capable(file_mnt_idmap(filp), inode))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	/* Pipe packetized mode is controlled by O_DIRECT flag */
	if (!S_ISFIFO(inode->i_mode) &&
	    (arg & O_DIRECT) &&
	    !(filp->f_mode & FMODE_CAN_ODIRECT))
		return -EINVAL;

	if (filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	/*
	 * ->fasync() is responsible for setting the FASYNC bit.
	 */
	if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op->fasync) {
		error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
		if (error < 0)
			goto out;
		if (error > 0)
			error = 0;
	}
	spin_lock(&filp->f_lock);
	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
	filp->f_iocb_flags = iocb_flags(filp);
	spin_unlock(&filp->f_lock);

out:
	return error;
}

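/*
 * Install @pid (of the given @type) as the file's owner for SIGIO/SIGURG
 * delivery. An existing owner is only replaced when @force is set; when a
 * new owner is recorded, the caller's uid/euid are saved alongside it for
 * later permission checks.
 */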
static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
		     int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		put_pid(filp->f_owner.pid);
		filp->f_owner.pid = get_pid(pid);
		filp->f_owner.pid_type = type;

		if (pid) {
			const struct cred *cred = current_cred();
			filp->f_owner.uid = cred->uid;
			filp->f_owner.euid = cred->euid;
		}
	}
	write_unlock_irq(&filp->f_owner.lock);
}

void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
		int force)
{
	security_file_set_fowner(filp);
	f_modown(filp, pid, type, force);
}
EXPORT_SYMBOL(__f_setown);

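/*
 * F_SETOWN path: a positive @who selects the thread group with that pid,
 * while a negative @who selects the process group -who, matching the
 * traditional fcntl() convention.
 */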
int f_setown(struct file *filp, int who, int force)
{
	enum pid_type type;
	struct pid *pid = NULL;
	int ret = 0;

	type = PIDTYPE_TGID;
	if (who < 0) {
		/* avoid overflow below */
		if (who == INT_MIN)
			return -EINVAL;

		type = PIDTYPE_PGID;
		who = -who;
	}

	rcu_read_lock();
	if (who) {
		pid = find_vpid(who);
		if (!pid)
			ret = -ESRCH;
	}

	if (!ret)
		__f_setown(filp, pid, type, force);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(f_setown);

void f_delown(struct file *filp)
{
	f_modown(filp, NULL, PIDTYPE_TGID, 1);
}

pid_t f_getown(struct file *filp)
{
	pid_t pid = 0;

	read_lock_irq(&filp->f_owner.lock);
	rcu_read_lock();
	if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type)) {
		pid = pid_vnr(filp->f_owner.pid);
		if (filp->f_owner.pid_type == PIDTYPE_PGID)
			pid = -pid;
	}
	rcu_read_unlock();
	read_unlock_irq(&filp->f_owner.lock);
	return pid;
}

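/*
 * F_SETOWN_EX: like F_SETOWN, but the owner is described by a struct
 * f_owner_ex copied from userspace, so the caller can explicitly pick a
 * thread (F_OWNER_TID), a process (F_OWNER_PID) or a process group
 * (F_OWNER_PGRP).
 */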
static int f_setown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner;
	struct pid *pid;
	int type;
	int ret;

	ret = copy_from_user(&owner, owner_p, sizeof(owner));
	if (ret)
		return -EFAULT;

	switch (owner.type) {
	case F_OWNER_TID:
		type = PIDTYPE_PID;
		break;

	case F_OWNER_PID:
		type = PIDTYPE_TGID;
		break;

	case F_OWNER_PGRP:
		type = PIDTYPE_PGID;
		break;

	default:
		return -EINVAL;
	}

	rcu_read_lock();
	pid = find_vpid(owner.pid);
	if (owner.pid && !pid)
		ret = -ESRCH;
	else
		__f_setown(filp, pid, type, 1);
	rcu_read_unlock();

	return ret;
}

static int f_getown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner = {};
	int ret = 0;

	read_lock_irq(&filp->f_owner.lock);
	rcu_read_lock();
	if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type))
		owner.pid = pid_vnr(filp->f_owner.pid);
	rcu_read_unlock();
	switch (filp->f_owner.pid_type) {
	case PIDTYPE_PID:
		owner.type = F_OWNER_TID;
		break;

	case PIDTYPE_TGID:
		owner.type = F_OWNER_PID;
		break;

	case PIDTYPE_PGID:
		owner.type = F_OWNER_PGRP;
		break;

	default:
		WARN_ON(1);
		ret = -EINVAL;
		break;
	}
	read_unlock_irq(&filp->f_owner.lock);

	if (!ret) {
		ret = copy_to_user(owner_p, &owner, sizeof(owner));
		if (ret)
			ret = -EFAULT;
	}
	return ret;
}

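/*
 * F_GETOWNER_UIDS is only available with CONFIG_CHECKPOINT_RESTORE: it
 * reports the uid/euid that were recorded in f_owner when the owner was
 * set, translated into the caller's user namespace.
 */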
#ifdef CONFIG_CHECKPOINT_RESTORE
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
	struct user_namespace *user_ns = current_user_ns();
	uid_t __user *dst = (void __user *)arg;
	uid_t src[2];
	int err;

	read_lock_irq(&filp->f_owner.lock);
	src[0] = from_kuid(user_ns, filp->f_owner.uid);
	src[1] = from_kuid(user_ns, filp->f_owner.euid);
	read_unlock_irq(&filp->f_owner.lock);

	err = put_user(src[0], &dst[0]);
	err |= put_user(src[1], &dst[1]);

	return err;
}
#else
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
	return -EINVAL;
}
#endif

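/*
 * The BUILD_BUG_ON()s below keep the UAPI RWH_WRITE_LIFE_* values in sync
 * with the kernel-internal WRITE_LIFE_* enum; only the known write-life
 * hints are accepted from userspace.
 */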
static bool rw_hint_valid(u64 hint)
{
	BUILD_BUG_ON(WRITE_LIFE_NOT_SET != RWH_WRITE_LIFE_NOT_SET);
	BUILD_BUG_ON(WRITE_LIFE_NONE != RWH_WRITE_LIFE_NONE);
	BUILD_BUG_ON(WRITE_LIFE_SHORT != RWH_WRITE_LIFE_SHORT);
	BUILD_BUG_ON(WRITE_LIFE_MEDIUM != RWH_WRITE_LIFE_MEDIUM);
	BUILD_BUG_ON(WRITE_LIFE_LONG != RWH_WRITE_LIFE_LONG);
	BUILD_BUG_ON(WRITE_LIFE_EXTREME != RWH_WRITE_LIFE_EXTREME);

	switch (hint) {
	case RWH_WRITE_LIFE_NOT_SET:
	case RWH_WRITE_LIFE_NONE:
	case RWH_WRITE_LIFE_SHORT:
	case RWH_WRITE_LIFE_MEDIUM:
	case RWH_WRITE_LIFE_LONG:
	case RWH_WRITE_LIFE_EXTREME:
		return true;
	default:
		return false;
	}
}

static long fcntl_get_rw_hint(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	struct inode *inode = file_inode(file);
	u64 __user *argp = (u64 __user *)arg;
	u64 hint = READ_ONCE(inode->i_write_hint);

	if (copy_to_user(argp, &hint, sizeof(*argp)))
		return -EFAULT;
	return 0;
}

static long fcntl_set_rw_hint(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	struct inode *inode = file_inode(file);
	u64 __user *argp = (u64 __user *)arg;
	u64 hint;

	if (copy_from_user(&hint, argp, sizeof(hint)))
		return -EFAULT;
	if (!rw_hint_valid(hint))
		return -EINVAL;

	WRITE_ONCE(inode->i_write_hint, hint);

	/*
	 * file->f_mapping->host may differ from inode. As an example,
	 * blkdev_open() modifies file->f_mapping.
	 */
	if (file->f_mapping->host != inode)
		WRITE_ONCE(file->f_mapping->host->i_write_hint, hint);

	return 0;
}

/* Is the file descriptor a dup of the file? */
static long f_dupfd_query(int fd, struct file *filp)
{
	CLASS(fd_raw, f)(fd);

	/*
	 * We can do the 'fdput()' immediately, as the only thing that
	 * matters is the pointer value which isn't changed by the fdput.
	 *
	 * Technically we didn't need a ref at all, and 'fdget()' was
	 * overkill, but given our lockless file pointer lookup, the
	 * alternatives are complicated.
	 */
	return f.file == filp;
}

static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	void __user *argp = (void __user *)arg;
	int argi = (int)arg;
	struct flock flock;
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
		err = f_dupfd(argi, filp, 0);
		break;
	case F_DUPFD_CLOEXEC:
		err = f_dupfd(argi, filp, O_CLOEXEC);
		break;
	case F_DUPFD_QUERY:
		err = f_dupfd_query(argi, filp);
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, argi & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, argi);
		break;
#if BITS_PER_LONG != 32
	/* 32-bit arches must use fcntl64() */
	case F_OFD_GETLK:
#endif
	case F_GETLK:
		if (copy_from_user(&flock, argp, sizeof(flock)))
			return -EFAULT;
		err = fcntl_getlk(filp, cmd, &flock);
		if (!err && copy_to_user(argp, &flock, sizeof(flock)))
			return -EFAULT;
		break;
#if BITS_PER_LONG != 32
	/* 32-bit arches must use fcntl64() */
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		fallthrough;
#endif
	case F_SETLK:
	case F_SETLKW:
		if (copy_from_user(&flock, argp, sizeof(flock)))
			return -EFAULT;
		err = fcntl_setlk(fd, filp, cmd, &flock);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error. Oops. If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = f_getown(filp);
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		err = f_setown(filp, argi, 1);
		break;
	case F_GETOWN_EX:
		err = f_getown_ex(filp, arg);
		break;
	case F_SETOWN_EX:
		err = f_setown_ex(filp, arg);
		break;
	case F_GETOWNER_UIDS:
		err = f_getowner_uids(filp, arg);
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (!valid_signal(argi)) {
			break;
		}
		err = 0;
		filp->f_owner.signum = argi;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, argi);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, argi);
		break;
	case F_SETPIPE_SZ:
	case F_GETPIPE_SZ:
		err = pipe_fcntl(filp, cmd, argi);
		break;
	case F_ADD_SEALS:
	case F_GET_SEALS:
		err = memfd_fcntl(filp, cmd, argi);
		break;
	case F_GET_RW_HINT:
		err = fcntl_get_rw_hint(filp, cmd, arg);
		break;
	case F_SET_RW_HINT:
		err = fcntl_set_rw_hint(filp, cmd, arg);
		break;
	default:
		break;
	}
	return err;
}

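/*
 * The small set of commands permitted on file descriptors opened with
 * O_PATH (FMODE_PATH); the syscall entry points reject everything else
 * on such descriptors before calling do_fcntl().
 */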
static int check_fcntl_cmd(unsigned cmd)
{
	switch (cmd) {
	case F_DUPFD:
	case F_DUPFD_CLOEXEC:
	case F_DUPFD_QUERY:
	case F_GETFD:
	case F_SETFD:
	case F_GETFL:
		return 1;
	}
	return 0;
}

SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
	struct fd f = fdget_raw(fd);
	long err = -EBADF;

	if (!f.file)
		goto out;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out1;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (!err)
		err = do_fcntl(fd, cmd, arg, f.file);

out1:
	fdput(f);
out:
	return err;
}

#if BITS_PER_LONG == 32
SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
		unsigned long, arg)
{
	void __user *argp = (void __user *)arg;
	struct fd f = fdget_raw(fd);
	struct flock64 flock;
	long err = -EBADF;

	if (!f.file)
		goto out;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out1;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (err)
		goto out1;

	switch (cmd) {
	case F_GETLK64:
	case F_OFD_GETLK:
		err = -EFAULT;
		if (copy_from_user(&flock, argp, sizeof(flock)))
			break;
		err = fcntl_getlk64(f.file, cmd, &flock);
		if (!err && copy_to_user(argp, &flock, sizeof(flock)))
			err = -EFAULT;
		break;
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		err = -EFAULT;
		if (copy_from_user(&flock, argp, sizeof(flock)))
			break;
		err = fcntl_setlk64(fd, f.file, cmd, &flock);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, f.file);
		break;
	}
out1:
	fdput(f);
out:
	return err;
}
#endif

#ifdef CONFIG_COMPAT
/* careful - don't use anywhere else */
#define copy_flock_fields(dst, src)		\
	(dst)->l_type = (src)->l_type;		\
	(dst)->l_whence = (src)->l_whence;	\
	(dst)->l_start = (src)->l_start;	\
	(dst)->l_len = (src)->l_len;		\
	(dst)->l_pid = (src)->l_pid;

static int get_compat_flock(struct flock *kfl, const struct compat_flock __user *ufl)
{
	struct compat_flock fl;

	if (copy_from_user(&fl, ufl, sizeof(struct compat_flock)))
		return -EFAULT;
	copy_flock_fields(kfl, &fl);
	return 0;
}

static int get_compat_flock64(struct flock *kfl, const struct compat_flock64 __user *ufl)
{
	struct compat_flock64 fl;

	if (copy_from_user(&fl, ufl, sizeof(struct compat_flock64)))
		return -EFAULT;
	copy_flock_fields(kfl, &fl);
	return 0;
}

static int put_compat_flock(const struct flock *kfl, struct compat_flock __user *ufl)
{
	struct compat_flock fl;

	memset(&fl, 0, sizeof(struct compat_flock));
	copy_flock_fields(&fl, kfl);
	if (copy_to_user(ufl, &fl, sizeof(struct compat_flock)))
		return -EFAULT;
	return 0;
}

static int put_compat_flock64(const struct flock *kfl, struct compat_flock64 __user *ufl)
{
	struct compat_flock64 fl;

	BUILD_BUG_ON(sizeof(kfl->l_start) > sizeof(ufl->l_start));
	BUILD_BUG_ON(sizeof(kfl->l_len) > sizeof(ufl->l_len));

	memset(&fl, 0, sizeof(struct compat_flock64));
	copy_flock_fields(&fl, kfl);
	if (copy_to_user(ufl, &fl, sizeof(struct compat_flock64)))
		return -EFAULT;
	return 0;
}
#undef copy_flock_fields

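/*
 * In the compat path the 64-bit lock commands are serviced by the native
 * fcntl_getlk()/fcntl_setlk(), so map F_GETLK64/F_SETLK64/F_SETLKW64 onto
 * their plain counterparts before dispatching.
 */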
static unsigned int
convert_fcntl_cmd(unsigned int cmd)
{
	switch (cmd) {
	case F_GETLK64:
		return F_GETLK;
	case F_SETLK64:
		return F_SETLK;
	case F_SETLKW64:
		return F_SETLKW;
	}

	return cmd;
}

/*
 * GETLK was successful and we need to return the data, but it needs to fit in
 * the compat structure.
 * l_start shouldn't be too big, unless the original start + end is greater than
 * COMPAT_OFF_T_MAX, in which case the app was asking for trouble, so we return
 * -EOVERFLOW in that case. l_len could be too big, in which case we just
 * truncate it, and only allow the app to see that part of the conflicting lock
 * that might make sense to it anyway
 */
static int fixup_compat_flock(struct flock *flock)
{
	if (flock->l_start > COMPAT_OFF_T_MAX)
		return -EOVERFLOW;
	if (flock->l_len > COMPAT_OFF_T_MAX)
		flock->l_len = COMPAT_OFF_T_MAX;
	return 0;
}

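/*
 * Common implementation behind the compat fcntl() and fcntl64() entry
 * points: translate the compat flock layouts, then reuse the native lock
 * code and do_fcntl() for everything else.
 */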
static long do_compat_fcntl64(unsigned int fd, unsigned int cmd,
			      compat_ulong_t arg)
{
	struct fd f = fdget_raw(fd);
	struct flock flock;
	long err = -EBADF;

	if (!f.file)
		return err;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out_put;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (err)
		goto out_put;

	switch (cmd) {
	case F_GETLK:
		err = get_compat_flock(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
		if (err)
			break;
		err = fixup_compat_flock(&flock);
		if (!err)
			err = put_compat_flock(&flock, compat_ptr(arg));
		break;
	case F_GETLK64:
	case F_OFD_GETLK:
		err = get_compat_flock64(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
		if (!err)
			err = put_compat_flock64(&flock, compat_ptr(arg));
		break;
	case F_SETLK:
	case F_SETLKW:
		err = get_compat_flock(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock);
		break;
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		err = get_compat_flock64(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, f.file);
		break;
	}
out_put:
	fdput(f);
	return err;
}

COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
		       compat_ulong_t, arg)
{
	return do_compat_fcntl64(fd, cmd, arg);
}

COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd,
		       compat_ulong_t, arg)
{
	switch (cmd) {
	case F_GETLK64:
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_GETLK:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		return -EINVAL;
	}
	return do_compat_fcntl64(fd, cmd, arg);
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static const __poll_t band_table[NSIGPOLL] = {
	EPOLLIN | EPOLLRDNORM,			/* POLL_IN */
	EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND,	/* POLL_OUT */
	EPOLLIN | EPOLLRDNORM | EPOLLMSG,	/* POLL_MSG */
	EPOLLERR,				/* POLL_ERR */
	EPOLLPRI | EPOLLRDBAND,			/* POLL_PRI */
	EPOLLHUP | EPOLLERR			/* POLL_HUP */
};

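/*
 * May the credentials recorded in @fown deliver signal @sig to task @p?
 * The owner's uid/euid are compared against the target's credentials
 * under RCU, with a final veto from the LSM hook.
 */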
static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown, int sig)
{
	const struct cred *cred;
	int ret;

	rcu_read_lock();
	cred = __task_cred(p);
	ret = ((uid_eq(fown->euid, GLOBAL_ROOT_UID) ||
		uid_eq(fown->euid, cred->suid) || uid_eq(fown->euid, cred->uid) ||
		uid_eq(fown->uid, cred->suid) || uid_eq(fown->uid, cred->uid)) &&
	       !security_file_send_sigiotask(p, fown, sig));
	rcu_read_unlock();
	return ret;
}

static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd, int reason, enum pid_type type)
{
	/*
	 * F_SETSIG can change ->signum lockless in parallel, make
	 * sure we read it once and use the same value throughout.
	 */
	int signum = READ_ONCE(fown->signum);

	if (!sigio_perm(p, fown, signum))
		return;

	switch (signum) {
	default: {
		kernel_siginfo_t si;

		/* Queue a rt signal with the appropriate fd as its
		   value. We use SI_SIGIO as the source, not
		   SI_KERNEL, since kernel signals always get
		   delivered even if we can't queue. Failure to
		   queue in this case _should_ be reported; we fall
		   back to SIGIO in that case. --sct */
		clear_siginfo(&si);
		si.si_signo = signum;
		si.si_errno = 0;
		si.si_code = reason;
		/*
		 * POSIX defines POLL_IN and friends to be signal
		 * specific si_codes for SIG_POLL. Linux extended
		 * these si_codes to other signals in a way that is
		 * ambiguous if other signals also have signal
		 * specific si_codes. In that case use SI_SIGIO instead
		 * to remove the ambiguity.
		 */
		if ((signum != SIGPOLL) && sig_specific_sicodes(signum))
			si.si_code = SI_SIGIO;

		/* Make sure we are called with one of the POLL_*
		   reasons, otherwise we could leak kernel stack into
		   userspace. */
		BUG_ON((reason < POLL_IN) || ((reason - POLL_IN) >= NSIGPOLL));
		if (reason - POLL_IN >= NSIGPOLL)
			si.si_band = ~0L;
		else
			si.si_band = mangle_poll(band_table[reason - POLL_IN]);
		si.si_fd = fd;
		if (!do_send_sig_info(signum, &si, p, type))
			break;
	}
		fallthrough;	/* fall back on the old plain SIGIO signal */
	case 0:
		do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, type);
	}
}

void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	enum pid_type type;
	unsigned long flags;
	struct pid *pid;

	read_lock_irqsave(&fown->lock, flags);

	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	if (type <= PIDTYPE_TGID) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			send_sigio_to_task(p, fown, fd, band, type);
		rcu_read_unlock();
	} else {
		read_lock(&tasklist_lock);
		do_each_pid_task(pid, type, p) {
			send_sigio_to_task(p, fown, fd, band, type);
		} while_each_pid_task(pid, type, p);
		read_unlock(&tasklist_lock);
	}
out_unlock_fown:
	read_unlock_irqrestore(&fown->lock, flags);
}

static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown, enum pid_type type)
{
	if (sigio_perm(p, fown, SIGURG))
		do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, type);
}

int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	unsigned long flags;
	int ret = 0;

	read_lock_irqsave(&fown->lock, flags);

	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	if (type <= PIDTYPE_TGID) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			send_sigurg_to_task(p, fown, type);
		rcu_read_unlock();
	} else {
		read_lock(&tasklist_lock);
		do_each_pid_task(pid, type, p) {
			send_sigurg_to_task(p, fown, type);
		} while_each_pid_task(pid, type, p);
		read_unlock(&tasklist_lock);
	}
out_unlock_fown:
	read_unlock_irqrestore(&fown->lock, flags);
	return ret;
}

static DEFINE_SPINLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __ro_after_init;

/*
 * Remove a fasync entry. If successfully removed, return
 * positive and clear the FASYNC flag. If no entry exists,
 * do nothing and return 0.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	int result = 0;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		write_lock_irq(&fa->fa_lock);
		fa->fa_file = NULL;
		write_unlock_irq(&fa->fa_lock);

		*fp = fa->fa_next;
		kfree_rcu(fa, fa_rcu);
		filp->f_flags &= ~FASYNC;
		result = 1;
		break;
	}
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return result;
}

struct fasync_struct *fasync_alloc(void)
{
	return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
}

/*
 * NOTE! This can be used only for unused fasync entries:
 * entries that actually got inserted on the fasync list
 * need to be released by rcu - see fasync_remove_entry.
 */
void fasync_free(struct fasync_struct *new)
{
	kmem_cache_free(fasync_cache, new);
}

/*
 * Insert a new entry into the fasync list. Return the pointer to the
 * old one if we didn't use the new one.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new)
{
	struct fasync_struct *fa, **fp;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		write_lock_irq(&fa->fa_lock);
		fa->fa_fd = fd;
		write_unlock_irq(&fa->fa_lock);
		goto out;
	}

	rwlock_init(&new->fa_lock);
	new->magic = FASYNC_MAGIC;
	new->fa_file = filp;
	new->fa_fd = fd;
	new->fa_next = *fapp;
	rcu_assign_pointer(*fapp, new);
	filp->f_flags |= FASYNC;

out:
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return fa;
}

/*
 * Add a fasync entry. Return negative on error, positive if
 * added, and zero if did nothing but change an existing one.
 */
static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *new;

	new = fasync_alloc();
	if (!new)
		return -ENOMEM;

	/*
	 * fasync_insert_entry() returns the old (update) entry if
	 * it existed.
	 *
	 * So free the (unused) new entry and return 0 to let the
	 * caller know that we didn't add any new fasync entries.
	 */
	if (fasync_insert_entry(fd, filp, fapp, new)) {
		fasync_free(new);
		return 0;
	}

	return 1;
}

/*
 * fasync_helper() is used by almost all character device drivers
 * to set up the fasync queue, and for regular files by the file
 * lease code. It returns negative on error, 0 if it did no changes
 * and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
	if (!on)
		return fasync_remove_entry(filp, fapp);
	return fasync_add_entry(fd, filp, fapp);
}

EXPORT_SYMBOL(fasync_helper);

/*
 * rcu_read_lock() is held
 */
static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct *fown;
		unsigned long flags;

		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		read_lock_irqsave(&fa->fa_lock, flags);
		if (fa->fa_file) {
			fown = &fa->fa_file->f_owner;
			/* Don't send SIGURG to processes which have not set a
			   queued signum: SIGURG has its own default signalling
			   mechanism. */
			if (!(sig == SIGURG && fown->signum == 0))
				send_sigio(fown, fa->fa_fd, band);
		}
		read_unlock_irqrestore(&fa->fa_lock, flags);
		fa = rcu_dereference(fa->fa_next);
	}
}

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		rcu_read_lock();
		kill_fasync_rcu(rcu_dereference(*fp), sig, band);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(kill_fasync);

static int __init fcntl_init(void)
{
	/*
	 * Please add new bits here to ensure allocation uniqueness.
	 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
	 * is defined as O_NONBLOCK on some platforms and not on others.
	 */
	BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
		HWEIGHT32(
			(VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
			__FMODE_EXEC | __FMODE_NONOTIFY));

	fasync_cache = kmem_cache_create("fasync_cache",
					 sizeof(struct fasync_struct), 0,
					 SLAB_PANIC | SLAB_ACCOUNT, NULL);
	return 0;
}

module_init(fcntl_init)
1072
454eedb8 1073module_init(fcntl_init)