/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004 Krzysztof Benedyczak (golbi@mat.uni.torun.pl)
 *                         Michal Wronski (michal.wronski@gmail.com)
 *
 * Spinlocks:              Mohamed Abbas (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                         Manfred Spraul (manfred@colorfullife.com)
 *
 * Audit:                  George Wilson (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/user.h>

#include <net/sock.h>
#include "util.h"

struct mqueue_fs_context {
	struct ipc_namespace	*ipc_ns;
};

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_READY	1

struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};

/*
 * Locking:
 *
 * Accesses to a message queue are synchronized by acquiring info->lock.
 *
 * There are two notable exceptions:
 * - The actual wakeup of a sleeping task is performed using the wake_q
 *   framework. info->lock is already released when wake_up_q is called.
 * - The exit codepaths after sleeping check ext_wait_queue->state without
 *   any locks. If it is STATE_READY, then the syscall is completed without
 *   acquiring info->lock.
 *
 * MQ_BARRIER:
 * To achieve proper release/acquire memory barrier pairing, the state is set
 * to STATE_READY with smp_store_release(), and it is read with READ_ONCE
 * followed by smp_acquire__after_ctrl_dep(). In addition, wake_q_add_safe()
 * is used.
 *
 * This prevents the following races:
 *
 * 1) With the simple wake_q_add(), the task could be gone already before
 *    the increase of the reference happens
 *	Thread A				Thread B
 *	WRITE_ONCE(wait.state, STATE_NONE);
 *	schedule_hrtimeout()
 *						wake_q_add(A)
 *						if (cmpxchg()) // success
 *						   ->state = STATE_READY (reordered)
 *	<timeout returns>
 *	if (wait.state == STATE_READY) return;
 *	sysret to user space
 *	sys_exit()
 *						get_task_struct() // UaF
 *
 * Solution: Use wake_q_add_safe() and perform the get_task_struct() before
 * the smp_store_release() that does ->state = STATE_READY.
 *
 * 2) Without proper _release/_acquire barriers, the woken up task
 *    could read stale data
 *
 *	Thread A				Thread B
 *	do_mq_timedreceive
 *	WRITE_ONCE(wait.state, STATE_NONE);
 *	schedule_hrtimeout()
 *						state = STATE_READY;
 *	<timeout returns>
 *	if (wait.state == STATE_READY) return;
 *	msg_ptr = wait.msg;	// Access to stale data!
 *						receiver->msg = message; (reordered)
 *
 * Solution: use _release and _acquire barriers.
 *
 * 3) There is intentionally no barrier when setting current->state
 *    to TASK_INTERRUPTIBLE: spin_unlock(&info->lock) provides the
 *    release memory barrier, and the wakeup is triggered when holding
 *    info->lock, i.e. spin_lock(&info->lock) provided a pairing
 *    acquire memory barrier.
 */
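/*
 * A minimal sketch of that pairing, as realized by __pipelined_op() (the
 * waker) and wq_sleep() (the sleeper):
 *
 *	waker, info->lock held		sleeper, lockless exit path
 *
 *	this->msg = message;
 *	smp_store_release(&this->state,
 *			  STATE_READY);
 *					if (READ_ONCE(ewp->state) == STATE_READY) {
 *						smp_acquire__after_ctrl_dep();
 *						retval = 0;  (wait.msg is stable now)
 *					}
 */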

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct rb_node *msg_tree_rightmost;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_namespace *notify_user_ns;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static struct file_system_type mqueue_fs_type;
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static const struct fs_context_operations mqueue_fs_context_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	bool rightmost = true;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority) {
			p = &(*p)->rb_left;
			rightmost = false;
		} else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		INIT_LIST_HEAD(&leaf->msg_list);
	}
	leaf->priority = msg->m_type;

	if (rightmost)
		info->msg_tree_rightmost = &leaf->rb_node;

	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}

static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
				  struct mqueue_inode_info *info)
{
	struct rb_node *node = &leaf->rb_node;

	if (info->msg_tree_rightmost == node)
		info->msg_tree_rightmost = rb_prev(node);

	rb_erase(node, &info->msg_tree);
	if (info->node_cache)
		kfree(leaf);
	else
		info->node_cache = leaf;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	/*
	 * During insert, low priorities go to the left and high to the
	 * right.  On receive, we want the highest priorities first, so
	 * walk all the way to the right.
	 */
	parent = info->msg_tree_rightmost;
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		msg_tree_erase(leaf, info);
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list)) {
			msg_tree_erase(leaf, info);
		}
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}

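/*
 * Allocate and set up an inode: either a regular file backing one message
 * queue (charging the worst-case queue footprint against the creating
 * user's RLIMIT_MSGQUEUE) or the root directory of an mqueue mount.
 */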
static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->user = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->msg_tree_rightmost = NULL;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size. That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns.  We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities.  However, since
		 * this is pinned memory, we need to assume worst case, so
		 * that means the min(mq_maxmsg, max_priorities) * struct
		 * posix_msg_tree_node.
		 */

		ret = -EINVAL;
		if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
			goto out_inode;
		if (capable(CAP_SYS_RESOURCE)) {
			if (info->attr.mq_maxmsg > HARD_MSGMAX ||
			    info->attr.mq_msgsize > HARD_MSGSIZEMAX)
				goto out_inode;
		} else {
			if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
			    info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
				goto out_inode;
		}
		ret = -EOVERFLOW;
		/* check for overflow */
		if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
			goto out_inode;
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);
		mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
		if (mq_bytes + mq_treesize < mq_bytes)
			goto out_inode;
		mq_bytes += mq_treesize;
		spin_lock(&mq_lock);
		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
			spin_unlock(&mq_lock);
			/* mqueue_evict_inode() releases info->messages */
			ret = -EMFILE;
			goto out_inode;
		}
		u->mq_bytes += mq_bytes;
		spin_unlock(&mq_lock);

		/* all is ok */
		info->user = get_uid(u);
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct ipc_namespace *ns = sb->s_fs_info;

	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static int mqueue_get_tree(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
}

static void mqueue_fs_context_free(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	put_ipc_ns(ctx->ipc_ns);
	kfree(ctx);
}

static int mqueue_init_fs_context(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx;

	ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
	fc->fs_private = ctx;
	fc->ops = &mqueue_fs_context_ops;
	return 0;
}

static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
{
	struct mqueue_fs_context *ctx;
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	ctx = fc->fs_private;
	put_ipc_ns(ctx->ipc_ns);
	ctx->ipc_ns = get_ipc_ns(ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);

	mnt = fc_mount(fc);
	put_fs_context(fc);
	return mnt;
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_free_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

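/*
 * Final teardown of a queue inode: free all queued messages and the cached
 * tree node, return the accounted bytes to the owning user and drop the
 * namespace's queue count.
 */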
static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg, *nmsg;
	LIST_HEAD(tmp_msg);

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		list_add_tail(&msg->m_list, &tmp_msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
		list_del(&msg->m_list);
		free_msg(msg);
	}

	user = info->user;
	if (user) {
		unsigned long mq_bytes, mq_treesize;

		/* Total amount of bytes accounted for the mqueue */
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);

		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
					  info->attr.mq_msgsize);

		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode;
	struct mq_attr *attr = arg;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
			 umode_t mode, bool excl)
{
	return mqueue_create_attr(dentry, mode, NULL);
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This routine handles a read(2) on a queue file.
 * Rather than doing some sort of mq_receive() here, we only allow
 * reading the queue size & notification info (the only values that
 * are interesting from the user's point of view and aren't accessible
 * through the standard routines).
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
		 "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
		 info->qsize,
		 info->notify_owner ? info->notify.sigev_notify : 0,
		 (info->notify_owner &&
		  info->notify.sigev_notify == SIGEV_SIGNAL) ?
			info->notify.sigev_signo : 0,
		 pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				      strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
	return ret;
}

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	__poll_t retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = EPOLLIN | EPOLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
		   struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->prio <= current->prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
	__releases(&info->lock)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		/* memory barrier not required, we hold info->lock */
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		if (READ_ONCE(ewp->state) == STATE_READY) {
			/* see MQ_BARRIER for purpose/pairing */
			smp_acquire__after_ctrl_dep();
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);

		/* we hold info->lock, so no memory barrier required */
		if (READ_ONCE(ewp->state) == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}


static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * The next function is only there to keep sys_mq_timedsend() from getting
 * too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/* notification
	 * invoked when there is a registered process, no process is waiting
	 * synchronously for a message AND the state of the queue changed from
	 * empty to not empty. Here we are sure that no one is waiting
	 * synchronously. */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct kernel_siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			clear_siginfo(&sig_i);
			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			/* map current pid/uid into info->owner's namespaces */
			rcu_read_lock();
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
			rcu_read_unlock();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}

static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout,
			   struct timespec64 *ts)
{
	if (get_timespec64(ts, u_abs_timeout))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}

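/*
 * Implement the O_CREAT/O_EXCL semantics of mq_open(): create the queue if
 * the dentry is negative (honouring a read-only mount), otherwise verify
 * that the existing queue may be opened with the requested access mode.
 */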
static int prepare_open(struct dentry *dentry, int oflag, int ro,
			umode_t mode, struct filename *name,
			struct mq_attr *attr)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;

	if (d_really_is_negative(dentry)) {
		if (!(oflag & O_CREAT))
			return -ENOENT;
		if (ro)
			return ro;
		audit_inode_parent_hidden(name, dentry->d_parent);
		return vfs_mkobj(dentry, mode & ~current_umask(),
				  mqueue_create_attr, attr);
	}
	/* it already existed */
	audit_inode(name, dentry, 0);
	if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
		return -EEXIST;
	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return -EINVAL;
	acc = oflag2acc[oflag & O_ACCMODE];
	return inode_permission(d_inode(dentry), acc);
}

static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
		      struct mq_attr *attr)
{
	struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	struct filename *name;
	struct path path;
	int fd, error;
	int ro;

	audit_mq_open(oflag, mode, attr);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	inode_lock(d_inode(root));
	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);
	error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
	if (!error) {
		struct file *file = dentry_open(&path, oflag, current_cred());
		if (!IS_ERR(file))
			fd_install(fd, file);
		else
			error = PTR_ERR(file);
	}
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	inode_unlock(d_inode(root));
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct mq_attr attr;
	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
}

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
	dentry = lookup_one_len(name->name, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = d_inode(dentry);
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL);
	}
	dput(dentry);

out_unlock:
	inode_unlock(d_inode(mnt->mnt_root));
	if (inode)
		iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */

static inline void __pipelined_op(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct ext_wait_queue *this)
{
	list_del(&this->list);
	get_task_struct(this->task);

	/* see MQ_BARRIER for purpose/pairing */
	smp_store_release(&this->state, STATE_READY);
	wake_q_add_safe(wake_q, this->task);
}

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	__pipelined_op(wake_q, info, receiver);
}

/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
 * take its message and put it into the queue (we now have one free slot). */
static inline void pipelined_receive(struct wake_q_head *wake_q,
				     struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;

	__pipelined_op(wake_q, info, sender);
}

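/*
 * Common implementation of mq_timedsend(): hand the message directly to a
 * waiting receiver if there is one, otherwise insert it into the priority
 * rbtree, or sleep (honouring O_NONBLOCK and the optional absolute timeout)
 * until space becomes available.
 */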
static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
		size_t msg_len, unsigned int msg_prio,
		struct timespec64 *ts)
{
	struct fd f;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;
	DEFINE_WAKE_Q(wake_q);

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (f.file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;

			/* memory barrier not required, we hold info->lock */
			WRITE_ONCE(wait.state, STATE_NONE);
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(&wake_q, info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);
	}
out_unlock:
	spin_unlock(&info->lock);
	wake_up_q(&wake_q);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fdput(f);
out:
	return ret;
}

static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
		size_t msg_len, unsigned int __user *u_msg_prio,
		struct timespec64 *ts)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (f.file->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;

			/* memory barrier not required, we hold info->lock */
			WRITE_ONCE(wait.state, STATE_NONE);
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		DEFINE_WAKE_Q(wake_q);

		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);

		/* There is now free space in queue. */
		pipelined_receive(&wake_q, info);
		spin_unlock(&info->lock);
		wake_up_q(&wake_q);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fdput(f);
out:
	return ret;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}

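/*
 * For orientation, a typical user-space round trip through the syscalls
 * above (illustrative only, error handling omitted):
 *
 *	mqd_t q = mq_open("/q", O_CREAT | O_RDWR, 0600, NULL);
 *	mq_send(q, "hi", 2, 1);			-> sys_mq_timedsend()
 *	char buf[8192];				   (>= mq_msgsize)
 *	mq_receive(q, buf, sizeof(buf), NULL);	-> sys_mq_timedreceive()
 *	mq_close(q);
 *	mq_unlink("/q");
 */
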
/*
 * Note: if the caller asks us to deregister (by passing a NULL pointer)
 * but is not the current owner of the notification, the request is silently
 * discarded. POSIX does not explicitly define this case.
 */
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
	int ret;
	struct fd f;
	struct sock *sock;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	audit_mq_notify(mqdes, notification);

	nc = NULL;
	sock = NULL;
	if (notification != NULL) {
		if (unlikely(notification->sigev_notify != SIGEV_NONE &&
			     notification->sigev_notify != SIGEV_SIGNAL &&
			     notification->sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification->sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification->sigev_signo)) {
			return -EINVAL;
		}
		if (notification->sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc)
				return -ENOMEM;

			if (copy_from_user(nc->data,
					notification->sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto free_skb;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			f = fdget(notification->sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				goto free_skb;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1) {
				sock = NULL;
				goto retry;
			}
			if (ret)
				return ret;
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = current_time(inode);
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification->sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification->sigev_signo;
			info->notify.sigev_value = notification->sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = current_time(inode);
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	if (sock)
		netlink_detachskb(sock, nc);
	else
free_skb:
		dev_kfree_skb(nc);

	return ret;
}

SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
			return -EFAULT;
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
{
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (new && (new->mq_flags & (~O_NONBLOCK)))
		return -EINVAL;

	f = fdget(mqdes);
	if (!f.file)
		return -EBADF;

	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		fdput(f);
		return -EBADF;
	}

	inode = file_inode(f.file);
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	if (old) {
		*old = info->attr;
		old->mq_flags = f.file->f_flags & O_NONBLOCK;
	}
	if (new) {
		audit_mq_getsetattr(mqdes, new);
		spin_lock(&f.file->f_lock);
		if (new->mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = current_time(inode);
	}

	spin_unlock(&info->lock);
	fdput(f);
	return 0;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
		return -EFAULT;
	return 0;
}

#ifdef CONFIG_COMPAT

struct compat_mq_attr {
	compat_long_t mq_flags;      /* message queue flags */
	compat_long_t mq_maxmsg;     /* maximum number of messages */
	compat_long_t mq_msgsize;    /* maximum message size */
	compat_long_t mq_curmsgs;    /* number of messages currently queued */
	compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
};

static inline int get_compat_mq_attr(struct mq_attr *attr,
			const struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	if (copy_from_user(&v, uattr, sizeof(*uattr)))
		return -EFAULT;

	memset(attr, 0, sizeof(*attr));
	attr->mq_flags = v.mq_flags;
	attr->mq_maxmsg = v.mq_maxmsg;
	attr->mq_msgsize = v.mq_msgsize;
	attr->mq_curmsgs = v.mq_curmsgs;
	return 0;
}

static inline int put_compat_mq_attr(const struct mq_attr *attr,
			struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	memset(&v, 0, sizeof(v));
	v.mq_flags = attr->mq_flags;
	v.mq_maxmsg = attr->mq_maxmsg;
	v.mq_msgsize = attr->mq_msgsize;
	v.mq_curmsgs = attr->mq_curmsgs;
	if (copy_to_user(uattr, &v, sizeof(*uattr)))
		return -EFAULT;
	return 0;
}

COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
		       int, oflag, compat_mode_t, mode,
		       struct compat_mq_attr __user *, u_attr)
{
	struct mq_attr attr, *p = NULL;
	if (u_attr && oflag & O_CREAT) {
		p = &attr;
		if (get_compat_mq_attr(&attr, u_attr))
			return -EFAULT;
	}
	return do_mq_open(u_name, oflag, mode, p);
}

COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		       const struct compat_sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (get_compat_sigevent(&n, u_notification))
			return -EFAULT;
		if (n.sigev_notify == SIGEV_THREAD)
			n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		       const struct compat_mq_attr __user *, u_mqstat,
		       struct compat_mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (get_compat_mq_attr(new, u_mqstat))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (put_compat_mq_attr(old, u_omqstat))
		return -EFAULT;
	return 0;
}
#endif

#ifdef CONFIG_COMPAT_32BIT_TIME
static int compat_prepare_timeout(const struct old_timespec32 __user *p,
				   struct timespec64 *ts)
{
	if (get_old_timespec32(ts, p))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
		const char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int, msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
		char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int __user *, u_msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
#endif

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.free_inode = mqueue_free_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static const struct fs_context_operations mqueue_fs_context_ops = {
	.free		= mqueue_fs_context_free,
	.get_tree	= mqueue_get_tree,
};

static struct file_system_type mqueue_fs_type = {
	.name			= "mqueue",
	.init_fs_context	= mqueue_init_fs_context,
	.kill_sb		= kill_litter_super,
	.fs_flags		= FS_USERNS_MOUNT,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	struct vfsmount *m;

	ns->mq_queues_count  = 0;
	ns->mq_queues_max    = DFLT_QUEUESMAX;
	ns->mq_msg_max       = DFLT_MSGMAX;
	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
	ns->mq_msg_default   = DFLT_MSG;
	ns->mq_msgsize_default  = DFLT_MSGSIZE;

	m = mq_create_mount(ns);
	if (IS_ERR(m))
		return PTR_ERR(m);
	ns->mq_mnt = m;
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

device_initcall(init_mqueue_fs);