/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <linux/uaccess.h>

#include "util.h"
struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
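/*
 * Note: each attach wraps the segment's backing file in a fresh struct
 * file whose ->private_data points at a shm_file_data. This indirection
 * lets the shm layer interpose its own file and vm operations while
 * delegating the real paging work to the shmem or hugetlbfs file kept
 * in sfd->file.
 */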
static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)
static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif
int shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	return ipc_init_ids(&shm_ids(ns));
}
/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
	rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
}
static int __init ipc_ns_init(void)
{
	const int err = shm_init_ns(&init_ipc_ns);
	WARN(err, "ipc: sysv shm_init_ns failed: %d\n", err);
	return err;
}

pure_initcall(ipc_ns_init);
void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
			"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
			"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
			IPC_SHM_IDS, sysvipc_shm_proc_show);
}
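/*
 * The column widths in the headers above are meant to line up with the
 * SIZE_SPEC format used by sysvipc_shm_proc_show() at the bottom of this
 * file (%10lu on 32-bit, %21lu on 64-bit); that is the only difference
 * between the two header strings.
 */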
static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}
/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	/*
	 * Callers of shm_lock() must validate the status of the returned ipc
	 * object pointer (as returned by ipc_lock()), and error out as
	 * appropriate.
	 */
	if (IS_ERR(ipcp))
		return (void *)ipcp;
	return container_of(ipcp, struct shmid_kernel, shm_perm);
}
static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}
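/*
 * Freeing the shmid_kernel is RCU-deferred: lookups walk the idr under
 * rcu_read_lock() and may still hold a pointer to the kern_ipc_perm,
 * so the security blob and the structure itself are released only after
 * a grace period, via the callback below.
 */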
static void shm_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
							rcu);
	struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
							shm_perm);
	security_shm_free(shp);
	kvfree(shp);
}
static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	list_del(&s->shm_clist);
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}
static int __shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	if (IS_ERR(shp))
		return PTR_ERR(shp);

	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
	return 0;
}
/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	int err = __shm_open(vma);
	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	WARN_ON_ONCE(err);
}
/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	struct file *shm_file;

	shm_file = shp->shm_file;
	shp->shm_file = NULL;
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shm_file))
		shmem_lock(shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(i_size_read(file_inode(shm_file)),
				shp->mlock_user);
	fput(shm_file);
	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
}
/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}
/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);

	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	if (WARN_ON_ONCE(IS_ERR(shp)))
		goto done; /* no-op */

	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
done:
	up_write(&shm_ids(ns).rwsem);
}
/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and with already
	 * exit'ed originating process.
	 *
	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}
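/*
 * Reap all orphaned segments in the namespace. This is invoked from the
 * ipc sysctl code when kernel.shm_rmid_forced is switched on, so that
 * segments whose creator already exited are destroyed immediately
 * instead of lingering until the next detach.
 */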
void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rwsem);
}
/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
	struct shmid_kernel *shp, *n;

	if (list_empty(&task->sysvshm.shm_clist))
		return;

	/*
	 * If kernel.shm_rmid_forced is not set then only keep track of
	 * which shmids are orphaned, so that a later set of the sysctl
	 * can clean them up.
	 */
	if (!ns->shm_rmid_forced) {
		down_read(&shm_ids(ns).rwsem);
		list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
			shp->shm_creator = NULL;
		/*
		 * Only under read lock but we are only called on current
		 * so no entry on the list will be shared.
		 */
		list_del(&task->sysvshm.shm_clist);
		up_read(&shm_ids(ns).rwsem);
		return;
	}

	/*
	 * Destroy all already created segments, that were not yet mapped,
	 * and mark any mapped as orphan to cover the sysctl toggling.
	 * Destroy is skipped if shm_may_destroy() returns false.
	 */
	down_write(&shm_ids(ns).rwsem);
	list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
		shp->shm_creator = NULL;

		if (shm_may_destroy(ns, shp)) {
			shm_lock_by_ptr(shp);
			shm_destroy(ns, shp);
		}
	}

	/* Remove the list head from any segments still attached. */
	list_del(&task->sysvshm.shm_clist);
	up_write(&shm_ids(ns).rwsem);
}
static int shm_fault(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vmf);
}
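/*
 * Page faults are simply forwarded to the backing file's fault handler
 * (shmem or hugetlbfs), which shm_mmap() stashed in sfd->vm_ops.
 */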
#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;
	return pol;
}
#endif
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	/*
	 * In case of remap_file_pages() emulation, the file can represent
	 * removed IPC ID: propagate shm_lock() error to caller.
	 */
	ret = __shm_open(vma);
	if (ret)
		return ret;

	ret = call_mmap(sfd->file, vma);
	if (ret) {
		shm_close(vma);
		return ret;
	}
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	WARN_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	return 0;
}
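/*
 * The ordering above matters: call_mmap() lets the backing file install
 * its own vm_ops on the vma; those are saved in sfd->vm_ops and then
 * replaced with shm_vm_ops, so that open/close can maintain shm_nattch
 * across fork() and unmap while faults are still delegated.
 */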
static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}
static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						  pgoff, flags);
}
static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

/*
 * shm_file_operations_huge is now identical to shm_file_operations,
 * but we keep it distinct for the sake of is_file_shm_hugepages().
 */
static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};
bool is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}
static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};
/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (numpages << PAGE_SHIFT < size)
		return -ENOSPC;

	if (ns->shm_tot + numpages < ns->shm_tot ||
			ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
	if (unlikely(!shp))
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		kvfree(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				  &shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_kernel_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;

	/* ipc_addid() locks shp upon success. */
	error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (error < 0)
		goto no_id;

	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
	return error;
}
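/*
 * Error unwinding in newseg(): at no_id the segment was never added to
 * the idr, so only the hugetlb mlock accounting and the file reference
 * need to be undone; no_file then frees the security blob and the
 * shmid_kernel through the same RCU callback as the normal destroy path.
 */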
/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;
	return 0;
}
SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops shm_ops = {
		.getnew = newseg,
		.associate = shm_security,
		.more_checks = shm_more_checks,
	};
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
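/*
 * For reference, an illustrative userspace counterpart of the calls
 * implemented in this file (not kernel code):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);
 *	... use p ...
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */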
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD: {
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD: {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD: {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}
/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);

		spin_lock_irq(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock_irq(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}
/*
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}
/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	down_write(&shm_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
				      &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&shp->shm_perm);
		/* do_shm_rmid unlocks the ipc object and rcu */
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_lock_object(&shp->shm_perm);
		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
		if (err)
			goto out_unlock0;
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&shm_ids(ns).rwsem);
	return err;
}
static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
			 int cmd, int version, void __user *buf)
{
	int err;
	struct shmid_kernel *shp;

	/* preliminary security checks for *_INFO */
	if (cmd == IPC_INFO || cmd == SHM_INFO) {
		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;
	}

	switch (cmd) {
	case IPC_INFO: {
		struct shminfo64 shminfo;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;
		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rwsem);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO: {
		struct shm_info shm_info;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rwsem);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT: {
		struct shmid64_ds tbuf;
		int result;

		rcu_read_lock();
		if (cmd == SHM_STAT) {
			shp = shm_obtain_object(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_obtain_object_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		rcu_read_unlock();

		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	default:
		return -EINVAL;
	}

out_unlock:
	rcu_read_unlock();
out:
	return err;
}
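/*
 * Return-value conventions above: IPC_INFO and SHM_INFO hand back the
 * highest in-use index (from ipc_get_maxid()) as a positive result so
 * that ipcs(1) can iterate with SHM_STAT, which treats shmid as an index
 * and returns the full segment id; plain IPC_STAT validates the id and
 * returns 0 on success.
 */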
SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SHM_INFO:
	case SHM_STAT:
	case IPC_STAT:
		return shmctl_nolock(ns, shmid, cmd, version, buf);
	case IPC_RMID:
	case IPC_SET:
		return shmctl_down(ns, shmid, cmd, buf, version);
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *shm_file;

		rcu_read_lock();
		shp = shm_obtain_object_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock1;
		}

		audit_ipc_obj(&(shp->shm_perm));
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock1;

		ipc_lock_object(&shp->shm_perm);

		/* check if shm_destroy() is tearing down shp */
		if (!ipc_valid_object(&shp->shm_perm)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
			kuid_t euid = current_euid();

			if (!uid_eq(euid, shp->shm_perm.uid) &&
			    !uid_eq(euid, shp->shm_perm.cuid)) {
				err = -EPERM;
				goto out_unlock0;
			}
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
				err = -EPERM;
				goto out_unlock0;
			}
		}

		shm_file = shp->shm_file;
		if (is_file_hugepages(shm_file))
			goto out_unlock0;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();

			err = shmem_lock(shm_file, 1, user);
			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
				shp->shm_perm.mode |= SHM_LOCKED;
				shp->mlock_user = user;
			}
			goto out_unlock0;
		}

		/* SHM_UNLOCK */
		if (!(shp->shm_perm.mode & SHM_LOCKED))
			goto out_unlock0;
		shmem_lock(shm_file, 0, shp->mlock_user);
		shp->shm_perm.mode &= ~SHM_LOCKED;
		shp->mlock_user = NULL;
		get_file(shm_file);
		ipc_unlock_object(&shp->shm_perm);
		rcu_read_unlock();
		shmem_unlock_mapping(shm_file->f_mapping);

		fput(shm_file);
		return err;
	}
	default:
		return -EINVAL;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
	return err;
}
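/*
 * Note that SHM_UNLOCK calls shmem_unlock_mapping() only after dropping
 * the object lock and RCU: scanning the mapping to pull pages off the
 * unevictable list can be slow, and the get_file()/fput() pair keeps
 * the backing file alive across that window.
 */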
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg,
	      ulong *raddr, unsigned long shmlba)
{
	struct shmid_kernel *shp;
	unsigned long addr = (unsigned long)shmaddr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags = MAP_SHARED;
	unsigned long prot;
	int acc_mode;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;
	unsigned long populate = 0;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	if (addr) {
		if (addr & (shmlba - 1)) {
			/*
			 * Round down to the nearest multiple of shmlba.
			 * For sane do_mmap_pgoff() parameters, avoid
			 * round downs that trigger nil-page and MAP_FIXED.
			 */
			if ((shmflg & SHM_RND) && addr >= shmlba)
				addr &= ~(shmlba - 1);
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}

		flags |= MAP_FIXED;
	} else if ((shmflg & SHM_REMAP))
		goto out;

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}
	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	rcu_read_lock();
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	ipc_lock_object(&shp->shm_perm);

	/* check if shm_destroy() is tearing down shp */
	if (!ipc_valid_object(&shp->shm_perm)) {
		ipc_unlock_object(&shp->shm_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(d_inode(path.dentry));
	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd) {
		path_put(&path);
		goto out_nattch;
	}

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	err = PTR_ERR(file);
	if (IS_ERR(file)) {
		kfree(sfd);
		path_put(&path);
		goto out_nattch;
	}

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;
	err = security_mmap_file(file, prot, flags);
	if (err)
		goto out_fput;

	if (down_write_killable(&current->mm->mmap_sem)) {
		err = -EINTR;
		goto out_fput;
	}

	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (addr + size < addr)
			goto invalid;

		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
	}

	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate, NULL);
	*raddr = addr;
	err = 0;
	if (IS_ERR_VALUE(addr))
		err = (long)addr;
invalid:
	up_write(&current->mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

out_fput:
	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rwsem);
	shp = shm_lock(ns, shmid);
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
	return err;

out_unlock:
	rcu_read_unlock();
out:
	return err;
}
SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
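/*
 * force_successful_syscall_return() is needed because a valid attach
 * address may look like a negative error value on architectures that
 * flag errors out of band; without it the syscall exit path could
 * misreport a successful shmat() as a failure.
 */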
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct file *file;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;
	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size and that are from the
	 *   same shm segment from which we determined the size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 *
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
			/*
			 * Record the file of the shm segment being
			 * unmapped. With mremap(), someone could place
			 * page from another segment but with equal offsets
			 * in the range we are unmapping.
			 */
			file = vma->vm_file;
			size = i_size_read(file_inode(vma->vm_file));
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}
	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
		    (vma->vm_file == file))
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
		vma = next;
	}
#else	/* CONFIG_MMU */
	/* under NOMMU conditions, the exact address to be destroyed must be
	 * given
	 */
	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct kern_ipc_perm *ipcp = it;
	struct shmid_kernel *shp;
	unsigned long rss = 0, swp = 0;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	seq_printf(s,
		   "%10d %10d  %4o " SIZE_SPEC " %5u %5u "
		   "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
		   SIZE_SPEC " " SIZE_SPEC "\n",
		   shp->shm_perm.key,
		   shp->shm_perm.id,
		   shp->shm_perm.mode,
		   shp->shm_segsz,
		   shp->shm_cprid,
		   shp->shm_lprid,
		   shp->shm_nattch,
		   from_kuid_munged(user_ns, shp->shm_perm.uid),
		   from_kgid_munged(user_ns, shp->shm_perm.gid),
		   from_kuid_munged(user_ns, shp->shm_perm.cuid),
		   from_kgid_munged(user_ns, shp->shm_perm.cgid),
		   shp->shm_atim,
		   shp->shm_dtim,
		   shp->shm_ctim,
		   rss * PAGE_SIZE,
		   swp * PAGE_SIZE);

	return 0;
}
#endif /* CONFIG_PROC_FS */