/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a)		(-EINVAL)
#endif
/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. As such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);
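/*
 * Illustration (a sketch, not part of this file): <linux/highuid.h>
 * pairs these with helpers such as fs_high2lowuid(), which squash any
 * ID that does not fit in 16 bits down to the overflow value, roughly:
 *
 *	on_disk = (uid & ~0xFFFF) ? (uid16_t)fs_overflowuid : (uid16_t)uid;
 */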
/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;
struct pid *cad_pid;
EXPORT_SYMBOL(cad_pid);

/*
 * If set, this is used for preparing the system to power off.
 */

void (*pm_power_off_prepare)(void);
static int set_one_prio(struct task_struct *p, int niceval, int error)
	uid_t euid = current_euid();

	if (p->cred->uid != euid &&
	    p->cred->euid != euid &&
	    !capable(CAP_SYS_NICE)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	set_user_nice(p, niceval);
asmlinkage long sys_setpriority(int which, int who, int niceval)
	struct task_struct *g, *p;
	struct user_struct *user;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */

	read_lock(&tasklist_lock);
		p = find_task_by_vpid(who);
			error = set_one_prio(p, niceval, error);
		pgrp = find_vpid(who);
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		user = current->cred->user;
		if (who != current_uid() && !(user = find_user(who)))
			goto out_unlock;	/* No processes for this user */
			if (p->cred->uid == who)
				error = set_one_prio(p, niceval, error);
		while_each_thread(g, p);
		if (who != current_uid())
			free_uid(user);		/* For find_user() */
	read_unlock(&tasklist_lock);

/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (i.e. it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
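/*
 * Userspace sketch of undoing that offset (hypothetical example; the
 * glibc getpriority() wrapper performs this decoding internally):
 *
 *	long ret = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *	if (ret >= 0)
 *		printf("nice = %ld\n", 20 - ret);	// back in -20..19
 */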
asmlinkage long sys_getpriority(int which, int who)
	struct task_struct *g, *p;
	struct user_struct *user;
	long niceval, retval = -ESRCH;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	read_lock(&tasklist_lock);
		p = find_task_by_vpid(who);
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		pgrp = find_vpid(who);
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		user = current->cred->user;
		if (who != current_uid() && !(user = find_user(who)))
			goto out_unlock;	/* No processes for this user */
			if (p->cred->uid == who) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
		while_each_thread(g, p);
		if (who != current_uid())
			free_uid(user);		/* for find_user() */
	read_unlock(&tasklist_lock);
/**
 * emergency_restart - reboot the system
 *
 * Without shutting down any hardware or taking any locks, reboot the
 * system. This is called when we know we are in trouble, so it is our
 * best effort to reboot. This is safe to call in interrupt context.
 */
void emergency_restart(void)
	machine_emergency_restart();
EXPORT_SYMBOL_GPL(emergency_restart);

void kernel_restart_prepare(char *cmd)
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;

/**
 * kernel_restart - reboot the system
 * @cmd: pointer to buffer containing command to execute for restart
 *	or %NULL
 *
 * Shutdown everything and perform a clean reboot.
 * This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
	kernel_restart_prepare(cmd);
	if (!cmd)
		printk(KERN_EMERG "Restarting system.\n");
	else
		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	machine_restart(cmd);
EXPORT_SYMBOL_GPL(kernel_restart);

static void kernel_shutdown_prepare(enum system_states state)
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
	system_state = state;

/**
 * kernel_halt - halt the system
 *
 * Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
	kernel_shutdown_prepare(SYSTEM_HALT);
	printk(KERN_EMERG "System halted.\n");
EXPORT_SYMBOL_GPL(kernel_halt);

/**
 * kernel_power_off - power_off the system
 *
 * Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	if (pm_power_off_prepare)
		pm_power_off_prepare();
	disable_nonboot_cpus();
	printk(KERN_EMERG "Power down.\n");
EXPORT_SYMBOL_GPL(kernel_power_off);

/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
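/*
 * Userspace sketch (hypothetical example): the magic constants come
 * from <linux/reboot.h>; LINUX_REBOOT_MAGIC1 is 0xfee1dead and the
 * MAGIC2 variants are accepted interchangeably:
 *
 *	sync();		// the syscall itself does not sync, see above
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *		LINUX_REBOOT_CMD_RESTART2, "single");
 */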
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user *arg)
	char buffer[256];

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set, do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:

	case LINUX_REBOOT_CMD_CAD_OFF:

	case LINUX_REBOOT_CMD_HALT:

	case LINUX_REBOOT_CMD_POWER_OFF:

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

	case LINUX_REBOOT_CMD_KEXEC:
		ret = kernel_kexec();

#ifdef CONFIG_HIBERNATION
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		int ret = hibernate();

static void deferred_cad(struct work_struct *dummy)
	kernel_restart(NULL);

/*
 * This function gets called by ctrl-alt-del - i.e. the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
	static DECLARE_WORK(cad_work, deferred_cad);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_cad_pid(SIGINT, 1);

/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa. (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit of a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD. A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
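/*
 * Sketch of the privilege-drop idiom described above, as a setgid
 * program would use it (hypothetical userspace example):
 *
 *	gid_t rgid = getgid();
 *	if (setregid(rgid, rgid) < 0)	// real gid set, so sgid follows
 *		abort();
 *	// rgid, egid and sgid now all hold the unprivileged gid
 */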
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
	struct cred *cred = current->cred;
	int old_rgid = cred->gid;
	int old_egid = cred->egid;
	int new_rgid = old_rgid;
	int new_egid = old_egid;
	int retval;

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	if (rgid != (gid_t) -1) {
		if ((old_rgid == rgid) ||
		    (cred->egid == rgid) ||
		    capable(CAP_SETGID))
			new_rgid = rgid;
		else
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if ((old_rgid == egid) ||
		    (cred->egid == egid) ||
		    (cred->sgid == egid) ||
		    capable(CAP_SETGID))
			new_egid = egid;
		else
			return -EPERM;
	}
	if (new_egid != old_egid) {
		set_dumpable(current->mm, suid_dumpable);
		smp_wmb();
	}
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old_rgid))
		cred->sgid = new_egid;
	cred->fsgid = new_egid;
	cred->egid = new_egid;
	cred->gid = new_rgid;
	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);

/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
	struct cred *cred = current->cred;
	int old_egid = cred->egid;
	int retval;

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	if (capable(CAP_SETGID)) {
		if (old_egid != gid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		cred->gid = cred->egid = cred->sgid = cred->fsgid = gid;
	} else if ((gid == cred->gid) || (gid == cred->sgid)) {
		if (old_egid != gid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		cred->egid = cred->fsgid = gid;
	} else
		return -EPERM;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
static int set_user(uid_t new_ruid, int dumpclear)
	struct user_struct *new_user;

	new_user = alloc_uid(current->nsproxy->user_ns, new_ruid);
	if (!new_user)
		return -EAGAIN;

	if (atomic_read(&new_user->processes) >=
	    current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
	    new_user != current->nsproxy->user_ns->root_user) {
		free_uid(new_user);
		return -EAGAIN;
	}

	switch_uid(new_user);

	if (dumpclear) {
		set_dumpable(current->mm, suid_dumpable);
		smp_wmb();
	}
	current->cred->uid = new_ruid;
	return 0;

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa. (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit of a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD. A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
	struct cred *cred = current->cred;
	int old_ruid, old_euid, old_suid, new_ruid, new_euid;
	int retval;

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	new_ruid = old_ruid = cred->uid;
	new_euid = old_euid = cred->euid;
	old_suid = cred->suid;

	if (ruid != (uid_t) -1) {
		new_ruid = ruid;
		if ((old_ruid != ruid) &&
		    (cred->euid != ruid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (euid != (uid_t) -1) {
		new_euid = euid;
		if ((old_ruid != euid) &&
		    (cred->euid != euid) &&
		    (cred->suid != euid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
		return -EAGAIN;

	if (new_euid != old_euid) {
		set_dumpable(current->mm, suid_dumpable);
		smp_wmb();
	}
	cred->fsuid = cred->euid = new_euid;
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old_ruid))
		cred->suid = cred->euid;
	cred->fsuid = cred->euid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);

/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_IDS is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too. If you don't like this, blame the bright people
 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
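/*
 * Sketch of the BSD-style swap mentioned above (hypothetical userspace
 * example for a setuid-root program):
 *
 *	uid_t ruid = getuid(), euid = geteuid();	// euid is 0 here
 *	setreuid(euid, ruid);	// drop: effective uid is now the user
 *	// ... do work as the invoking user ...
 *	setreuid(ruid, euid);	// swap back: effective uid is 0 again
 */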
asmlinkage long sys_setuid(uid_t uid)
	struct cred *cred = current->cred;
	int old_euid = cred->euid;
	int old_ruid, old_suid, new_suid;
	int retval;

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	old_ruid = cred->uid;
	old_suid = cred->suid;
	new_suid = old_suid;

	if (capable(CAP_SETUID)) {
		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
			return -EAGAIN;
		new_suid = uid;
	} else if ((uid != cred->uid) && (uid != new_suid))
		return -EPERM;

	if (old_euid != uid) {
		set_dumpable(current->mm, suid_dumpable);
		smp_wmb();
	}
	cred->fsuid = cred->euid = uid;
	cred->suid = new_suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);

/*
 * This function implements a generic ability to update ruid, euid,
 * and suid. This allows you to implement the 4.4 compatible seteuid().
 */
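/*
 * For instance, a 4.4BSD-style seteuid() reduces to this call; a C
 * library can implement it as (sketch):
 *
 *	int seteuid(uid_t euid)
 *	{
 *		return setresuid((uid_t) -1, euid, (uid_t) -1);
 *	}
 */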
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
	struct cred *cred = current->cred;
	int old_ruid = cred->uid;
	int old_euid = cred->euid;
	int old_suid = cred->suid;
	int retval;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETUID)) {
		if ((ruid != (uid_t) -1) && (ruid != cred->uid) &&
		    (ruid != cred->euid) && (ruid != cred->suid))
			return -EPERM;
		if ((euid != (uid_t) -1) && (euid != cred->uid) &&
		    (euid != cred->euid) && (euid != cred->suid))
			return -EPERM;
		if ((suid != (uid_t) -1) && (suid != cred->uid) &&
		    (suid != cred->euid) && (suid != cred->suid))
			return -EPERM;
	}
	if (ruid != (uid_t) -1) {
		if (ruid != cred->uid &&
		    set_user(ruid, euid != cred->euid) < 0)
			return -EAGAIN;
	}
	if (euid != (uid_t) -1) {
		if (euid != cred->euid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		cred->euid = euid;
	}
	cred->fsuid = cred->euid;
	if (suid != (uid_t) -1)
		cred->suid = suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);

asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
	struct cred *cred = current->cred;
	int retval;

	if (!(retval = put_user(cred->uid, ruid)) &&
	    !(retval = put_user(cred->euid, euid)))
		retval = put_user(cred->suid, suid);

	return retval;

/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
	struct cred *cred = current->cred;
	int retval;

	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETGID)) {
		if ((rgid != (gid_t) -1) && (rgid != cred->gid) &&
		    (rgid != cred->egid) && (rgid != cred->sgid))
			return -EPERM;
		if ((egid != (gid_t) -1) && (egid != cred->gid) &&
		    (egid != cred->egid) && (egid != cred->sgid))
			return -EPERM;
		if ((sgid != (gid_t) -1) && (sgid != cred->gid) &&
		    (sgid != cred->egid) && (sgid != cred->sgid))
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if (egid != cred->egid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		cred->egid = egid;
	}
	cred->fsgid = cred->egid;
	if (rgid != (gid_t) -1)
		cred->gid = rgid;
	if (sgid != (gid_t) -1)
		cred->sgid = sgid;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);

asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
	struct cred *cred = current->cred;
	int retval;

	if (!(retval = put_user(cred->gid, rgid)) &&
	    !(retval = put_user(cred->egid, egid)))
		retval = put_user(cred->sgid, sgid);

	return retval;

/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access().
 */
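/*
 * Sketch of the intended pattern (hypothetical userspace file server;
 * knfsd does the in-kernel equivalent): assume the requester's identity
 * for permission checks only, one request at a time:
 *
 *	setfsuid(request_uid);	// affects fs checks only, not signals
 *	fd = open(path, O_RDONLY);
 *	setfsuid(geteuid());	// restore our own identity
 *
 * setfsuid() returns the previous fsuid and reports no error, so the
 * caller must compare the return value to detect a refused change.
 */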
asmlinkage long sys_setfsuid(uid_t uid)
	struct cred *cred = current->cred;
	int old_fsuid;

	old_fsuid = cred->fsuid;
	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
		return old_fsuid;

	if (uid == cred->uid || uid == cred->euid ||
	    uid == cred->suid || uid == cred->fsuid ||
	    capable(CAP_SETUID)) {
		if (uid != old_fsuid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		cred->fsuid = uid;
	}

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);

	return old_fsuid;

/*
 * Same again, but for the filesystem gid (setfsgid() mirrors setfsuid()
 * above).
 */
asmlinkage long sys_setfsgid(gid_t gid)
	struct cred *cred = current->cred;
	int old_fsgid;

	old_fsgid = cred->fsgid;
	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
		return old_fsgid;

	if (gid == cred->gid || gid == cred->egid ||
	    gid == cred->sgid || gid == cred->fsgid ||
	    capable(CAP_SETGID)) {
		if (gid != old_fsgid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		cred->fsgid = gid;
	}

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);

	return old_fsgid;
void do_sys_times(struct tms *tms)
	struct task_cputime cputime;
	cputime_t cutime, cstime;

	spin_lock_irq(&current->sighand->siglock);
	thread_group_cputime(current, &cputime);
	cutime = current->signal->cutime;
	cstime = current->signal->cstime;
	spin_unlock_irq(&current->sighand->siglock);
	tms->tms_utime = cputime_to_clock_t(cputime.utime);
	tms->tms_stime = cputime_to_clock_t(cputime.stime);
	tms->tms_cutime = cputime_to_clock_t(cutime);
	tms->tms_cstime = cputime_to_clock_t(cstime);

asmlinkage long sys_times(struct tms __user *tbuf)
	if (tbuf) {
		struct tms tmp;

		do_sys_times(&tmp);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another. -TYT, 12/12/91
 *
 * Ouch. Had to add the 'did_exec' flag to conform completely to POSIX.
 */
asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;

	if (!pid)
		pid = task_pid_vnr(group_leader);

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	p = find_task_by_vpid(pid);

	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		if (task_session(p) != task_session(group_leader))
			goto out;

		if (p != group_leader)
			goto out;

	if (p->signal->leader)
		goto out;

		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp) {
		change_pid(p, PIDTYPE_PGID, pgrp);
		set_task_pgrp(p, pid_nr(pgrp));
	}

out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
asmlinkage long sys_getpgid(pid_t pid)
	struct task_struct *p;

		grp = task_pgrp(current);

		p = find_task_by_vpid(pid);

	retval = security_task_getpgid(p);

	retval = pid_vnr(grp);

#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
	return sys_getpgid(0);

#endif

asmlinkage long sys_getsid(pid_t pid)
	struct task_struct *p;

		sid = task_session(current);

		p = find_task_by_vpid(pid);

		sid = task_session(p);

	retval = security_task_getsid(p);

	retval = pid_vnr(sid);

asmlinkage long sys_setsid(void)
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);

	write_lock_irq(&tasklist_lock);
	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 */
	if (pid_task(sid, PIDTYPE_PGID))
		goto out;

	group_leader->signal->leader = 1;
	__set_special_pids(sid);

	proc_clear_tty(group_leader);

	write_unlock_irq(&tasklist_lock);
/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks * sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;

	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL)
		group_info->blocks[0] = group_info->small_block;
	else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	while (--i >= 0)
		free_page((unsigned long)group_info->blocks[i]);
	kfree(group_info);
	return NULL;

EXPORT_SYMBOL(groups_alloc);
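/*
 * The two-level layout built above is normally accessed through the
 * GROUP_AT() macro (from <linux/sched.h> in this era), roughly:
 *
 *	#define GROUP_AT(gi, i) \
 *		((gi)->blocks[(i) / NGROUPS_PER_BLOCK][(i) % NGROUPS_PER_BLOCK])
 *
 * so sets of up to NGROUPS_SMALL gids live in ->small_block with no
 * extra allocation, while larger sets use one page per block.
 */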
void groups_free(struct group_info *group_info)
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);

EXPORT_SYMBOL(groups_free);

/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
			  struct group_info *group_info)
	int i;
	unsigned int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
		unsigned int len = cp_count * sizeof(*grouplist);

		if (copy_to_user(grouplist, group_info->blocks[i], len))
			return -EFAULT;

		grouplist += NGROUPS_PER_BLOCK;
		count -= cp_count;
	}
	return 0;

/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
			    gid_t __user *grouplist)
	int i;
	unsigned int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
		unsigned int len = cp_count * sizeof(*grouplist);

		if (copy_from_user(group_info->blocks[i], grouplist, len))
			return -EFAULT;

		grouplist += NGROUPS_PER_BLOCK;
		count -= cp_count;
	}
	return 0;

/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
	int base, max, stride;
	int gidsetsize = group_info->ngroups;

	for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
		; /* nothing */
	stride /= 3;

	while (stride) {
		max = gidsetsize - stride;
		for (base = 0; base < max; base++) {
			int left = base;
			int right = left + stride;
			gid_t tmp = GROUP_AT(group_info, right);

			while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
				GROUP_AT(group_info, right) =
					GROUP_AT(group_info, left);
				right = left;
				left -= stride;
			}
			GROUP_AT(group_info, right) = tmp;
		}
		stride /= 3;
	}
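/*
 * The stride loop above builds Knuth's gap sequence (1, 4, 13, 40,
 * 121, ...) via h = 3*h + 1; the stride is then divided by three on
 * each pass, so the final pass is a plain insertion sort with stride 1
 * and the list ends up fully sorted.
 */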
/* a simple bsearch */
int groups_search(struct group_info *group_info, gid_t grp)
	unsigned int left, right;

	left = 0;
	right = group_info->ngroups;
	while (left < right) {
		unsigned int mid = (left + right) / 2;
		int cmp = grp - GROUP_AT(group_info, mid);
		if (cmp > 0)
			left = mid + 1;
		else if (cmp < 0)
			right = mid;
		else
			return 1;
	}
	return 0;

/**
 * set_groups - Change a group subscription in a security record
 * @cred: The credentials record to alter
 * @group_info: The group list to impose
 *
 * Validate a group subscription and, if valid, impose it upon a task security
 * record.
 */
int set_groups(struct cred *cred, struct group_info *group_info)
	int retval;
	struct group_info *old_info;

	retval = security_task_setgroups(group_info);
	if (retval)
		return retval;

	groups_sort(group_info);
	get_group_info(group_info);

	spin_lock(&cred->lock);
	old_info = cred->group_info;
	cred->group_info = group_info;
	spin_unlock(&cred->lock);

	put_group_info(old_info);
	return 0;

EXPORT_SYMBOL(set_groups);

/**
 * set_current_groups - Change current's group subscription
 * @group_info: The group list to impose
 *
 * Validate a group subscription and, if valid, impose it upon current's task
 * security record.
 */
int set_current_groups(struct group_info *group_info)
	return set_groups(current->cred, group_info);

EXPORT_SYMBOL(set_current_groups);

asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
	struct cred *cred = current->cred;
	int i;

	/*
	 * SMP: Nobody else can change our grouplist. Thus we are
	 *	safe.
	 */

	/* no need to grab task_lock here; it cannot change */
	i = cred->group_info->ngroups;

	if (i > gidsetsize) {
		i = -EINVAL;
		goto out;
	}
	if (groups_to_user(grouplist, cred->group_info)) {
		i = -EFAULT;
		goto out;
	}
out:
	return i;

/*
 *	SMP: Our groups are copy-on-write. We can set them safely
 *	without another task interfering.
 */

asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	retval = groups_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;

/*
 * Check whether we're fsgid/egid or in the supplementary group list.
 */
int in_group_p(gid_t grp)
	struct cred *cred = current->cred;
	int retval = 1;

	if (grp != cred->fsgid)
		retval = groups_search(cred->group_info, grp);
	return retval;

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
	struct cred *cred = current->cred;
	int retval = 1;

	if (grp != cred->egid)
		retval = groups_search(cred->group_info, grp);
	return retval;

EXPORT_SYMBOL(in_egroup_p);
DECLARE_RWSEM(uts_sem);

asmlinkage long sys_newuname(struct new_utsname __user *name)
	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof *name))
		errno = -EFAULT;

asmlinkage long sys_sethostname(char __user *name, int len)
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);

	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->nodename, tmp, len);
		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
	}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
	struct new_utsname *u;

	down_read(&uts_sem);
	u = utsname();
	i = 1 + strlen(u->nodename);
	if (i > len)
		i = len;
	if (copy_to_user(name, u->nodename, i))
		errno = -EFAULT;

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname().
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);

	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->domainname, tmp, len);
		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
	}
asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	else {
		struct rlimit value;
		task_lock(current->group_leader);
		value = current->signal->rlim[resource];
		task_unlock(current->group_leader);
		return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
	}

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 * Back compatibility for getrlimit. Needed for some apps.
 */
asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
	struct rlimit x;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;

asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
	struct rlimit new_rlim, *old_rlim;
	int retval;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	if (resource == RLIMIT_NOFILE) {
		if (new_rlim.rlim_max == RLIM_INFINITY)
			new_rlim.rlim_max = sysctl_nr_open;
		if (new_rlim.rlim_cur == RLIM_INFINITY)
			new_rlim.rlim_cur = sysctl_nr_open;
		if (new_rlim.rlim_max > sysctl_nr_open)
			return -EPERM;
	}

	if (new_rlim.rlim_cur > new_rlim.rlim_max)
		return -EINVAL;

	retval = security_task_setrlimit(resource, &new_rlim);
	if (retval)
		return retval;

	if (resource == RLIMIT_CPU && new_rlim.rlim_cur == 0) {
		/*
		 * The caller is asking for an immediate RLIMIT_CPU
		 * expiry. But we use the zero value to mean "it was
		 * never set". So let's cheat and make it one second
		 * instead.
		 */
		new_rlim.rlim_cur = 1;
	}

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource != RLIMIT_CPU)
		goto out;

	/*
	 * RLIMIT_CPU handling. Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU. This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it.
	 */
	if (new_rlim.rlim_cur == RLIM_INFINITY)
		goto out;

	update_rlimit_cpu(new_rlim.rlim_cur);
out:
	return 0;
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*. After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this. It will make moving the rest of the information
 * a lot simpler! (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters. But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums. We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none, so this sample is before reaping.
 *
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded,
 * non-current multithreaded. Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, a write memory barrier is implied in __exit_signal,
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 */

static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
	r->ru_nvcsw += t->nvcsw;
	r->ru_nivcsw += t->nivcsw;
	r->ru_minflt += t->min_flt;
	r->ru_majflt += t->maj_flt;
	r->ru_inblock += task_io_get_inblock(t);
	r->ru_oublock += task_io_get_oublock(t);

static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;
	struct task_cputime cputime;

	memset((char *) r, 0, sizeof *r);
	utime = stime = cputime_zero;

	if (who == RUSAGE_THREAD) {
		accumulate_thread_rusage(p, r);
		goto out;
	}

	if (!lock_task_sighand(p, &flags))
		return;

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->cinblock;
		r->ru_oublock = p->signal->coublock;

		if (who == RUSAGE_CHILDREN)
			break;

	case RUSAGE_SELF:
		thread_group_cputime(p, &cputime);
		utime = cputime_add(utime, cputime.utime);
		stime = cputime_add(stime, cputime.stime);
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		r->ru_inblock += p->signal->inblock;
		r->ru_oublock += p->signal->oublock;
			accumulate_thread_rusage(t, r);
	}

	unlock_task_sighand(p, &flags);

out:
	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);

int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
	struct rusage r;

	k_getrusage(p, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;

asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;
	return getrusage(current, who, ru);
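/*
 * Userspace sketch (hypothetical example):
 *
 *	struct rusage ru;
 *	if (getrusage(RUSAGE_SELF, &ru) == 0)
 *		printf("utime %ld.%06ld, majflt %ld\n",
 *		       ru.ru_utime.tv_sec, ru.ru_utime.tv_usec,
 *		       ru.ru_majflt);
 *
 * RUSAGE_THREAD is the Linux-specific extension handled above; it
 * reports the calling thread only.
 */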
asmlinkage long sys_umask(int mask)
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;

asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5)
	struct task_struct *me = current;
	unsigned char comm[sizeof(me->comm)];
	long error;

	if (security_task_prctl(option, arg2, arg3, arg4, arg5, &error))
		return error;

	switch (option) {
	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		me->pdeath_signal = arg2;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(me->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = get_dumpable(me->mm);
		break;
	case PR_SET_DUMPABLE:
		if (arg2 < 0 || arg2 > 1) {
			error = -EINVAL;
			break;
		}
		set_dumpable(me->mm, arg2);
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(me, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(me, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 != PR_TIMING_STATISTICAL)
			error = -EINVAL;
		break;
	case PR_SET_NAME:
		comm[sizeof(me->comm) - 1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
			return -EFAULT;
		set_task_comm(me, comm);
		return 0;
	case PR_GET_NAME:
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm,
				 sizeof(comm)))
			return -EFAULT;
		return 0;
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(me, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(me, arg2);
		break;
	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
		break;
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2);
		break;
	case PR_GET_TSC:
		error = GET_TSC_CTL(arg2);
		break;
	case PR_SET_TSC:
		error = SET_TSC_CTL(arg2);
		break;
	case PR_GET_TIMERSLACK:
		error = current->timer_slack_ns;
		break;
	case PR_SET_TIMERSLACK:
		if (arg2 <= 0)
			current->timer_slack_ns =
				current->default_timer_slack_ns;
		else
			current->timer_slack_ns = arg2;
		break;
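/*
 * Userspace sketch of a few of the options above (hypothetical
 * example); note the two conventions: some options return the value
 * directly, others store it through the arg2 pointer:
 *
 *	prctl(PR_SET_PDEATHSIG, SIGTERM);	// signal on parent death
 *	long slack = prctl(PR_GET_TIMERSLACK);	// value returned, in ns
 *	int sig;
 *	prctl(PR_GET_PDEATHSIG, (unsigned long)&sig);	// stored via arg2
 */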
asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
			   struct getcpu_cache __user *unused)
	int err = 0;
	int cpu = raw_smp_processor_id();

	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;

char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";

static void argv_cleanup(char **argv, char **envp)
	argv_free(argv);

/**
 * orderly_poweroff - Trigger an orderly system poweroff
 * @force: force poweroff if command execution fails
 *
 * This may be called from any context to trigger a system shutdown.
 * If the orderly shutdown fails, it will force an immediate shutdown.
 */
int orderly_poweroff(bool force)
	int argc;
	char **argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
	static char *envp[] = {
		"HOME=/",
		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
		NULL
	};
	int ret = -ENOMEM;
	struct subprocess_info *info;

	if (argv == NULL) {
		printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
		       __func__, poweroff_cmd);
		goto out;
	}

	info = call_usermodehelper_setup(argv[0], argv, envp, GFP_ATOMIC);

	call_usermodehelper_setcleanup(info, argv_cleanup);

	ret = call_usermodehelper_exec(info, UMH_NO_WAIT);

out:
	if (ret && force) {
		printk(KERN_WARNING "Failed to start orderly shutdown: "
		       "forcing the issue\n");

		/*
		 * I guess this should try to kick off some daemon to
		 * sync and poweroff asap. Or not even bother syncing
		 * if we're doing an emergency shutdown?
		 */
		emergency_sync();
		kernel_power_off();
	}

	return ret;
EXPORT_SYMBOL_GPL(orderly_poweroff);
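/*
 * Typical caller (hypothetical in-kernel example): a thermal or
 * platform driver that detects a critical condition can request a
 * clean userspace-driven shutdown and still guarantee power-off:
 *
 *	if (temp_mC > critical_trip_mC)
 *		orderly_poweroff(true);		// force power-off on failure
 */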