/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)	(-EINVAL)
#endif
/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

#ifdef CONFIG_UID16
EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
#endif

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;
struct pid *cad_pid;
EXPORT_SYMBOL(cad_pid);
/*
 *	Notifier list for kernel code which wants to be called
 *	at shutdown. This is used to stop any idling DMA operations
 *	and the like.
 */

static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);

/*
 *	Notifier chain core routines.  The exported routines below
 *	are layered on top of these, with appropriate locking added.
 */

static int notifier_chain_register(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if (n->priority > (*nl)->priority)
			break;
		nl = &((*nl)->next);
	}
	n->next = *nl;
	rcu_assign_pointer(*nl, n);
	return 0;
}

static int notifier_chain_unregister(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if ((*nl) == n) {
			rcu_assign_pointer(*nl, n->next);
			return 0;
		}
		nl = &((*nl)->next);
	}
	return -ENOENT;
}
/**
 * notifier_call_chain - Informs the registered notifiers about an event.
 *	@nl:		Pointer to head of the blocking notifier chain
 *	@val:		Value passed unmodified to notifier function
 *	@v:		Pointer passed unmodified to notifier function
 *	@nr_to_call:	Number of notifier functions to be called. Don't care
 *			value of this parameter is -1.
 *	@nr_calls:	Records the number of notifications sent. Don't care
 *			value of this field is NULL.
 *	@returns:	notifier_call_chain returns the value returned by the
 *			last notifier function called.
 */

static int __kprobes notifier_call_chain(struct notifier_block **nl,
					unsigned long val, void *v,
					int nr_to_call, int *nr_calls)
{
	int ret = NOTIFY_DONE;
	struct notifier_block *nb, *next_nb;

	nb = rcu_dereference(*nl);

	while (nb && nr_to_call) {
		next_nb = rcu_dereference(nb->next);
		ret = nb->notifier_call(nb, val, v);

		if (nr_calls)
			(*nr_calls)++;

		if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
			break;
		nb = next_nb;
		nr_to_call--;
	}
	return ret;
}
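/*
 * Illustrative sketch: what the client side of a notifier chain looks
 * like.  A caller fills in a struct notifier_block with a callback and
 * an optional priority; notifier_chain_register() above keeps chains
 * sorted so higher priorities run first.  The example_* names here are
 * hypothetical, not part of this file.
 */
#if 0
static int example_event(struct notifier_block *nb, unsigned long val, void *v)
{
	/* val and v are whatever the chain's caller passed in */
	return NOTIFY_OK;	/* or NOTIFY_STOP to halt traversal */
}

static struct notifier_block example_nb = {
	.notifier_call	= example_event,
	.priority	= 10,	/* runs before priority-0 entries */
};
#endif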
/*
 *	Atomic notifier chain routines.  Registration and unregistration
 *	use a spinlock, and call_chain is synchronized by RCU (no locks).
 */

/**
 *	atomic_notifier_chain_register - Add notifier to an atomic notifier chain
 *	@nh: Pointer to head of the atomic notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to an atomic notifier chain.
 *
 *	Currently always returns zero.
 */

int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_register(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);

/**
 *	atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
 *	@nh: Pointer to head of the atomic notifier chain
 *	@n: Entry to remove from notifier chain
 *
 *	Removes a notifier from an atomic notifier chain.
 *
 *	Returns zero on success or %-ENOENT on failure.
 */
int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_unregister(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	synchronize_rcu();
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);

/**
 * __atomic_notifier_call_chain - Call functions in an atomic notifier chain
 *	@nh: Pointer to head of the atomic notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *	@nr_to_call: See the comment for notifier_call_chain.
 *	@nr_calls: See the comment for notifier_call_chain.
 *
 *	Calls each function in a notifier chain in turn.  The functions
 *	run in an atomic context, so they must not block.
 *	This routine uses RCU to synchronize with changes to the chain.
 *
 *	If the return value of the notifier can be and'ed
 *	with %NOTIFY_STOP_MASK then atomic_notifier_call_chain()
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise the return value is the return value
 *	of the last notifier function called.
 */

int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
					unsigned long val, void *v,
					int nr_to_call, int *nr_calls)
{
	int ret;

	rcu_read_lock();
	ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain);

int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh,
		unsigned long val, void *v)
{
	return __atomic_notifier_call_chain(nh, val, v, -1, NULL);
}

EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
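/*
 * Illustrative sketch: an atomic chain is declared statically and may be
 * fired from atomic context, so its callbacks must not block.  The
 * example_* names are hypothetical (example_nb is the block from the
 * sketch further up).
 */
#if 0
static ATOMIC_NOTIFIER_HEAD(example_atomic_chain);

static void example_setup(void)		/* process context */
{
	atomic_notifier_chain_register(&example_atomic_chain, &example_nb);
}

static void example_fire(void)		/* safe even from IRQ context */
{
	atomic_notifier_call_chain(&example_atomic_chain, 0, NULL);
}
#endif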
/*
 *	Blocking notifier chain routines.  All access to the chain is
 *	synchronized by an rwsem.
 */

/**
 *	blocking_notifier_chain_register - Add notifier to a blocking notifier chain
 *	@nh: Pointer to head of the blocking notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to a blocking notifier chain.
 *	Must be called in process context.
 *
 *	Currently always returns zero.
 */

int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_register(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_register(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);

/**
 *	blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
 *	@nh: Pointer to head of the blocking notifier chain
 *	@n: Entry to remove from notifier chain
 *
 *	Removes a notifier from a blocking notifier chain.
 *	Must be called from process context.
 *
 *	Returns zero on success or %-ENOENT on failure.
 */
int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_unregister(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_unregister(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);

/**
 *	__blocking_notifier_call_chain - Call functions in a blocking notifier chain
 *	@nh: Pointer to head of the blocking notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *	@nr_to_call: See comment for notifier_call_chain.
 *	@nr_calls: See comment for notifier_call_chain.
 *
 *	Calls each function in a notifier chain in turn.  The functions
 *	run in a process context, so they are allowed to block.
 *
 *	If the return value of the notifier can be and'ed
 *	with %NOTIFY_STOP_MASK then blocking_notifier_call_chain()
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise the return value is the return value
 *	of the last notifier function called.
 */

int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
				   unsigned long val, void *v,
				   int nr_to_call, int *nr_calls)
{
	int ret = NOTIFY_DONE;

	/*
	 * We check the head outside the lock, but if this access is
	 * racy then it does not matter what the result of the test
	 * is, we re-check the list after having taken the lock anyway:
	 */
	if (rcu_dereference(nh->head)) {
		down_read(&nh->rwsem);
		ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
					nr_calls);
		up_read(&nh->rwsem);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain);

int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
		unsigned long val, void *v)
{
	return __blocking_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
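/*
 * Illustrative sketch: a blocking chain is the process-context variant;
 * its callbacks may sleep.  reboot_notifier_list above is exactly such a
 * chain.  The example_* names are hypothetical.
 */
#if 0
static BLOCKING_NOTIFIER_HEAD(example_blocking_chain);

static void example_announce(void *data)
{
	/* Process context only; callbacks on this chain may block. */
	blocking_notifier_call_chain(&example_blocking_chain, 0, data);
}
#endif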
/*
 *	Raw notifier chain routines.  There is no protection;
 *	the caller must provide it.  Use at your own risk!
 */

/**
 *	raw_notifier_chain_register - Add notifier to a raw notifier chain
 *	@nh: Pointer to head of the raw notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to a raw notifier chain.
 *	All locking must be provided by the caller.
 *
 *	Currently always returns zero.
 */

int raw_notifier_chain_register(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_register(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_register);

/**
 *	raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
 *	@nh: Pointer to head of the raw notifier chain
 *	@n: Entry to remove from notifier chain
 *
 *	Removes a notifier from a raw notifier chain.
 *	All locking must be provided by the caller.
 *
 *	Returns zero on success or %-ENOENT on failure.
 */
int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_unregister(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);

/**
 *	__raw_notifier_call_chain - Call functions in a raw notifier chain
 *	@nh: Pointer to head of the raw notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *	@nr_to_call: See comment for notifier_call_chain.
 *	@nr_calls: See comment for notifier_call_chain.
 *
 *	Calls each function in a notifier chain in turn.  The functions
 *	run in an undefined context.
 *	All locking must be provided by the caller.
 *
 *	If the return value of the notifier can be and'ed
 *	with %NOTIFY_STOP_MASK then raw_notifier_call_chain()
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise the return value is the return value
 *	of the last notifier function called.
 */

int __raw_notifier_call_chain(struct raw_notifier_head *nh,
			      unsigned long val, void *v,
			      int nr_to_call, int *nr_calls)
{
	return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
}

EXPORT_SYMBOL_GPL(__raw_notifier_call_chain);

int raw_notifier_call_chain(struct raw_notifier_head *nh,
		unsigned long val, void *v)
{
	return __raw_notifier_call_chain(nh, val, v, -1, NULL);
}

EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
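/*
 * Illustrative sketch: with a raw chain the caller supplies all the
 * locking, e.g. one mutex serializing both mutation and traversal.
 * The example_* names are hypothetical.
 */
#if 0
static RAW_NOTIFIER_HEAD(example_raw_chain);
static DEFINE_MUTEX(example_raw_lock);

static void example_raw_fire(void)
{
	mutex_lock(&example_raw_lock);
	raw_notifier_call_chain(&example_raw_chain, 0, NULL);
	mutex_unlock(&example_raw_lock);
}
#endif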
/*
 *	SRCU notifier chain routines.  Registration and unregistration
 *	use a mutex, and call_chain is synchronized by SRCU (no locks).
 */

/**
 *	srcu_notifier_chain_register - Add notifier to an SRCU notifier chain
 *	@nh: Pointer to head of the SRCU notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to an SRCU notifier chain.
 *	Must be called in process context.
 *
 *	Currently always returns zero.
 */

int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call mutex_lock().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_register(&nh->head, n);

	mutex_lock(&nh->mutex);
	ret = notifier_chain_register(&nh->head, n);
	mutex_unlock(&nh->mutex);
	return ret;
}

EXPORT_SYMBOL_GPL(srcu_notifier_chain_register);

/**
 *	srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain
 *	@nh: Pointer to head of the SRCU notifier chain
 *	@n: Entry to remove from notifier chain
 *
 *	Removes a notifier from an SRCU notifier chain.
 *	Must be called from process context.
 *
 *	Returns zero on success or %-ENOENT on failure.
 */
int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call mutex_lock().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_unregister(&nh->head, n);

	mutex_lock(&nh->mutex);
	ret = notifier_chain_unregister(&nh->head, n);
	mutex_unlock(&nh->mutex);
	synchronize_srcu(&nh->srcu);
	return ret;
}

EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);

/**
 *	__srcu_notifier_call_chain - Call functions in an SRCU notifier chain
 *	@nh: Pointer to head of the SRCU notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *	@nr_to_call: See comment for notifier_call_chain.
 *	@nr_calls: See comment for notifier_call_chain.
 *
 *	Calls each function in a notifier chain in turn.  The functions
 *	run in a process context, so they are allowed to block.
 *
 *	If the return value of the notifier can be and'ed
 *	with %NOTIFY_STOP_MASK then srcu_notifier_call_chain()
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise the return value is the return value
 *	of the last notifier function called.
 */

int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
			       unsigned long val, void *v,
			       int nr_to_call, int *nr_calls)
{
	int ret;
	int idx;

	idx = srcu_read_lock(&nh->srcu);
	ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
	srcu_read_unlock(&nh->srcu, idx);
	return ret;
}
EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain);

int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
		unsigned long val, void *v)
{
	return __srcu_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);

/**
 *	srcu_init_notifier_head - Initialize an SRCU notifier head
 *	@nh: Pointer to head of the srcu notifier chain
 *
 *	Unlike other sorts of notifier heads, SRCU notifier heads require
 *	dynamic initialization.  Be sure to call this routine before
 *	calling any of the other SRCU notifier routines for this head.
 *
 *	If an SRCU notifier head is deallocated, it must first be cleaned
 *	up by calling srcu_cleanup_notifier_head().  Otherwise the head's
 *	per-cpu data (used by the SRCU mechanism) will leak.
 */

void srcu_init_notifier_head(struct srcu_notifier_head *nh)
{
	mutex_init(&nh->mutex);
	if (init_srcu_struct(&nh->srcu) < 0)
		BUG();
	nh->head = NULL;
}

EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
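/*
 * Illustrative sketch: unlike the other chain types there is no static
 * initializer for SRCU heads, so srcu_init_notifier_head() must run
 * before first use.  The example_* names are hypothetical.
 */
#if 0
static struct srcu_notifier_head example_srcu_chain;

static int __init example_init(void)
{
	srcu_init_notifier_head(&example_srcu_chain);
	return 0;
}
#endif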
/**
 *	register_reboot_notifier - Register function to be called at reboot time
 *	@nb: Info about notifier function to be called
 *
 *	Registers a function with the list of functions
 *	to be called at reboot time.
 *
 *	Currently always returns zero, as blocking_notifier_chain_register()
 *	always returns zero.
 */

int register_reboot_notifier(struct notifier_block * nb)
{
	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(register_reboot_notifier);

/**
 *	unregister_reboot_notifier - Unregister previously registered reboot notifier
 *	@nb: Hook to be unregistered
 *
 *	Unregisters a previously registered reboot
 *	notifier function.
 *
 *	Returns zero on success, or %-ENOENT on failure.
 */

int unregister_reboot_notifier(struct notifier_block * nb)
{
	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(unregister_reboot_notifier);
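/*
 * Illustrative sketch: a driver that must quiesce hardware before the
 * machine goes down registers on reboot_notifier_list via the wrappers
 * above and keys off the SYS_* event code.  The example_* names are
 * hypothetical.
 */
#if 0
static int example_reboot_event(struct notifier_block *nb,
				unsigned long code, void *cmd)
{
	if (code == SYS_RESTART || code == SYS_HALT || code == SYS_POWER_OFF)
		;	/* stop DMA, park the device, etc. */
	return NOTIFY_DONE;
}

static struct notifier_block example_reboot_nb = {
	.notifier_call = example_reboot_event,
};

/* somewhere in driver init: register_reboot_notifier(&example_reboot_nb); */
#endif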
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (p->uid != current->euid &&
		p->euid != current->euid && !capable(CAP_SYS_NICE)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}

asmlinkage long sys_setpriority(int which, int who, int niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	int error = -EINVAL;
	struct pid *pgrp;

	if (which > 2 || which < 0)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	read_lock(&tasklist_lock);
	switch (which) {
		case PRIO_PROCESS:
			if (who)
				p = find_task_by_pid(who);
			else
				p = current;
			if (p)
				error = set_one_prio(p, niceval, error);
			break;
		case PRIO_PGRP:
			if (who)
				pgrp = find_pid(who);
			else
				pgrp = task_pgrp(current);
			do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
				error = set_one_prio(p, niceval, error);
			} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
			break;
		case PRIO_USER:
			user = current->user;
			if (!who)
				who = current->uid;
			else
				if ((who != current->uid) && !(user = find_user(who)))
					goto out_unlock;	/* No processes for this user */

			do_each_thread(g, p)
				if (p->uid == who)
					error = set_one_prio(p, niceval, error);
			while_each_thread(g, p);
			if (who != current->uid)
				free_uid(user);		/* For find_user() */
			break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
out:
	return error;
}
/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
asmlinkage long sys_getpriority(int which, int who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	long niceval, retval = -ESRCH;
	struct pid *pgrp;

	if (which > 2 || which < 0)
		return -EINVAL;

	read_lock(&tasklist_lock);
	switch (which) {
		case PRIO_PROCESS:
			if (who)
				p = find_task_by_pid(who);
			else
				p = current;
			if (p) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
			break;
		case PRIO_PGRP:
			if (who)
				pgrp = find_pid(who);
			else
				pgrp = task_pgrp(current);
			do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
			break;
		case PRIO_USER:
			user = current->user;
			if (!who)
				who = current->uid;
			else
				if ((who != current->uid) && !(user = find_user(who)))
					goto out_unlock;	/* No processes for this user */

			do_each_thread(g, p)
				if (p->uid == who) {
					niceval = 20 - task_nice(p);
					if (niceval > retval)
						retval = niceval;
				}
			while_each_thread(g, p);
			if (who != current->uid)
				free_uid(user);		/* for find_user() */
			break;
	}
out_unlock:
	read_unlock(&tasklist_lock);

	return retval;
}
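/*
 * Worked example of the offset above: a task at nice -20 yields
 * 20 - (-20) = 40, nice 0 yields 20, and nice 19 yields 1, so the
 * syscall's success range is 40..1 and never collides with negative
 * errno values.  Userspace wrappers undo it by computing 20 - ret.
 */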
/**
 *	emergency_restart - reboot the system
 *
 *	Without shutting down any hardware or taking any locks
 *	reboot the system.  This is called when we know we are in
 *	trouble so this is our best effort to reboot.  This is
 *	safe to call in interrupt context.
 */
void emergency_restart(void)
{
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);

static void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	device_shutdown();
}

/**
 *	kernel_restart - reboot the system
 *	@cmd: pointer to buffer containing command to execute for restart
 *		or %NULL
 *
 *	Shutdown everything and perform a clean reboot.
 *	This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	if (!cmd)
		printk(KERN_EMERG "Restarting system.\n");
	else
		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);
/**
 *	kernel_kexec - reboot the system
 *
 *	Move into place and start executing a preloaded standalone
 *	executable.  If nothing was preloaded return an error.
 */
static void kernel_kexec(void)
{
#ifdef CONFIG_KEXEC
	struct kimage *image;
	image = xchg(&kexec_image, NULL);
	if (!image)
		return;
	kernel_restart_prepare(NULL);
	printk(KERN_EMERG "Starting new kernel\n");
	machine_shutdown();
	machine_kexec(image);
#endif
}
void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
	system_state = state;
	device_shutdown();
}

/**
 *	kernel_halt - halt the system
 *
 *	Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	printk(KERN_EMERG "System halted.\n");
	machine_halt();
}

EXPORT_SYMBOL_GPL(kernel_halt);

/**
 *	kernel_power_off - power_off the system
 *
 *	Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	printk(KERN_EMERG "Power down.\n");
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);
/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
{
	char buffer[256];

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	lock_kernel();
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			unlock_kernel();
			return -EFAULT;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

	case LINUX_REBOOT_CMD_KEXEC:
		kernel_kexec();
		unlock_kernel();
		return -EINVAL;

#ifdef CONFIG_SOFTWARE_SUSPEND
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		{
			int ret = hibernate();
			unlock_kernel();
			return ret;
		}
#endif

	default:
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();
	return 0;
}
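/*
 * Illustrative sketch of the matching userspace call (a hypothetical
 * snippet, not kernel code).  Both magic values must match or the call
 * fails with -EINVAL, and the caller needs CAP_SYS_BOOT:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/reboot.h>
 *
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *		LINUX_REBOOT_CMD_CAD_OFF, NULL);
 */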
static void deferred_cad(struct work_struct *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_cad_pid(SIGINT, 1);
}
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
	int old_rgid = current->gid;
	int old_egid = current->egid;
	int new_rgid = old_rgid;
	int new_egid = old_egid;
	int retval;

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	if (rgid != (gid_t) -1) {
		if ((old_rgid == rgid) ||
		    (current->egid==rgid) ||
		    capable(CAP_SETGID))
			new_rgid = rgid;
		else
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if ((old_rgid == egid) ||
		    (current->egid == egid) ||
		    (current->sgid == egid) ||
		    capable(CAP_SETGID))
			new_egid = egid;
		else
			return -EPERM;
	}
	if (new_egid != old_egid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old_rgid))
		current->sgid = new_egid;
	current->fsgid = new_egid;
	current->egid = new_egid;
	current->gid = new_rgid;
	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
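/*
 * Illustrative sketch (hypothetical userspace snippet): a setgid program
 * dropping its group privilege for good.  Because setting the real gid
 * forces the saved gid to the new effective gid, there is no way back:
 *
 *	gid_t rgid = getgid();
 *	setregid(rgid, rgid);	-- now rgid == egid == sgid == real gid
 */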
/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
{
	int old_egid = current->egid;
	int retval;

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	if (capable(CAP_SETGID)) {
		if (old_egid != gid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->gid = current->egid = current->sgid = current->fsgid = gid;
	} else if ((gid == current->gid) || (gid == current->sgid)) {
		if (old_egid != gid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = current->fsgid = gid;
	}
	else
		return -EPERM;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
static int set_user(uid_t new_ruid, int dumpclear)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new_ruid);
	if (!new_user)
		return -EAGAIN;

	if (atomic_read(&new_user->processes) >=
			current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
			new_user != &root_user) {
		free_uid(new_user);
		return -EAGAIN;
	}

	switch_uid(new_user);

	if (dumpclear) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->uid = new_ruid;
	return 0;
}
/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
	int old_ruid, old_euid, old_suid, new_ruid, new_euid;
	int retval;

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	new_ruid = old_ruid = current->uid;
	new_euid = old_euid = current->euid;
	old_suid = current->suid;

	if (ruid != (uid_t) -1) {
		new_ruid = ruid;
		if ((old_ruid != ruid) &&
		    (current->euid != ruid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (euid != (uid_t) -1) {
		new_euid = euid;
		if ((old_ruid != euid) &&
		    (current->euid != euid) &&
		    (current->suid != euid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
		return -EAGAIN;

	if (new_euid != old_euid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = new_euid;
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old_ruid))
		current->suid = current->euid;
	current->fsuid = current->euid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}
/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
asmlinkage long sys_setuid(uid_t uid)
{
	int old_euid = current->euid;
	int old_ruid, old_suid, new_suid;
	int retval;

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	old_ruid = current->uid;
	old_suid = current->suid;
	new_suid = old_suid;

	if (capable(CAP_SETUID)) {
		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
			return -EAGAIN;
		new_suid = uid;
	} else if ((uid != current->uid) && (uid != new_suid))
		return -EPERM;

	if (old_euid != uid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = uid;
	current->suid = new_suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}
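/*
 * Illustrative sketch (hypothetical userspace snippets) of the contrast
 * described above, for a process running from a setuid-root binary:
 *
 *	setuid(getuid());	-- as root this clears euid AND suid:
 *				   the drop is permanent
 *
 *	seteuid(getuid());	-- drop only the effective uid ...
 *	seteuid(0);		-- ... and regain it later via the
 *				   still-zero saved uid
 */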
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	int old_ruid = current->uid;
	int old_euid = current->euid;
	int old_suid = current->suid;
	int retval;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETUID)) {
		if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
		    (ruid != current->euid) && (ruid != current->suid))
			return -EPERM;
		if ((euid != (uid_t) -1) && (euid != current->uid) &&
		    (euid != current->euid) && (euid != current->suid))
			return -EPERM;
		if ((suid != (uid_t) -1) && (suid != current->uid) &&
		    (suid != current->euid) && (suid != current->suid))
			return -EPERM;
	}
	if (ruid != (uid_t) -1) {
		if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
			return -EAGAIN;
	}
	if (euid != (uid_t) -1) {
		if (euid != current->euid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->euid = euid;
	}
	current->fsuid = current->euid;
	if (suid != (uid_t) -1)
		current->suid = suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}

asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
{
	int retval;

	if (!(retval = put_user(current->uid, ruid)) &&
	    !(retval = put_user(current->euid, euid)))
		retval = put_user(current->suid, suid);

	return retval;
}
/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	int retval;

	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETGID)) {
		if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
		    (rgid != current->egid) && (rgid != current->sgid))
			return -EPERM;
		if ((egid != (gid_t) -1) && (egid != current->gid) &&
		    (egid != current->egid) && (egid != current->sgid))
			return -EPERM;
		if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
		    (sgid != current->egid) && (sgid != current->sgid))
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if (egid != current->egid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = egid;
	}
	current->fsgid = current->egid;
	if (rgid != (gid_t) -1)
		current->gid = rgid;
	if (sgid != (gid_t) -1)
		current->sgid = sgid;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}

asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
{
	int retval;

	if (!(retval = put_user(current->gid, rgid)) &&
	    !(retval = put_user(current->egid, egid)))
		retval = put_user(current->sgid, sgid);

	return retval;
}
/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
asmlinkage long sys_setfsuid(uid_t uid)
{
	int old_fsuid;

	old_fsuid = current->fsuid;
	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
		return old_fsuid;

	if (uid == current->uid || uid == current->euid ||
	    uid == current->suid || uid == current->fsuid ||
	    capable(CAP_SETUID)) {
		if (uid != old_fsuid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsuid = uid;
	}

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);

	return old_fsuid;
}

/*
 * Samma på svenska.. ("the same in Swedish" - i.e. the gid counterpart)
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
	int old_fsgid;

	old_fsgid = current->fsgid;
	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
		return old_fsgid;

	if (gid == current->gid || gid == current->egid ||
	    gid == current->sgid || gid == current->fsgid ||
	    capable(CAP_SETGID)) {
		if (gid != old_fsgid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsgid = gid;
		key_fsgid_changed(current);
		proc_id_connector(current, PROC_EVENT_GID);
	}
	return old_fsgid;
}
asmlinkage long sys_times(struct tms __user * tbuf)
{
	/*
	 *	In the SMP world we might just be unlucky and have one of
	 *	the times increment as we use it.  Since the value is an
	 *	atomically safe type this is just fine.  Conceptually its
	 *	as if the syscall took an instant longer to occur.
	 */
	if (tbuf) {
		struct tms tmp;
		struct task_struct *tsk = current;
		struct task_struct *t;
		cputime_t utime, stime, cutime, cstime;

		spin_lock_irq(&tsk->sighand->siglock);
		utime = tsk->signal->utime;
		stime = tsk->signal->stime;
		t = tsk;
		do {
			utime = cputime_add(utime, t->utime);
			stime = cputime_add(stime, t->stime);
			t = next_thread(t);
		} while (t != tsk);

		cutime = tsk->signal->cutime;
		cstime = tsk->signal->cstime;
		spin_unlock_irq(&tsk->sighand->siglock);

		tmp.tms_utime = cputime_to_clock_t(utime);
		tmp.tms_stime = cputime_to_clock_t(stime);
		tmp.tms_cutime = cputime_to_clock_t(cutime);
		tmp.tms_cstime = cputime_to_clock_t(cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */

asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	int err = -EINVAL;

	if (!pid)
		pid = group_leader->pid;
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_pid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (p->real_parent == group_leader) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	if (pgid != pid) {
		struct task_struct *g =
			find_task_by_pid_type(PIDTYPE_PGID, pgid);

		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (process_group(p) != pgid) {
		detach_pid(p, PIDTYPE_PGID);
		p->signal->pgrp = pgid;
		attach_pid(p, PIDTYPE_PGID, pgid);
	}

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	return err;
}
asmlinkage long sys_getpgid(pid_t pid)
{
	if (!pid)
		return process_group(current);
	else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getpgid(p);
			if (!retval)
				retval = process_group(p);
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}

#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
{
	/* SMP - assuming writes are word atomic this is fine */
	return process_group(current);
}

#endif
asmlinkage long sys_getsid(pid_t pid)
{
	if (!pid)
		return process_session(current);
	else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getsid(p);
			if (!retval)
				retval = process_session(p);
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}
asmlinkage long sys_setsid(void)
{
	struct task_struct *group_leader = current->group_leader;
	pid_t session;
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);

	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	session = group_leader->pid;
	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 *
	 * Don't check if session id == 1 because kernel threads use this
	 * session id and so the check will always fail and make it so
	 * init cannot successfully call setsid.
	 */
	if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session))
		goto out;

	group_leader->signal->leader = 1;
	__set_special_pids(session, session);

	spin_lock(&group_leader->sighand->siglock);
	group_leader->signal->tty = NULL;
	spin_unlock(&group_leader->sighand->siglock);

	err = process_group(group_leader);
out:
	write_unlock_irq(&tasklist_lock);
	return err;
}
/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL)
		group_info->blocks[0] = group_info->small_block;
	else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	while (--i >= 0) {
		free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
	return NULL;
}

EXPORT_SYMBOL(groups_alloc);

void groups_free(struct group_info *group_info)
{
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}

EXPORT_SYMBOL(groups_free);
/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
    struct group_info *group_info)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_to_user(grouplist+off, group_info->blocks[i], len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}

/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
    gid_t __user *grouplist)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_from_user(group_info->blocks[i], grouplist+off, len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}
/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
{
	int base, max, stride;
	int gidsetsize = group_info->ngroups;

	for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
		; /* nothing */
	stride /= 3;

	while (stride) {
		max = gidsetsize - stride;
		for (base = 0; base < max; base++) {
			int left = base;
			int right = left + stride;
			gid_t tmp = GROUP_AT(group_info, right);

			while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
				GROUP_AT(group_info, right) =
				    GROUP_AT(group_info, left);
				right = left;
				left -= stride;
			}
			GROUP_AT(group_info, right) = tmp;
		}
		stride /= 3;
	}
}
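/*
 * Worked example of the stride sequence above (Knuth's 3h+1 gaps): for
 * gidsetsize = 100 the first loop walks 1, 4, 13, 40, 121 and the
 * "stride /= 3" steps back to 40, so passes run with strides 40, 13, 4
 * and finally 1 -- the last pass being a plain insertion sort.
 */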
/* a simple bsearch */
int groups_search(struct group_info *group_info, gid_t grp)
{
	unsigned int left, right;

	if (!group_info)
		return 0;

	left = 0;
	right = group_info->ngroups;
	while (left < right) {
		unsigned int mid = (left+right)/2;
		int cmp = grp - GROUP_AT(group_info, mid);
		if (cmp > 0)
			left = mid + 1;
		else if (cmp < 0)
			right = mid;
		else
			return 1;
	}
	return 0;
}
/* validate and set current->group_info */
int set_current_groups(struct group_info *group_info)
{
	int retval;
	struct group_info *old_info;

	retval = security_task_setgroups(group_info);
	if (retval)
		return retval;

	groups_sort(group_info);
	get_group_info(group_info);

	task_lock(current);
	old_info = current->group_info;
	current->group_info = group_info;
	task_unlock(current);

	put_group_info(old_info);

	return 0;
}

EXPORT_SYMBOL(set_current_groups);
asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
{
	int i = 0;

	/*
	 *	SMP: Nobody else can change our grouplist. Thus we are
	 *	safe.
	 */

	if (gidsetsize < 0)
		return -EINVAL;

	/* no need to grab task_lock here; it cannot change */
	i = current->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups_to_user(grouplist, current->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	return i;
}

/*
 *	SMP: Our groups are copy-on-write. We can set them safely
 *	without another task interfering.
 */

asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}
/*
 * Check whether we're fsgid/egid or in the supplemental group..
 */
int in_group_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->fsgid)
		retval = groups_search(current->group_info, grp);
	return retval;
}

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->egid)
		retval = groups_search(current->group_info, grp);
	return retval;
}

EXPORT_SYMBOL(in_egroup_p);
DECLARE_RWSEM(uts_sem);

EXPORT_SYMBOL(uts_sem);

asmlinkage long sys_newuname(struct new_utsname __user * name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

asmlinkage long sys_sethostname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(utsname()->nodename, tmp, len);
		utsname()->nodename[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
{
	int i, errno;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	i = 1 + strlen(utsname()->nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, utsname()->nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(utsname()->domainname, tmp, len);
		utsname()->domainname[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}
asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	else {
		struct rlimit value;
		task_lock(current->group_leader);
		value = current->signal->rlim[resource];
		task_unlock(current->group_leader);
		return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
	}
}

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 *	Back compatibility for getrlimit. Needed for some apps.
 */

asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit x;
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0;
}

#endif
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit new_rlim, *old_rlim;
	unsigned long it_prof_secs;
	int retval;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	if (new_rlim.rlim_cur > new_rlim.rlim_max)
		return -EINVAL;
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !capable(CAP_SYS_RESOURCE))
		return -EPERM;
	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
		return -EPERM;

	retval = security_task_setrlimit(resource, &new_rlim);
	if (retval)
		return retval;

	if (resource == RLIMIT_CPU && new_rlim.rlim_cur == 0) {
		/*
		 * The caller is asking for an immediate RLIMIT_CPU
		 * expiry.  But we use the zero value to mean "it was
		 * never set".  So let's cheat and make it one second
		 * instead
		 */
		new_rlim.rlim_cur = 1;
	}

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource != RLIMIT_CPU)
		goto out;

	/*
	 * RLIMIT_CPU handling.   Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	if (new_rlim.rlim_cur == RLIM_INFINITY)
		goto out;

	it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
	if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
		unsigned long rlim_cur = new_rlim.rlim_cur;
		cputime_t cputime;

		cputime = secs_to_cputime(rlim_cur);
		read_lock(&tasklist_lock);
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
		spin_unlock_irq(&current->sighand->siglock);
		read_unlock(&tasklist_lock);
	}
out:
	return 0;
}
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded
 * non-current multithreaded.  Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, write memory barrier is implied in __exit_signal
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 */

static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;

	memset((char *) r, 0, sizeof *r);
	utime = stime = cputime_zero;

	rcu_read_lock();
	if (!lock_task_sighand(p, &flags)) {
		rcu_read_unlock();
		return;
	}

	switch (who) {
		case RUSAGE_BOTH:
		case RUSAGE_CHILDREN:
			utime = p->signal->cutime;
			stime = p->signal->cstime;
			r->ru_nvcsw = p->signal->cnvcsw;
			r->ru_nivcsw = p->signal->cnivcsw;
			r->ru_minflt = p->signal->cmin_flt;
			r->ru_majflt = p->signal->cmaj_flt;
			r->ru_inblock = p->signal->cinblock;
			r->ru_oublock = p->signal->coublock;

			if (who == RUSAGE_CHILDREN)
				break;

		case RUSAGE_SELF:
			utime = cputime_add(utime, p->signal->utime);
			stime = cputime_add(stime, p->signal->stime);
			r->ru_nvcsw += p->signal->nvcsw;
			r->ru_nivcsw += p->signal->nivcsw;
			r->ru_minflt += p->signal->min_flt;
			r->ru_majflt += p->signal->maj_flt;
			r->ru_inblock += p->signal->inblock;
			r->ru_oublock += p->signal->oublock;
			t = p;
			do {
				utime = cputime_add(utime, t->utime);
				stime = cputime_add(stime, t->stime);
				r->ru_nvcsw += t->nvcsw;
				r->ru_nivcsw += t->nivcsw;
				r->ru_minflt += t->min_flt;
				r->ru_majflt += t->maj_flt;
				r->ru_inblock += task_io_get_inblock(t);
				r->ru_oublock += task_io_get_oublock(t);
				t = next_thread(t);
			} while (t != p);
			break;

		default:
			BUG();
	}

	unlock_task_sighand(p, &flags);
	rcu_read_unlock();

	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);
}
int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;
	k_getrusage(p, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
		return -EINVAL;
	return getrusage(current, who, ru);
}
asmlinkage long sys_umask(int mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5)
{
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error)
		return error;

	switch (option) {
		case PR_SET_PDEATHSIG:
			if (!valid_signal(arg2)) {
				error = -EINVAL;
				break;
			}
			current->pdeath_signal = arg2;
			break;
		case PR_GET_PDEATHSIG:
			error = put_user(current->pdeath_signal, (int __user *)arg2);
			break;
		case PR_GET_DUMPABLE:
			error = current->mm->dumpable;
			break;
		case PR_SET_DUMPABLE:
			if (arg2 < 0 || arg2 > 1) {
				error = -EINVAL;
				break;
			}
			current->mm->dumpable = arg2;
			break;

		case PR_SET_UNALIGN:
			error = SET_UNALIGN_CTL(current, arg2);
			break;
		case PR_GET_UNALIGN:
			error = GET_UNALIGN_CTL(current, arg2);
			break;
		case PR_SET_FPEMU:
			error = SET_FPEMU_CTL(current, arg2);
			break;
		case PR_GET_FPEMU:
			error = GET_FPEMU_CTL(current, arg2);
			break;
		case PR_SET_FPEXC:
			error = SET_FPEXC_CTL(current, arg2);
			break;
		case PR_GET_FPEXC:
			error = GET_FPEXC_CTL(current, arg2);
			break;
		case PR_GET_TIMING:
			error = PR_TIMING_STATISTICAL;
			break;
		case PR_SET_TIMING:
			if (arg2 == PR_TIMING_STATISTICAL)
				error = 0;
			else
				error = -EINVAL;
			break;

		case PR_GET_KEEPCAPS:
			if (current->keep_capabilities)
				error = 1;
			break;
		case PR_SET_KEEPCAPS:
			if (arg2 != 0 && arg2 != 1) {
				error = -EINVAL;
				break;
			}
			current->keep_capabilities = arg2;
			break;
		case PR_SET_NAME: {
			struct task_struct *me = current;
			unsigned char ncomm[sizeof(me->comm)];

			ncomm[sizeof(me->comm)-1] = 0;
			if (strncpy_from_user(ncomm, (char __user *)arg2,
						sizeof(me->comm)-1) < 0)
				return -EFAULT;
			set_task_comm(me, ncomm);
			return 0;
		}
		case PR_GET_NAME: {
			struct task_struct *me = current;
			unsigned char tcomm[sizeof(me->comm)];

			get_task_comm(tcomm, me);
			if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))
				return -EFAULT;
			return 0;
		}
		case PR_GET_ENDIAN:
			error = GET_ENDIAN(current, arg2);
			break;
		case PR_SET_ENDIAN:
			error = SET_ENDIAN(current, arg2);
			break;

		default:
			error = -EINVAL;
			break;
	}
	return error;
}
asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
			   struct getcpu_cache __user *cache)
{
	int err = 0;
	int cpu = raw_smp_processor_id();
	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	if (cache) {
		/*
		 * The cache is not needed for this implementation,
		 * but make sure user programs pass something
		 * valid. vsyscall implementations can instead make
		 * good use of the cache. Only use t0 and t1 because
		 * these are available in both 32bit and 64bit ABI (no
		 * need for a compat_getcpu). 32bit has enough
		 * padding
		 */
		unsigned long t0, t1;
		get_user(t0, &cache->blob[0]);
		get_user(t1, &cache->blob[1]);
		t0++;
		t1++;
		put_user(t0, &cache->blob[0]);
		put_user(t1, &cache->blob[1]);
	}
	return err ? -EFAULT : 0;
}