4 * Copyright (C) 1991, 1992 Linus Torvalds
7 #include <linux/config.h>
8 #include <linux/module.h>
10 #include <linux/utsname.h>
11 #include <linux/mman.h>
12 #include <linux/smp_lock.h>
13 #include <linux/notifier.h>
14 #include <linux/reboot.h>
15 #include <linux/prctl.h>
16 #include <linux/init.h>
17 #include <linux/highuid.h>
19 #include <linux/kernel.h>
20 #include <linux/kexec.h>
21 #include <linux/workqueue.h>
22 #include <linux/device.h>
23 #include <linux/key.h>
24 #include <linux/times.h>
25 #include <linux/posix-timers.h>
26 #include <linux/security.h>
27 #include <linux/dcookies.h>
28 #include <linux/suspend.h>
29 #include <linux/tty.h>
30 #include <linux/signal.h>
32 #include <linux/compat.h>
33 #include <linux/syscalls.h>
35 #include <asm/uaccess.h>
37 #include <asm/unistd.h>
39 #ifndef SET_UNALIGN_CTL
40 # define SET_UNALIGN_CTL(a,b) (-EINVAL)
42 #ifndef GET_UNALIGN_CTL
43 # define GET_UNALIGN_CTL(a,b) (-EINVAL)
46 # define SET_FPEMU_CTL(a,b) (-EINVAL)
49 # define GET_FPEMU_CTL(a,b) (-EINVAL)
52 # define SET_FPEXC_CTL(a,b) (-EINVAL)
55 # define GET_FPEXC_CTL(a,b) (-EINVAL)
59 * this is where the system-wide overflow UID and GID are defined, for
60 * architectures that now have 32-bit UID/GID but didn't in the past
63 int overflowuid = DEFAULT_OVERFLOWUID;
64 int overflowgid = DEFAULT_OVERFLOWGID;
67 EXPORT_SYMBOL(overflowuid);
68 EXPORT_SYMBOL(overflowgid);
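/*
 * Concrete illustration of the overflow IDs above (hedged; the values are
 * the compile-time defaults): when a task whose uid is, say, 100000 has to
 * be reported through a legacy 16-bit interface, the uid is replaced by
 * overflowuid (65534, the traditional "nobody") rather than being silently
 * truncated, and overflowgid plays the same role for group IDs.
 */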
72 * The same as above, but for filesystems which can only store a 16-bit
73 * UID and GID. As such, this is needed on all architectures.
76 int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
77 int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;
79 EXPORT_SYMBOL(fs_overflowuid);
80 EXPORT_SYMBOL(fs_overflowgid);
83 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
90 * Notifier list for kernel code which wants to be called
91 * at shutdown. This is used to stop any idling DMA operations
95 static struct notifier_block *reboot_notifier_list;
96 static DEFINE_RWLOCK(notifier_lock);
99 * notifier_chain_register - Add notifier to a notifier chain
100 * @list: Pointer to root list pointer
101 * @n: New entry in notifier chain
103 * Adds a notifier to a notifier chain.
105 * Currently always returns zero.
108 int notifier_chain_register(struct notifier_block **list, struct notifier_block *n)
110 write_lock(&notifier_lock);
113 if(n->priority > (*list)->priority)
115 list= &((*list)->next);
119 write_unlock(&notifier_lock);
123 EXPORT_SYMBOL(notifier_chain_register);
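/*
 * Illustrative sketch (not part of this file) of how a caller typically
 * feeds notifier_chain_register(); the handler, my_nb and my_chain names
 * below are hypothetical:
 *
 *	static int my_event_handler(struct notifier_block *self,
 *				    unsigned long event, void *data)
 *	{
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call	= my_event_handler,
 *		.priority	= 0,
 *	};
 *
 *	notifier_chain_register(&my_chain, &my_nb);
 *
 * Entries are kept sorted by descending priority, so a larger .priority
 * value means the handler runs earlier in notifier_call_chain().
 */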
126 * notifier_chain_unregister - Remove notifier from a notifier chain
127 * @nl: Pointer to root list pointer
128 * @n: Entry to remove from notifier chain
130 * Removes a notifier from a notifier chain.
132 * Returns zero on success, or %-ENOENT on failure.
135 int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n)
137 write_lock(&notifier_lock);
143 write_unlock(&notifier_lock);
148 write_unlock(&notifier_lock);
152 EXPORT_SYMBOL(notifier_chain_unregister);
155 * notifier_call_chain - Call functions in a notifier chain
156 * @n: Pointer to root pointer of notifier chain
157 * @val: Value passed unmodified to notifier function
158 * @v: Pointer passed unmodified to notifier function
160 * Calls each function in a notifier chain in turn.
162 * If the return value of the notifier can be and'd
163 * with %NOTIFY_STOP_MASK, then notifier_call_chain
164 * will return immediately, with the return value of
165 * the notifier function which halted execution.
166 * Otherwise, the return value is the return value
167 * of the last notifier function called.
170 int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
173 struct notifier_block *nb = *n;
177 ret=nb->notifier_call(nb,val,v);
178 if(ret&NOTIFY_STOP_MASK)
187 EXPORT_SYMBOL(notifier_call_chain);
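/*
 * Hedged example of the stop semantics documented above: a handler that
 * wants to veto further processing returns a value with NOTIFY_STOP_MASK
 * set (NOTIFY_BAD is the usual choice), and notifier_call_chain() hands
 * that value straight back to its caller. MY_UNSUPPORTED_EVENT is a
 * made-up event code used only for illustration:
 *
 *	static int veto_handler(struct notifier_block *self,
 *				unsigned long event, void *data)
 *	{
 *		if (event == MY_UNSUPPORTED_EVENT)
 *			return NOTIFY_BAD;
 *		return NOTIFY_OK;
 *	}
 */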
190 * register_reboot_notifier - Register function to be called at reboot time
191 * @nb: Info about notifier function to be called
193 * Registers a function with the list of functions
194 * to be called at reboot time.
196 * Currently always returns zero, as notifier_chain_register
197 * always returns zero.
200 int register_reboot_notifier(struct notifier_block * nb)
202 return notifier_chain_register(&reboot_notifier_list, nb);
205 EXPORT_SYMBOL(register_reboot_notifier);
208 * unregister_reboot_notifier - Unregister previously registered reboot notifier
209 * @nb: Hook to be unregistered
211 * Unregisters a previously registered reboot notifier function.
214 * Returns zero on success, or %-ENOENT on failure.
217 int unregister_reboot_notifier(struct notifier_block * nb)
219 return notifier_chain_unregister(&reboot_notifier_list, nb);
222 EXPORT_SYMBOL(unregister_reboot_notifier);
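/*
 * Illustrative sketch of the register/unregister pair above as a module
 * might use it (the my_* names are hypothetical):
 *
 *	static int my_reboot_handler(struct notifier_block *self,
 *				     unsigned long code, void *cmd)
 *	{
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_reboot_nb = {
 *		.notifier_call = my_reboot_handler,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return register_reboot_notifier(&my_reboot_nb);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		unregister_reboot_notifier(&my_reboot_nb);
 *	}
 *
 * The 'code' argument is SYS_RESTART, SYS_HALT or SYS_POWER_OFF, and for
 * SYS_RESTART 'cmd' may point to the user-supplied command string.
 */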
224 static int set_one_prio(struct task_struct *p, int niceval, int error)
228 if (p->uid != current->euid &&
229 p->euid != current->euid && !capable(CAP_SYS_NICE)) {
233 if (niceval < task_nice(p) && !can_nice(p, niceval)) {
237 no_nice = security_task_setnice(p, niceval);
244 set_user_nice(p, niceval);
249 asmlinkage long sys_setpriority(int which, int who, int niceval)
251 struct task_struct *g, *p;
252 struct user_struct *user;
255 if (which > 2 || which < 0)
258 /* normalize: avoid signed division (rounding problems) */
265 read_lock(&tasklist_lock);
270 p = find_task_by_pid(who);
272 error = set_one_prio(p, niceval, error);
276 who = process_group(current);
277 do_each_task_pid(who, PIDTYPE_PGID, p) {
278 error = set_one_prio(p, niceval, error);
279 } while_each_task_pid(who, PIDTYPE_PGID, p);
282 user = current->user;
286 if ((who != current->uid) && !(user = find_user(who)))
287 goto out_unlock; /* No processes for this user */
291 error = set_one_prio(p, niceval, error);
292 while_each_thread(g, p);
293 if (who != current->uid)
294 free_uid(user); /* For find_user() */
298 read_unlock(&tasklist_lock);
304 * Ugh. To avoid negative return values, "getpriority()" will
305 * not return the normal nice-value, but a negated value that
306 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
307 * to stay compatible.
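/*
 * Worked example of the offset: nice -20 is returned as 20 - (-20) = 40,
 * nice 0 as 20 and nice 19 as 1, so every successful result is positive
 * and cannot be mistaken for a -errno value. Userspace (typically the C
 * library's getpriority() wrapper) is expected to undo this by computing
 * 20 - result.
 */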
309 asmlinkage long sys_getpriority(int which, int who)
311 struct task_struct *g, *p;
312 struct user_struct *user;
313 long niceval, retval = -ESRCH;
315 if (which > 2 || which < 0)
318 read_lock(&tasklist_lock);
323 p = find_task_by_pid(who);
325 niceval = 20 - task_nice(p);
326 if (niceval > retval)
332 who = process_group(current);
333 do_each_task_pid(who, PIDTYPE_PGID, p) {
334 niceval = 20 - task_nice(p);
335 if (niceval > retval)
337 } while_each_task_pid(who, PIDTYPE_PGID, p);
340 user = current->user;
344 if ((who != current->uid) && !(user = find_user(who)))
345 goto out_unlock; /* No processes for this user */
349 niceval = 20 - task_nice(p);
350 if (niceval > retval)
353 while_each_thread(g, p);
354 if (who != current->uid)
355 free_uid(user); /* for find_user() */
359 read_unlock(&tasklist_lock);
364 void emergency_restart(void)
366 machine_emergency_restart();
368 EXPORT_SYMBOL_GPL(emergency_restart);
370 void kernel_restart(char *cmd)
372 notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
373 system_state = SYSTEM_RESTART;
376 printk(KERN_EMERG "Restarting system.\n");
378 printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
381 machine_restart(cmd);
383 EXPORT_SYMBOL_GPL(kernel_restart);
385 void kernel_kexec(void)
388 struct kimage *image;
389 image = xchg(&kexec_image, 0);
393 notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
394 system_state = SYSTEM_RESTART;
396 printk(KERN_EMERG "Starting new kernel\n");
398 machine_kexec(image);
401 EXPORT_SYMBOL_GPL(kernel_kexec);
403 void kernel_halt(void)
405 notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL);
406 system_state = SYSTEM_HALT;
407 device_suspend(PMSG_SUSPEND);
409 printk(KERN_EMERG "System halted.\n");
412 EXPORT_SYMBOL_GPL(kernel_halt);
414 void kernel_power_off(void)
416 notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL);
417 system_state = SYSTEM_POWER_OFF;
418 device_suspend(PMSG_SUSPEND);
420 printk(KERN_EMERG "Power down.\n");
423 EXPORT_SYMBOL_GPL(kernel_power_off);
426 * Reboot system call: for obvious reasons only root may call it,
427 * and even root needs to set up some magic numbers in the registers
428 * so that some mistake won't make this reboot the whole machine.
429 * You can also set the meaning of the ctrl-alt-del-key here.
431 * reboot doesn't sync: do that yourself before calling this.
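/*
 * Hedged userspace sketch of the magic-number protocol described above
 * (the C library's reboot() wrapper normally supplies the magic values
 * for you):
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/reboot.h>
 *
 *	sync();
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *		LINUX_REBOOT_CMD_RESTART, NULL);
 */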
433 asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
437 /* We only trust the superuser with rebooting the system. */
438 if (!capable(CAP_SYS_BOOT))
441 /* For safety, we require "magic" arguments. */
442 if (magic1 != LINUX_REBOOT_MAGIC1 ||
443 (magic2 != LINUX_REBOOT_MAGIC2 &&
444 magic2 != LINUX_REBOOT_MAGIC2A &&
445 magic2 != LINUX_REBOOT_MAGIC2B &&
446 magic2 != LINUX_REBOOT_MAGIC2C))
451 case LINUX_REBOOT_CMD_RESTART:
452 kernel_restart(NULL);
455 case LINUX_REBOOT_CMD_CAD_ON:
459 case LINUX_REBOOT_CMD_CAD_OFF:
463 case LINUX_REBOOT_CMD_HALT:
469 case LINUX_REBOOT_CMD_POWER_OFF:
475 case LINUX_REBOOT_CMD_RESTART2:
476 if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
480 buffer[sizeof(buffer) - 1] = '\0';
482 kernel_restart(buffer);
485 case LINUX_REBOOT_CMD_KEXEC:
490 #ifdef CONFIG_SOFTWARE_SUSPEND
491 case LINUX_REBOOT_CMD_SW_SUSPEND:
493 int ret = software_suspend();
507 static void deferred_cad(void *dummy)
509 kernel_restart(NULL);
513 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
514 * As it's called within an interrupt, it may NOT sync: the only choice
515 * is whether to reboot at once, or just ignore the ctrl-alt-del.
517 void ctrl_alt_del(void)
519 static DECLARE_WORK(cad_work, deferred_cad, NULL);
522 schedule_work(&cad_work);
524 kill_proc(cad_pid, SIGINT, 1);
529 * Unprivileged users may change the real gid to the effective gid
530 * or vice versa. (BSD-style)
532 * If you set the real gid at all, or set the effective gid to a value not
533 * equal to the real gid, then the saved gid is set to the new effective gid.
535 * This makes it possible for a setgid program to completely drop its
536 * privileges, which is often a useful assertion to make when you are doing
537 * a security audit of a program.
539 * The general idea is that a program which uses just setregid() will be
540 * 100% compatible with BSD. A program which uses just setgid() will be
541 * 100% compatible with POSIX with saved IDs.
543 * SMP: There are no races; the GIDs are checked only by filesystem
544 * operations (as far as semantic preservation is concerned).
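/*
 * Hedged userspace example of the "completely drop privileges" pattern
 * described above, as a setgid program might do it:
 *
 *	gid_t real_gid = getgid();
 *
 *	if (setregid(real_gid, real_gid) < 0)
 *		abort();
 *
 * Because the real gid is being set, the saved gid is also replaced by the
 * new effective gid, so all three group IDs end up equal to real_gid.
 */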
546 asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
548 int old_rgid = current->gid;
549 int old_egid = current->egid;
550 int new_rgid = old_rgid;
551 int new_egid = old_egid;
554 retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
558 if (rgid != (gid_t) -1) {
559 if ((old_rgid == rgid) ||
560 (current->egid==rgid) ||
566 if (egid != (gid_t) -1) {
567 if ((old_rgid == egid) ||
568 (current->egid == egid) ||
569 (current->sgid == egid) ||
576 if (new_egid != old_egid)
578 current->mm->dumpable = suid_dumpable;
581 if (rgid != (gid_t) -1 ||
582 (egid != (gid_t) -1 && egid != old_rgid))
583 current->sgid = new_egid;
584 current->fsgid = new_egid;
585 current->egid = new_egid;
586 current->gid = new_rgid;
587 key_fsgid_changed(current);
592 * setgid() is implemented like SysV w/ SAVED_IDS
594 * SMP: Same implicit races as above.
596 asmlinkage long sys_setgid(gid_t gid)
598 int old_egid = current->egid;
601 retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
605 if (capable(CAP_SETGID))
609 current->mm->dumpable = suid_dumpable;
612 current->gid = current->egid = current->sgid = current->fsgid = gid;
614 else if ((gid == current->gid) || (gid == current->sgid))
618 current->mm->dumpable = suid_dumpable;
621 current->egid = current->fsgid = gid;
626 key_fsgid_changed(current);
630 static int set_user(uid_t new_ruid, int dumpclear)
632 struct user_struct *new_user;
634 new_user = alloc_uid(new_ruid);
638 if (atomic_read(&new_user->processes) >=
639 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
640 new_user != &root_user) {
645 switch_uid(new_user);
649 current->mm->dumpable = suid_dumpable;
652 current->uid = new_ruid;
657 * Unprivileged users may change the real uid to the effective uid
658 * or vice versa. (BSD-style)
660 * If you set the real uid at all, or set the effective uid to a value not
661 * equal to the real uid, then the saved uid is set to the new effective uid.
663 * This makes it possible for a setuid program to completely drop its
664 * privileges, which is often a useful assertion to make when you are doing
665 * a security audit of a program.
667 * The general idea is that a program which uses just setreuid() will be
668 * 100% compatible with BSD. A program which uses just setuid() will be
669 * 100% compatible with POSIX with saved IDs.
671 asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
673 int old_ruid, old_euid, old_suid, new_ruid, new_euid;
676 retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
680 new_ruid = old_ruid = current->uid;
681 new_euid = old_euid = current->euid;
682 old_suid = current->suid;
684 if (ruid != (uid_t) -1) {
686 if ((old_ruid != ruid) &&
687 (current->euid != ruid) &&
688 !capable(CAP_SETUID))
692 if (euid != (uid_t) -1) {
694 if ((old_ruid != euid) &&
695 (current->euid != euid) &&
696 (current->suid != euid) &&
697 !capable(CAP_SETUID))
701 if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
704 if (new_euid != old_euid)
706 current->mm->dumpable = suid_dumpable;
709 current->fsuid = current->euid = new_euid;
710 if (ruid != (uid_t) -1 ||
711 (euid != (uid_t) -1 && euid != old_ruid))
712 current->suid = current->euid;
713 current->fsuid = current->euid;
715 key_fsuid_changed(current);
717 return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
723 * setuid() is implemented like SysV with SAVED_IDS
725 * Note that SAVED_ID's is deficient in that a setuid root program
726 * like sendmail, for example, cannot set its uid to be a normal
727 * user and then switch back, because if you're root, setuid() sets
728 * the saved uid too. If you don't like this, blame the bright people
729 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
730 * will allow a root program to temporarily drop privileges and be able to
731 * regain them by swapping the real and effective uid.
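/*
 * Hedged sketch of that BSD-style swap from userspace: a setuid-root
 * program (real uid = invoking user, effective uid = 0) can park its
 * privileges and later recover them by exchanging the two IDs;
 * do_unprivileged_work() is a hypothetical placeholder:
 *
 *	setreuid(geteuid(), getuid());
 *	do_unprivileged_work();
 *	setreuid(geteuid(), getuid());
 *
 * The first call leaves the effective uid equal to the invoking user; the
 * second swaps root back into the effective uid.
 */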
733 asmlinkage long sys_setuid(uid_t uid)
735 int old_euid = current->euid;
736 int old_ruid, old_suid, new_ruid, new_suid;
739 retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
743 old_ruid = new_ruid = current->uid;
744 old_suid = current->suid;
747 if (capable(CAP_SETUID)) {
748 if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
751 } else if ((uid != current->uid) && (uid != new_suid))
756 current->mm->dumpable = suid_dumpable;
759 current->fsuid = current->euid = uid;
760 current->suid = new_suid;
762 key_fsuid_changed(current);
764 return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
769 * This function implements a generic ability to update ruid, euid,
770 * and suid. This allows you to implement the 4.4 compatible seteuid().
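/*
 * For instance (illustrative sketch, roughly what a C library does), the
 * 4.4BSD-compatible seteuid() can be layered on this call:
 *
 *	int seteuid(uid_t euid)
 *	{
 *		return setresuid((uid_t) -1, euid, (uid_t) -1);
 *	}
 */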
772 asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
774 int old_ruid = current->uid;
775 int old_euid = current->euid;
776 int old_suid = current->suid;
779 retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
783 if (!capable(CAP_SETUID)) {
784 if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
785 (ruid != current->euid) && (ruid != current->suid))
787 if ((euid != (uid_t) -1) && (euid != current->uid) &&
788 (euid != current->euid) && (euid != current->suid))
790 if ((suid != (uid_t) -1) && (suid != current->uid) &&
791 (suid != current->euid) && (suid != current->suid))
794 if (ruid != (uid_t) -1) {
795 if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
798 if (euid != (uid_t) -1) {
799 if (euid != current->euid)
801 current->mm->dumpable = suid_dumpable;
804 current->euid = euid;
806 current->fsuid = current->euid;
807 if (suid != (uid_t) -1)
808 current->suid = suid;
810 key_fsuid_changed(current);
812 return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
815 asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
819 if (!(retval = put_user(current->uid, ruid)) &&
820 !(retval = put_user(current->euid, euid)))
821 retval = put_user(current->suid, suid);
827 * Same as above, but for rgid, egid, sgid.
829 asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
833 retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
837 if (!capable(CAP_SETGID)) {
838 if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
839 (rgid != current->egid) && (rgid != current->sgid))
841 if ((egid != (gid_t) -1) && (egid != current->gid) &&
842 (egid != current->egid) && (egid != current->sgid))
844 if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
845 (sgid != current->egid) && (sgid != current->sgid))
848 if (egid != (gid_t) -1) {
849 if (egid != current->egid)
851 current->mm->dumpable = suid_dumpable;
854 current->egid = egid;
856 current->fsgid = current->egid;
857 if (rgid != (gid_t) -1)
859 if (sgid != (gid_t) -1)
860 current->sgid = sgid;
862 key_fsgid_changed(current);
866 asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
870 if (!(retval = put_user(current->gid, rgid)) &&
871 !(retval = put_user(current->egid, egid)))
872 retval = put_user(current->sgid, sgid);
879 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
880 * is used for "access()" and for the NFS daemon (letting nfsd stay at
881 * whatever uid it wants to). It normally shadows "euid", except when
882 * explicitly set by setfsuid() or for access..
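/*
 * Hedged sketch of the intended use: a userspace file server can act with
 * a client's uid for a single filesystem operation without touching its
 * real or effective uid (client_uid and path are hypothetical):
 *
 *	uid_t old_fsuid = setfsuid(client_uid);
 *	int fd = open(path, O_RDONLY);
 *	setfsuid(old_fsuid);
 *
 * setfsuid() returns the previous fsuid, so the server can restore it once
 * the operation on the client's behalf is done.
 */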
884 asmlinkage long sys_setfsuid(uid_t uid)
888 old_fsuid = current->fsuid;
889 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
892 if (uid == current->uid || uid == current->euid ||
893 uid == current->suid || uid == current->fsuid ||
896 if (uid != old_fsuid)
898 current->mm->dumpable = suid_dumpable;
901 current->fsuid = uid;
904 key_fsuid_changed(current);
906 security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);
912 * "Samma på svenska" - Swedish for "the same thing": as above, but for the fsgid.
914 asmlinkage long sys_setfsgid(gid_t gid)
918 old_fsgid = current->fsgid;
919 if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
922 if (gid == current->gid || gid == current->egid ||
923 gid == current->sgid || gid == current->fsgid ||
926 if (gid != old_fsgid)
928 current->mm->dumpable = suid_dumpable;
931 current->fsgid = gid;
932 key_fsgid_changed(current);
937 asmlinkage long sys_times(struct tms __user * tbuf)
940 * In the SMP world we might just be unlucky and have one of
941 * the times increment as we use it. Since the value is an
942 * atomically safe type this is just fine. Conceptually it's
943 * as if the syscall took an instant longer to occur.
947 cputime_t utime, stime, cutime, cstime;
950 if (thread_group_empty(current)) {
952 * Single thread case without the use of any locks.
954 * We may race with release_task if two threads are
955 * executing. However, release_task first adds up the
956 * counters (__exit_signal) before removing the task
957 * from the process tasklist (__unhash_process).
958 * __exit_signal also acquires and releases the
959 * siglock which results in the proper memory ordering
960 * so that the list modifications are always visible
961 * after the counters have been updated.
963 * If the counters have been updated by the second thread
964 * but the thread has not yet been removed from the list
965 * then the other branch will be executing which will
966 * block on tasklist_lock until the exit handling of the
967 * other task is finished.
969 * This also implies that the sighand->siglock cannot
970 * be held by another processor. So we can also
971 * skip acquiring that lock.
973 utime = cputime_add(current->signal->utime, current->utime);
974 stime = cputime_add(current->signal->stime, current->stime);
975 cutime = current->signal->cutime;
976 cstime = current->signal->cstime;
981 /* Process with multiple threads */
982 struct task_struct *tsk = current;
983 struct task_struct *t;
985 read_lock(&tasklist_lock);
986 utime = tsk->signal->utime;
987 stime = tsk->signal->stime;
990 utime = cputime_add(utime, t->utime);
991 stime = cputime_add(stime, t->stime);
996 * While we have tasklist_lock read-locked, no dying thread
997 * can be updating current->signal->[us]time. Instead,
998 * we got their counts included in the live thread loop.
999 * However, another thread can come in right now and
1000 * do a wait call that updates current->signal->c[us]time.
1001 * To make sure we always see that pair updated atomically,
1002 * we take the siglock around fetching them.
1004 spin_lock_irq(&tsk->sighand->siglock);
1005 cutime = tsk->signal->cutime;
1006 cstime = tsk->signal->cstime;
1007 spin_unlock_irq(&tsk->sighand->siglock);
1008 read_unlock(&tasklist_lock);
1010 tmp.tms_utime = cputime_to_clock_t(utime);
1011 tmp.tms_stime = cputime_to_clock_t(stime);
1012 tmp.tms_cutime = cputime_to_clock_t(cutime);
1013 tmp.tms_cstime = cputime_to_clock_t(cstime);
1014 if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
1017 return (long) jiffies_64_to_clock_t(get_jiffies_64());
1021 * This needs some heavy checking ...
1022 * I just haven't the stomach for it. I also don't fully
1023 * understand sessions/pgrp etc. Let somebody who does explain it.
1025 * OK, I think I have the protection semantics right.... this is really
1026 * only important on a multi-user system anyway, to make sure one user
1027 * can't send a signal to a process owned by another. -TYT, 12/12/91
1029 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
1033 asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
1035 struct task_struct *p;
1045 /* From this point forward we keep holding onto the tasklist lock
1046 * so that our parent does not change from under us. -DaveM
1048 write_lock_irq(&tasklist_lock);
1051 p = find_task_by_pid(pid);
1056 if (!thread_group_leader(p))
1059 if (p->parent == current || p->real_parent == current) {
1061 if (p->signal->session != current->signal->session)
1073 if (p->signal->leader)
1077 struct task_struct *p;
1079 do_each_task_pid(pgid, PIDTYPE_PGID, p) {
1080 if (p->signal->session == current->signal->session)
1082 } while_each_task_pid(pgid, PIDTYPE_PGID, p);
1087 err = security_task_setpgid(p, pgid);
1091 if (process_group(p) != pgid) {
1092 detach_pid(p, PIDTYPE_PGID);
1093 p->signal->pgrp = pgid;
1094 attach_pid(p, PIDTYPE_PGID, pgid);
1099 /* All paths lead to here, thus we are safe. -DaveM */
1100 write_unlock_irq(&tasklist_lock);
1104 asmlinkage long sys_getpgid(pid_t pid)
1107 return process_group(current);
1110 struct task_struct *p;
1112 read_lock(&tasklist_lock);
1113 p = find_task_by_pid(pid);
1117 retval = security_task_getpgid(p);
1119 retval = process_group(p);
1121 read_unlock(&tasklist_lock);
1126 #ifdef __ARCH_WANT_SYS_GETPGRP
1128 asmlinkage long sys_getpgrp(void)
1130 /* SMP - assuming writes are word atomic this is fine */
1131 return process_group(current);
1136 asmlinkage long sys_getsid(pid_t pid)
1139 return current->signal->session;
1142 struct task_struct *p;
1144 read_lock(&tasklist_lock);
1145 p = find_task_by_pid(pid);
1149 retval = security_task_getsid(p);
1151 retval = p->signal->session;
1153 read_unlock(&tasklist_lock);
1158 asmlinkage long sys_setsid(void)
1163 if (!thread_group_leader(current))
1167 write_lock_irq(&tasklist_lock);
1169 pid = find_pid(PIDTYPE_PGID, current->pid);
1173 current->signal->leader = 1;
1174 __set_special_pids(current->pid, current->pid);
1175 current->signal->tty = NULL;
1176 current->signal->tty_old_pgrp = 0;
1177 err = process_group(current);
1179 write_unlock_irq(&tasklist_lock);
1185 * Supplementary group IDs
1188 /* init to 2 - one for init_task, one to ensure it is never freed */
1189 struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
1191 struct group_info *groups_alloc(int gidsetsize)
1193 struct group_info *group_info;
1197 nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
1198 /* Make sure we always allocate at least one indirect block pointer */
1199 nblocks = nblocks ? : 1;
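/*
 * Worked example (assuming 4K pages, so NGROUPS_PER_BLOCK is
 * PAGE_SIZE / sizeof(gid_t) = 1024): gidsetsize = 1500 gives
 * nblocks = (1500 + 1023) / 1024 = 2, while gidsetsize = 0 would give 0,
 * which the "?:" above rounds up to 1.
 */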
1200 group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
1203 group_info->ngroups = gidsetsize;
1204 group_info->nblocks = nblocks;
1205 atomic_set(&group_info->usage, 1);
1207 if (gidsetsize <= NGROUPS_SMALL) {
1208 group_info->blocks[0] = group_info->small_block;
1210 for (i = 0; i < nblocks; i++) {
1212 b = (void *)__get_free_page(GFP_USER);
1214 goto out_undo_partial_alloc;
1215 group_info->blocks[i] = b;
1220 out_undo_partial_alloc:
1222 free_page((unsigned long)group_info->blocks[i]);
1228 EXPORT_SYMBOL(groups_alloc);
1230 void groups_free(struct group_info *group_info)
1232 if (group_info->blocks[0] != group_info->small_block) {
1234 for (i = 0; i < group_info->nblocks; i++)
1235 free_page((unsigned long)group_info->blocks[i]);
1240 EXPORT_SYMBOL(groups_free);
1242 /* export the group_info to a user-space array */
1243 static int groups_to_user(gid_t __user *grouplist,
1244 struct group_info *group_info)
1247 int count = group_info->ngroups;
1249 for (i = 0; i < group_info->nblocks; i++) {
1250 int cp_count = min(NGROUPS_PER_BLOCK, count);
1251 int off = i * NGROUPS_PER_BLOCK;
1252 int len = cp_count * sizeof(*grouplist);
1254 if (copy_to_user(grouplist+off, group_info->blocks[i], len))
1262 /* fill a group_info from a user-space array - it must be allocated already */
1263 static int groups_from_user(struct group_info *group_info,
1264 gid_t __user *grouplist)
1267 int count = group_info->ngroups;
1269 for (i = 0; i < group_info->nblocks; i++) {
1270 int cp_count = min(NGROUPS_PER_BLOCK, count);
1271 int off = i * NGROUPS_PER_BLOCK;
1272 int len = cp_count * sizeof(*grouplist);
1274 if (copy_from_user(group_info->blocks[i], grouplist+off, len))
1282 /* a simple Shell sort */
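/*
 * Note on the sort below: the first loop grows the stride through Knuth's
 * 3h+1 sequence (1, 4, 13, 40, ...) until it is no smaller than the set
 * size; each pass then insertion-sorts the elements lying 'stride' apart,
 * and the stride shrinks between passes until it reaches 1, at which point
 * the whole array is ordered.
 */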
1283 static void groups_sort(struct group_info *group_info)
1285 int base, max, stride;
1286 int gidsetsize = group_info->ngroups;
1288 for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
1293 max = gidsetsize - stride;
1294 for (base = 0; base < max; base++) {
1296 int right = left + stride;
1297 gid_t tmp = GROUP_AT(group_info, right);
1299 while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
1300 GROUP_AT(group_info, right) =
1301 GROUP_AT(group_info, left);
1305 GROUP_AT(group_info, right) = tmp;
1311 /* a simple bsearch */
1312 int groups_search(struct group_info *group_info, gid_t grp)
1320 right = group_info->ngroups;
1321 while (left < right) {
1322 int mid = (left+right)/2;
1323 int cmp = grp - GROUP_AT(group_info, mid);
1334 /* validate and set current->group_info */
1335 int set_current_groups(struct group_info *group_info)
1338 struct group_info *old_info;
1340 retval = security_task_setgroups(group_info);
1344 groups_sort(group_info);
1345 get_group_info(group_info);
1348 old_info = current->group_info;
1349 current->group_info = group_info;
1350 task_unlock(current);
1352 put_group_info(old_info);
1357 EXPORT_SYMBOL(set_current_groups);
1359 asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
1364 * SMP: Nobody else can change our grouplist. Thus we are safe.
1371 /* no need to grab task_lock here; it cannot change */
1372 get_group_info(current->group_info);
1373 i = current->group_info->ngroups;
1375 if (i > gidsetsize) {
1379 if (groups_to_user(grouplist, current->group_info)) {
1385 put_group_info(current->group_info);
1390 * SMP: Our groups are copy-on-write. We can set them safely
1391 * without another task interfering.
1394 asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
1396 struct group_info *group_info;
1399 if (!capable(CAP_SETGID))
1401 if ((unsigned)gidsetsize > NGROUPS_MAX)
1404 group_info = groups_alloc(gidsetsize);
1407 retval = groups_from_user(group_info, grouplist);
1409 put_group_info(group_info);
1413 retval = set_current_groups(group_info);
1414 put_group_info(group_info);
1420 * Check whether we're fsgid/egid or in the supplemental group..
1422 int in_group_p(gid_t grp)
1425 if (grp != current->fsgid) {
1426 get_group_info(current->group_info);
1427 retval = groups_search(current->group_info, grp);
1428 put_group_info(current->group_info);
1433 EXPORT_SYMBOL(in_group_p);
1435 int in_egroup_p(gid_t grp)
1438 if (grp != current->egid) {
1439 get_group_info(current->group_info);
1440 retval = groups_search(current->group_info, grp);
1441 put_group_info(current->group_info);
1446 EXPORT_SYMBOL(in_egroup_p);
1448 DECLARE_RWSEM(uts_sem);
1450 EXPORT_SYMBOL(uts_sem);
1452 asmlinkage long sys_newuname(struct new_utsname __user * name)
1456 down_read(&uts_sem);
1457 if (copy_to_user(name,&system_utsname,sizeof *name))
1463 asmlinkage long sys_sethostname(char __user *name, int len)
1466 char tmp[__NEW_UTS_LEN];
1468 if (!capable(CAP_SYS_ADMIN))
1470 if (len < 0 || len > __NEW_UTS_LEN)
1472 down_write(&uts_sem);
1474 if (!copy_from_user(tmp, name, len)) {
1475 memcpy(system_utsname.nodename, tmp, len);
1476 system_utsname.nodename[len] = 0;
1483 #ifdef __ARCH_WANT_SYS_GETHOSTNAME
1485 asmlinkage long sys_gethostname(char __user *name, int len)
1491 down_read(&uts_sem);
1492 i = 1 + strlen(system_utsname.nodename);
1496 if (copy_to_user(name, system_utsname.nodename, i))
1505 * Only setdomainname; getdomainname can be implemented by calling uname().
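/*
 * Illustrative userspace sketch of that equivalence (glibc exposes the
 * domainname member of struct utsname when _GNU_SOURCE is defined):
 *
 *	struct utsname u;
 *
 *	if (uname(&u) == 0)
 *		printf("%s\n", u.domainname);
 */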
1508 asmlinkage long sys_setdomainname(char __user *name, int len)
1511 char tmp[__NEW_UTS_LEN];
1513 if (!capable(CAP_SYS_ADMIN))
1515 if (len < 0 || len > __NEW_UTS_LEN)
1518 down_write(&uts_sem);
1520 if (!copy_from_user(tmp, name, len)) {
1521 memcpy(system_utsname.domainname, tmp, len);
1522 system_utsname.domainname[len] = 0;
1529 asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
1531 if (resource >= RLIM_NLIMITS)
1534 struct rlimit value;
1535 task_lock(current->group_leader);
1536 value = current->signal->rlim[resource];
1537 task_unlock(current->group_leader);
1538 return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1542 #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1545 * Back compatibility for getrlimit. Needed for some apps.
1548 asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
1551 if (resource >= RLIM_NLIMITS)
1554 task_lock(current->group_leader);
1555 x = current->signal->rlim[resource];
1556 task_unlock(current->group_leader);
1557 if(x.rlim_cur > 0x7FFFFFFF)
1558 x.rlim_cur = 0x7FFFFFFF;
1559 if(x.rlim_max > 0x7FFFFFFF)
1560 x.rlim_max = 0x7FFFFFFF;
1561 return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0;
1566 asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
1568 struct rlimit new_rlim, *old_rlim;
1571 if (resource >= RLIM_NLIMITS)
1573 if(copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1575 if (new_rlim.rlim_cur > new_rlim.rlim_max)
1577 old_rlim = current->signal->rlim + resource;
1578 if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
1579 !capable(CAP_SYS_RESOURCE))
1581 if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
1584 retval = security_task_setrlimit(resource, &new_rlim);
1588 task_lock(current->group_leader);
1589 *old_rlim = new_rlim;
1590 task_unlock(current->group_leader);
1592 if (resource == RLIMIT_CPU && new_rlim.rlim_cur != RLIM_INFINITY &&
1593 (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
1594 new_rlim.rlim_cur <= cputime_to_secs(
1595 current->signal->it_prof_expires))) {
1596 cputime_t cputime = secs_to_cputime(new_rlim.rlim_cur);
1597 read_lock(&tasklist_lock);
1598 spin_lock_irq(&current->sighand->siglock);
1599 set_process_cpu_timer(current, CPUCLOCK_PROF,
1601 spin_unlock_irq(&current->sighand->siglock);
1602 read_unlock(&tasklist_lock);
1609 * It would make sense to put struct rusage in the task_struct,
1610 * except that would make the task_struct be *really big*. After
1611 * task_struct gets moved into malloc'ed memory, it would
1612 * make sense to do this. It will make moving the rest of the information
1613 * a lot simpler! (Which we're not doing right now because we're not
1614 * measuring them yet).
1616 * This expects to be called with tasklist_lock read-locked or better,
1617 * and the siglock not locked. It may momentarily take the siglock.
1619 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
1620 * races with threads incrementing their own counters. But since word
1621 * reads are atomic, we either get new values or old values and we don't
1622 * care which for the sums. We always take the siglock to protect reading
1623 * the c* fields from p->signal from races with exit.c updating those
1624 * fields when reaping, so a sample either gets all the additions of a
1625 * given child after it's reaped, or none so this sample is before reaping.
1628 static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1630 struct task_struct *t;
1631 unsigned long flags;
1632 cputime_t utime, stime;
1634 memset((char *) r, 0, sizeof *r);
1636 if (unlikely(!p->signal))
1640 case RUSAGE_CHILDREN:
1641 spin_lock_irqsave(&p->sighand->siglock, flags);
1642 utime = p->signal->cutime;
1643 stime = p->signal->cstime;
1644 r->ru_nvcsw = p->signal->cnvcsw;
1645 r->ru_nivcsw = p->signal->cnivcsw;
1646 r->ru_minflt = p->signal->cmin_flt;
1647 r->ru_majflt = p->signal->cmaj_flt;
1648 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1649 cputime_to_timeval(utime, &r->ru_utime);
1650 cputime_to_timeval(stime, &r->ru_stime);
1653 spin_lock_irqsave(&p->sighand->siglock, flags);
1654 utime = stime = cputime_zero;
1657 spin_lock_irqsave(&p->sighand->siglock, flags);
1658 utime = p->signal->cutime;
1659 stime = p->signal->cstime;
1660 r->ru_nvcsw = p->signal->cnvcsw;
1661 r->ru_nivcsw = p->signal->cnivcsw;
1662 r->ru_minflt = p->signal->cmin_flt;
1663 r->ru_majflt = p->signal->cmaj_flt;
1665 utime = cputime_add(utime, p->signal->utime);
1666 stime = cputime_add(stime, p->signal->stime);
1667 r->ru_nvcsw += p->signal->nvcsw;
1668 r->ru_nivcsw += p->signal->nivcsw;
1669 r->ru_minflt += p->signal->min_flt;
1670 r->ru_majflt += p->signal->maj_flt;
1673 utime = cputime_add(utime, t->utime);
1674 stime = cputime_add(stime, t->stime);
1675 r->ru_nvcsw += t->nvcsw;
1676 r->ru_nivcsw += t->nivcsw;
1677 r->ru_minflt += t->min_flt;
1678 r->ru_majflt += t->maj_flt;
1681 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1682 cputime_to_timeval(utime, &r->ru_utime);
1683 cputime_to_timeval(stime, &r->ru_stime);
1690 int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
1693 read_lock(&tasklist_lock);
1694 k_getrusage(p, who, &r);
1695 read_unlock(&tasklist_lock);
1696 return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1699 asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
1701 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
1703 return getrusage(current, who, ru);
1706 asmlinkage long sys_umask(int mask)
1708 mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
1712 asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
1713 unsigned long arg4, unsigned long arg5)
1718 error = security_task_prctl(option, arg2, arg3, arg4, arg5);
1723 case PR_SET_PDEATHSIG:
1725 if (!valid_signal(sig)) {
1729 current->pdeath_signal = sig;
1731 case PR_GET_PDEATHSIG:
1732 error = put_user(current->pdeath_signal, (int __user *)arg2);
1734 case PR_GET_DUMPABLE:
1735 if (current->mm->dumpable)
1738 case PR_SET_DUMPABLE:
1739 if (arg2 < 0 || arg2 > 2) {
1743 current->mm->dumpable = arg2;
1746 case PR_SET_UNALIGN:
1747 error = SET_UNALIGN_CTL(current, arg2);
1749 case PR_GET_UNALIGN:
1750 error = GET_UNALIGN_CTL(current, arg2);
1753 error = SET_FPEMU_CTL(current, arg2);
1756 error = GET_FPEMU_CTL(current, arg2);
1759 error = SET_FPEXC_CTL(current, arg2);
1762 error = GET_FPEXC_CTL(current, arg2);
1765 error = PR_TIMING_STATISTICAL;
1768 if (arg2 == PR_TIMING_STATISTICAL)
1774 case PR_GET_KEEPCAPS:
1775 if (current->keep_capabilities)
1778 case PR_SET_KEEPCAPS:
1779 if (arg2 != 0 && arg2 != 1) {
1783 current->keep_capabilities = arg2;
1786 struct task_struct *me = current;
1787 unsigned char ncomm[sizeof(me->comm)];
1789 ncomm[sizeof(me->comm)-1] = 0;
1790 if (strncpy_from_user(ncomm, (char __user *)arg2,
1791 sizeof(me->comm)-1) < 0)
1793 set_task_comm(me, ncomm);
1797 struct task_struct *me = current;
1798 unsigned char tcomm[sizeof(me->comm)];
1800 get_task_comm(tcomm, me);
1801 if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))