#include <linux/pid_namespace.h>
#include <linux/idr.h>
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
+#include <linux/capability.h>
#include <asm/atomic.h>
retval = ss->can_attach(ss, cgrp, tsk, false);
if (retval)
return retval;
+ } else if (!capable(CAP_SYS_ADMIN)) {
+ const struct cred *cred = current_cred(), *tcred;
+
+ /* No can_attach() - check perms generically */
+ tcred = __task_cred(tsk);
+ if (cred->euid != tcred->uid &&
+ cred->euid != tcred->suid) {
+ return -EACCES;
+ }
}
}
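The euid-vs-uid/suid test above is repeated, with small variations, in each subsystem hunk below. A minimal sketch of how it could be shared, assuming a hypothetical helper name (cgroup_allow_attach_default() is not part of this patch); unlike the open-coded checks, the sketch takes rcu_read_lock() around __task_cred(), which that accessor expects:

#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/*
 * Hypothetical helper (name illustrative): permit the attach if the
 * caller is moving itself or holds CAP_SYS_ADMIN; otherwise require
 * that the caller's euid match the target's real or saved uid.
 */
static int cgroup_allow_attach_default(struct task_struct *tsk)
{
	const struct cred *cred = current_cred(), *tcred;
	int ret = 0;

	if (current == tsk || capable(CAP_SYS_ADMIN))
		return 0;

	rcu_read_lock();
	tcred = __task_cred(tsk);
	if (cred->euid != tcred->uid && cred->euid != tcred->suid)
		ret = -EACCES;
	rcu_read_unlock();

	return ret;
}

A subsystem that prefers a different override, as the cpu controller below does with CAP_SYS_NICE, would substitute its own capable() check.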
static int attach_task_by_pid(struct cgroup *cgrp, u64 pid)
{
struct task_struct *tsk;
- const struct cred *cred = current_cred(), *tcred;
int ret;
if (pid) {
rcu_read_lock();
tsk = find_task_by_vpid(pid);
if (!tsk || tsk->flags & PF_EXITING) {
rcu_read_unlock();
return -ESRCH;
}
-
- tcred = __task_cred(tsk);
- if (cred->euid &&
- cred->euid != tcred->uid &&
- cred->euid != tcred->suid) {
- rcu_read_unlock();
- return -EACCES;
- }
get_task_struct(tsk);
rcu_read_unlock();
} else {
{
struct freezer *freezer;
+ if ((current != task) && (!capable(CAP_SYS_ADMIN))) {
+ const struct cred *cred = current_cred(), *tcred;
+
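+ /* Only CAP_SYS_ADMIN or a matching euid (real or saved uid) may move another task */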
+ tcred = __task_cred(task);
+ if (cred->euid != tcred->uid && cred->euid != tcred->suid)
+ return -EPERM;
+ }
+
/*
* Anything frozen can't move or be moved to/from.
*
int ret;
struct cpuset *cs = cgroup_cs(cont);
+ if ((current != task) && (!capable(CAP_SYS_ADMIN))) {
+ const struct cred *cred = current_cred(), *tcred;
+
+ /* Only CAP_SYS_ADMIN or a matching euid (real or saved uid) may move another task */
+ tcred = __task_cred(task);
+ if (cred->euid != tcred->uid && cred->euid != tcred->suid)
+ return -EPERM;
+ }
+
if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
return -ENOSPC;
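For reference, these per-subsystem checks gate writes to a cgroup's tasks file; a minimal userspace sketch, assuming an illustrative cpuset mount at /dev/cpuset and a made-up target pid:

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Path and pid are illustrative only. */
	FILE *f = fopen("/dev/cpuset/mygroup/tasks", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/*
	 * With the checks above, moving a task owned by another user now
	 * fails with EPERM (or EACCES from the generic fallback) unless
	 * the writer has the required capability or a matching euid.
	 */
	if (fprintf(f, "%d\n", 1234) < 0 || fclose(f) == EOF) {
		fprintf(stderr, "attach failed: %s\n", strerror(errno));
		return 1;
	}
	return 0;
}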
static int
cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
+ if ((current != tsk) && (!capable(CAP_SYS_NICE))) {
+ const struct cred *cred = current_cred(), *tcred;
+
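+ /* Moving another task between cpu cgroups needs CAP_SYS_NICE or a matching euid */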
+ tcred = __task_cred(tsk);
+
+ if (cred->euid != tcred->uid && cred->euid != tcred->suid)
+ return -EPERM;
+ }
+
#ifdef CONFIG_RT_GROUP_SCHED
if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
return -EINVAL;
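CAP_SYS_NICE, not CAP_SYS_ADMIN, is the override here because it is the capability that already permits changing another task's nice value, scheduling policy, and affinity, which is essentially what moving it between cpu cgroups does. For context, the sched_rt_can_attach() test referenced above refuses realtime tasks when the destination group has no RT runtime; roughly (a paraphrase for context, not part of this patch):

/* Paraphrase of the existing CONFIG_RT_GROUP_SCHED check in kernel/sched.c. */
static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
	/* Don't accept realtime tasks when there is no way for them to run. */
	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
		return 0;

	return 1;
}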