/* #define SECCOMP_DEBUG 1 */
-#ifdef CONFIG_SECCOMP_FILTER
+#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
#include <asm/syscall.h>
+#endif
+
+#ifdef CONFIG_SECCOMP_FILTER
#include <linux/filter.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
struct seccomp_filter {
atomic_t usage;
struct seccomp_filter *prev;
- struct sk_filter *prog;
+ struct bpf_prog *prog;
};
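For orientation (not part of this patch): seccomp filters form a singly linked, refcounted stack per task. The most recently installed filter sits at the head and reaches older ones through ->prev. A minimal sketch of the attach step, with push_filter as a hypothetical name for what the attach path does at its core:

	/* Hypothetical sketch -- push a freshly prepared filter onto the
	 * head of the task's chain; the real attach path holds locking
	 * around this. */
	static void push_filter(struct task_struct *task,
				struct seccomp_filter *filter)
	{
		filter->prev = task->seccomp.filter; /* chain to older filters */
		task->seccomp.filter = filter;       /* newest runs first */
	}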
/* Limit any path through the tree to 256KB worth of instructions. */
* @filter: filter to verify
* @flen: length of filter
*
- * Takes a previously checked filter (by sk_chk_filter) and
+ * Takes a previously checked filter (by bpf_check_classic) and
* redirects all filter code that loads struct sk_buff data
* and related data through seccomp_bpf_load. It also
* enforces length and alignment checking of those loads.
*
* Returns valid seccomp BPF response codes.
*/
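A concrete illustration of the check described above: in a seccomp program, a classic-BPF absolute load is only legal if its offset is 4-byte aligned and falls inside struct seccomp_data (64 bytes in this era):

	/* ld [0]  -> reads seccomp_data.nr (the syscall number): accepted.
	 * ld [1]  -> misaligned offset (1 & 3 != 0): rejected, -EINVAL.
	 * ld [64] -> beyond sizeof(struct seccomp_data): rejected. */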
-static u32 seccomp_run_filters(int syscall)
+static u32 seccomp_run_filters(void)
{
struct seccomp_filter *f = ACCESS_ONCE(current->seccomp.filter);
struct seccomp_data sd;
* value always takes priority (ignoring the DATA).
*/
for (; f; f = f->prev) {
- u32 cur_ret = SK_RUN_FILTER(f->prog, (void *)&sd);
+ u32 cur_ret = BPF_PROG_RUN(f->prog, (void *)&sd);
if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
ret = cur_ret;
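Why the `<` comparison implements "most restrictive action wins": the action values in <uapi/linux/seccomp.h> are ordered so that numerically smaller means higher priority:

	#define SECCOMP_RET_KILL	0x00000000U /* kill the task immediately */
	#define SECCOMP_RET_TRAP	0x00030000U /* disallow and force a SIGSYS */
	#define SECCOMP_RET_ERRNO	0x00050000U /* returns an errno */
	#define SECCOMP_RET_TRACE	0x7ff00000U /* pass to a tracer or disallow */
	#define SECCOMP_RET_ALLOW	0x7fff0000U /* allow */

So if one stacked filter returns SECCOMP_RET_ALLOW and another returns SECCOMP_RET_ERRNO | EPERM, the ERRNO action survives the loop because 0x00050000 < 0x7fff0000.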
static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
{
- BUG_ON(!spin_is_locked(&current->sighand->siglock));
+ assert_spin_locked(&current->sighand->siglock);
if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
return false;
static inline void seccomp_assign_mode(struct task_struct *task,
unsigned long seccomp_mode)
{
- BUG_ON(!spin_is_locked(&task->sighand->siglock));
+ assert_spin_locked(&task->sighand->siglock);
task->seccomp.mode = seccomp_mode;
/*
struct task_struct *thread, *caller;
BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
- BUG_ON(!spin_is_locked(&current->sighand->siglock));
+ assert_spin_locked(&current->sighand->siglock);
/* Validate all threads being eligible for synchronization. */
caller = current;
struct task_struct *thread, *caller;
BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
- BUG_ON(!spin_is_locked(&current->sighand->siglock));
+ assert_spin_locked(&current->sighand->siglock);
/* Synchronize all threads. */
caller = current;
goto free_prog;
/* Check and rewrite the fprog via the skb checker */
- ret = sk_chk_filter(fp, fprog->len);
+ ret = bpf_check_classic(fp, fprog->len);
if (ret)
goto free_prog;
if (ret)
goto free_prog;
- /* Convert 'sock_filter' insns to 'sock_filter_int' insns */
- ret = sk_convert_filter(fp, fprog->len, NULL, &new_len);
+ /* Convert 'sock_filter' insns to 'bpf_insn' insns */
+ ret = bpf_convert_filter(fp, fprog->len, NULL, &new_len);
if (ret)
goto free_prog;
if (!filter)
goto free_prog;
- filter->prog = kzalloc(sk_filter_size(new_len),
+ filter->prog = kzalloc(bpf_prog_size(new_len),
GFP_KERNEL|__GFP_NOWARN);
if (!filter->prog)
goto free_filter;
- ret = sk_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
+ ret = bpf_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
if (ret)
goto free_filter_prog;
kfree(fp);
atomic_set(&filter->usage, 1);
filter->prog->len = new_len;
- sk_filter_select_runtime(filter->prog);
+ bpf_prog_select_runtime(filter->prog);
return filter;
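For context, a hedged userspace sketch of the call path this function serves: installing a classic-BPF program through prctl(2). The filter below allows every syscall; install_allow_all is an illustrative name, not kernel or libc API.

	#include <stddef.h>
	#include <linux/filter.h>
	#include <linux/seccomp.h>
	#include <sys/prctl.h>

	static int install_allow_all(void)
	{
		struct sock_filter insns[] = {
			/* load seccomp_data.nr, just to show data access */
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 offsetof(struct seccomp_data, nr)),
			/* unconditionally allow the syscall */
			BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
		};
		struct sock_fprog prog = {
			.len = sizeof(insns) / sizeof(insns[0]),
			.filter = insns,
		};

		/* required unless the caller has CAP_SYS_ADMIN */
		if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
			return -1;
		return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	}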
unsigned long total_insns;
struct seccomp_filter *walker;
- BUG_ON(!spin_is_locked(&current->sighand->siglock));
+ assert_spin_locked(&current->sighand->siglock);
/* Validate resulting filter length. */
total_insns = filter->prog->len;
static inline void seccomp_filter_free(struct seccomp_filter *filter)
{
if (filter) {
- sk_filter_free(filter->prog);
+ bpf_prog_free(filter->prog);
kfree(filter);
}
}
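The matching release side, shown as a sketch mirroring put_seccomp_filter() elsewhere in this file: each task holds one reference on the head of its chain, and frees propagate down ->prev only while refcounts drop to zero.

	/* Drop the task's reference; free each node whose count reaches
	 * zero, stopping at the first filter still shared by another task. */
	void put_seccomp_filter(struct task_struct *tsk)
	{
		struct seccomp_filter *orig = tsk->seccomp.filter;

		while (orig && atomic_dec_and_test(&orig->usage)) {
			struct seccomp_filter *freeme = orig;

			orig = orig->prev;
			seccomp_filter_free(freeme);
		}
	}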
};
#endif
-int __secure_computing(int this_syscall)
+static void __secure_computing_strict(int this_syscall)
+{
+ int *syscall_whitelist = mode1_syscalls;
+#ifdef CONFIG_COMPAT
+ if (is_compat_task())
+ syscall_whitelist = mode1_syscalls_32;
+#endif
+ do {
+ if (*syscall_whitelist == this_syscall)
+ return;
+ } while (*++syscall_whitelist);
+
+#ifdef SECCOMP_DEBUG
+ dump_stack();
+#endif
+ audit_seccomp(this_syscall, SIGKILL, SECCOMP_RET_KILL);
+ do_exit(SIGKILL);
+}
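For reference, the whitelist this loop walks is the zero-terminated array defined earlier in the file (unchanged by this patch); the do/while above relies on the trailing 0:

	static int mode1_syscalls[] = {
		__NR_seccomp_read, __NR_seccomp_write,
		__NR_seccomp_exit, __NR_seccomp_sigreturn,
		0, /* null terminated */
	};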
+
+#ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
+void secure_computing_strict(int this_syscall)
+{
+ int mode = current->seccomp.mode;
+
+ if (mode == 0)
+ return;
+ else if (mode == SECCOMP_MODE_STRICT)
+ __secure_computing_strict(this_syscall);
+ else
+ BUG();
+}
+#else
+int __secure_computing(void)
{
+ struct pt_regs *regs = task_pt_regs(current);
+ int this_syscall = syscall_get_nr(current, regs);
int exit_sig = 0;
- int *syscall;
u32 ret;
/*
switch (current->seccomp.mode) {
case SECCOMP_MODE_STRICT:
- syscall = mode1_syscalls;
-#ifdef CONFIG_COMPAT
- if (is_compat_task())
- syscall = mode1_syscalls_32;
-#endif
- do {
- if (*syscall == this_syscall)
- return 0;
- } while (*++syscall);
- exit_sig = SIGKILL;
- ret = SECCOMP_RET_KILL;
- break;
+ __secure_computing_strict(this_syscall);
+ return 0;
#ifdef CONFIG_SECCOMP_FILTER
case SECCOMP_MODE_FILTER: {
int data;
- struct pt_regs *regs = task_pt_regs(current);
- ret = seccomp_run_filters(this_syscall);
+ ret = seccomp_run_filters();
data = ret & SECCOMP_RET_DATA;
ret &= SECCOMP_RET_ACTION;
switch (ret) {
#ifdef CONFIG_SECCOMP_FILTER
skip:
audit_seccomp(this_syscall, exit_sig, ret);
-#endif
return -1;
+#endif
}
+#endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */
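Usage note, as a hedged userspace sketch: strict mode, the path __secure_computing_strict() enforces, is entered with a single prctl(2). Afterwards only read(2), write(2), _exit(2), and sigreturn(2) remain callable; any other syscall is fatal. enter_strict_mode is an illustrative name.

	#include <sys/prctl.h>
	#include <linux/seccomp.h>

	/* Returns 0 on success, -1 with errno set on failure. */
	static int enter_strict_mode(void)
	{
		return prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT);
	}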
long prctl_get_seccomp(void)
{