/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 *	Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 *	- The Berkeley Packet Filter
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/reciprocal_div.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;
	return NULL;
}

static inline void *load_pointer(const struct sk_buff *skb, int k,
				 unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);
	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}
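/*
 * Illustrative only: classic BPF loads may use the special negative offsets
 * from <linux/filter.h> to address the link-layer or network header instead
 * of skb->data.  A minimal sketch of such an instruction, assuming the
 * usual BPF_STMT() helper:
 *
 *	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 0)
 *
 * loads the first byte of the network header; because k is negative,
 * load_pointer() above hands it to bpf_internal_load_pointer_neg_helper().
 */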
/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = SK_RUN_FILTER(filter, skb);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter);
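/*
 * Illustrative only: a protocol receive path typically calls sk_filter()
 * just before queueing an skb to the socket and frees the buffer when the
 * filter (or one of the checks above) rejects it.  A minimal sketch, not
 * copied from any particular protocol:
 *
 *	err = sk_filter(sk, skb);
 *	if (err) {
 *		kfree_skb(skb);
 *		return err;
 *	}
 */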
/**
 *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@fentry: filter to apply
 *
 * Decode and apply filter instructions to the skb->data.
 * Return length to keep, 0 for none. @skb is the data we are
 * filtering, @fentry is the array of filter instructions.
 * Because all jumps are guaranteed to be before the last instruction,
 * and the last instruction is guaranteed to be a RET, we don't need to
 * check flen. (We used to pass the length of the filter to this function.)
 */
unsigned int sk_run_filter(const struct sk_buff *skb,
			   const struct sock_filter *fentry)
{
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	u32 tmp;
	int k;

	/*
	 * Process array of filter instructions.
	 */
	for (;; fentry++) {
#if defined(CONFIG_X86_32)
#define K (fentry->k)
#else
		const u32 K = fentry->k;
#endif

		switch (fentry->code) {
		case BPF_S_ALU_ADD_X:
			A += X;
			continue;
		case BPF_S_ALU_ADD_K:
			A += K;
			continue;
		case BPF_S_ALU_SUB_X:
			A -= X;
			continue;
		case BPF_S_ALU_SUB_K:
			A -= K;
			continue;
		case BPF_S_ALU_MUL_X:
			A *= X;
			continue;
		case BPF_S_ALU_MUL_K:
			A *= K;
			continue;
		case BPF_S_ALU_DIV_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_S_ALU_DIV_K:
			/* K holds reciprocal_value(divisor), see sk_chk_filter() */
			A = reciprocal_divide(A, K);
			continue;
		case BPF_S_ALU_AND_X:
			A &= X;
			continue;
		case BPF_S_ALU_AND_K:
			A &= K;
			continue;
		case BPF_S_ALU_LSH_X:
			A <<= X;
			continue;
		case BPF_S_ALU_LSH_K:
			A <<= K;
			continue;
		case BPF_S_ALU_RSH_X:
			A >>= X;
			continue;
		case BPF_S_ALU_RSH_K:
			A >>= K;
			continue;
		case BPF_S_JMP_JGT_K:
			fentry += (A > K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			fentry += (A >= K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			fentry += (A == K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			fentry += (A & K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			fentry += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			fentry += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			fentry += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			fentry += (A & X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_LD_W_ABS:
			k = K;
load_w:
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be32(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_H_ABS:
			k = K;
load_h:
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be16(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_B_ABS:
			k = K;
load_b:
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			return 0;
		case BPF_S_LD_W_IND:
			k = X + K;
			goto load_w;
		case BPF_S_LD_H_IND:
			k = X + K;
			goto load_h;
		case BPF_S_LD_B_IND:
			k = X + K;
			goto load_b;
		case BPF_S_LDX_W_LEN:
			X = skb->len;
			continue;
		case BPF_S_LDX_B_MSH:
			ptr = load_pointer(skb, K, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_S_RET_K:
			return K;
		case BPF_S_RET_A:
			return A;
		case BPF_S_ANC_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case BPF_S_ANC_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case BPF_S_ANC_IFINDEX:
			if (!skb->dev)
				return 0;
			A = skb->dev->ifindex;
			continue;
		case BPF_S_ANC_QUEUE:
			A = skb->queue_mapping;
			continue;
		case BPF_S_ANC_HATYPE:
			if (!skb->dev)
				return 0;
			A = skb->dev->type;
			continue;
		case BPF_S_ANC_RXHASH:
			A = skb->rxhash;
			continue;
		case BPF_S_ANC_CPU:
			A = raw_smp_processor_id();
			continue;
		case BPF_S_ANC_ALU_XOR_X:
			A ^= X;
			continue;
		case BPF_S_ANC_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case BPF_S_ANC_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = (struct nlattr *)&skb->data[A];
			if (nla->nla_len > skb->len - A)
				return 0;

			nla = nla_find_nested(nla, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
#ifdef CONFIG_SECCOMP_FILTER
		case BPF_S_ANC_SECCOMP_LD_W:
			A = seccomp_bpf_load(fentry->k);
			continue;
#endif
		default:
			WARN_RATELIMIT(1, "Unknown code:%u jt:%u jf:%u k:%u\n",
				       fentry->code, fentry->jt,
				       fentry->jf, fentry->k);
			return 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sk_run_filter);
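/*
 * Illustrative only: the classic "keep IPv4, drop everything else" program,
 * written with the BPF_STMT()/BPF_JUMP() helpers from <linux/filter.h>
 * (ETH_P_IP from <linux/if_ether.h>).  After sk_chk_filter() has translated
 * the opcodes, sk_run_filter() executes it one instruction at a time:
 *
 *	struct sock_filter prog[] = {
 *		BPF_STMT(BPF_LD | BPF_H | BPF_ABS, SKF_AD_OFF + SKF_AD_PROTOCOL),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_IP, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),	accept, keep up to 0xffff bytes
 *		BPF_STMT(BPF_RET | BPF_K, 0),		drop
 *	};
 */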
/*
 * Security:
 * A BPF program is able to use 16 cells of memory to store intermediate
 * values (see u32 mem[BPF_MEMWORDS] in sk_run_filter()).
 * As we don't want to clear the mem[] array for each packet going through
 * sk_run_filter(), we check that a filter loaded by the user never tries
 * to read a cell that was not previously written, and we check all
 * branches to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);
	masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;
	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_S_ST:
		case BPF_S_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_S_JMP_JA:
			/* a jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* a jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
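/*
 * Illustrative only: check_load_and_stores() rejects, for example, a program
 * whose first instruction reads scratch memory,
 *
 *	BPF_STMT(BPF_LD | BPF_MEM, 0),		reads M[0] before any store
 *	BPF_STMT(BPF_RET | BPF_A, 0),
 *
 * because bit 0 of memvalid is still clear when the load is checked; the
 * same program preceded by BPF_STMT(BPF_ST, 0) is accepted.
 */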
/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
{
	/*
	 * Valid instructions are initialized to non-0.
	 * Invalid instructions are initialized to 0.
	 */
	static const u8 codes[] = {
		[BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K,
		[BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X,
		[BPF_ALU|BPF_SUB|BPF_K] = BPF_S_ALU_SUB_K,
		[BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X,
		[BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K,
		[BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X,
		[BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X,
		[BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K,
		[BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X,
		[BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K,
		[BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X,
		[BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K,
		[BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X,
		[BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K,
		[BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X,
		[BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG,
		[BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS,
		[BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS,
		[BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS,
		[BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN,
		[BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND,
		[BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND,
		[BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND,
		[BPF_LD|BPF_IMM] = BPF_S_LD_IMM,
		[BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN,
		[BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH,
		[BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM,
		[BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX,
		[BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA,
		[BPF_RET|BPF_K] = BPF_S_RET_K,
		[BPF_RET|BPF_A] = BPF_S_RET_A,
		[BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K,
		[BPF_LD|BPF_MEM] = BPF_S_LD_MEM,
		[BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM,
		[BPF_ST] = BPF_S_ST,
		[BPF_STX] = BPF_S_STX,
		[BPF_JMP|BPF_JA] = BPF_S_JMP_JA,
		[BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K,
		[BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X,
		[BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K,
		[BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X,
		[BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K,
		[BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X,
		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
	};
	int pc;

	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;
	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;

		if (code >= ARRAY_SIZE(codes))
			return -EINVAL;
		code = codes[code];
		if (!code)
			return -EINVAL;
		/* Some instructions need special checks */
		switch (code) {
		case BPF_S_ALU_DIV_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			ftest->k = reciprocal_value(ftest->k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
			/* check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_S_JMP_JA:
			/*
			 * Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* for conditionals both must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_S_LD_W_ABS:
		case BPF_S_LD_H_ABS:
		case BPF_S_LD_B_ABS:
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE:	\
				code = BPF_S_ANC_##CODE;	\
				break
			switch (ftest->k) {
			ANCILLARY(PROTOCOL);
			ANCILLARY(PKTTYPE);
			ANCILLARY(IFINDEX);
			ANCILLARY(NLATTR);
			ANCILLARY(NLATTR_NEST);
			ANCILLARY(MARK);
			ANCILLARY(QUEUE);
			ANCILLARY(HATYPE);
			ANCILLARY(RXHASH);
			ANCILLARY(CPU);
			ANCILLARY(ALU_XOR_X);
			}
		}
		ftest->code = code;
	}

	/* last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_S_RET_K:
	case BPF_S_RET_A:
		return check_load_and_stores(filter, flen);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
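/*
 * Note on BPF_S_ALU_DIV_K above: sk_chk_filter() replaces the user-supplied
 * divisor K with reciprocal_value(K) so that sk_run_filter() can divide with
 * a multiply and a shift.  A worked example, assuming the classic
 * reciprocal_div helpers where reciprocal_divide(A, R) computes
 * (u32)(((u64)A * R) >> 32):
 *
 *	K = 7   ->  R = reciprocal_value(7) = 0x24924925
 *	A = 100 ->  ((u64)100 * 0x24924925) >> 32 = 14   (== 100 / 7)
 */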
/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	bpf_jit_free(fp);
	kfree(fp);
}
EXPORT_SYMBOL(sk_filter_release_rcu);
static int __sk_prepare_filter(struct sk_filter *fp)
{
	int err;

	fp->bpf_func = sk_run_filter;

	err = sk_chk_filter(fp->insns, fp->len);
	if (err)
		return err;

	bpf_jit_compile(fp);
	return 0;
}
/**
 *	sk_unattached_filter_create - create an unattached filter
 *	@fprog: the filter program
 *	@pfp: the unattached filter that is created
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int sk_unattached_filter_create(struct sk_filter **pfp,
				struct sock_fprog *fprog)
{
	struct sk_filter *fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	memcpy(fp->insns, fprog->filter, fsize);

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;

	err = __sk_prepare_filter(fp);
	if (err)
		goto free_mem;

	*pfp = fp;
	return 0;
free_mem:
	kfree(fp);
	return err;
}
EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
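/*
 * Illustrative only: an in-kernel user (the team driver's load-balancing
 * mode is one) builds a struct sock_fprog around instructions it already
 * holds in kernel memory and keeps the filter without any socket.
 * A minimal sketch:
 *
 *	struct sock_fprog fprog = { .len = len, .filter = insns };
 *	struct sk_filter *fp;
 *
 *	if (sk_unattached_filter_create(&fp, &fprog) == 0) {
 *		... SK_RUN_FILTER(fp, skb) ...
 *		sk_unattached_filter_destroy(fp);
 *	}
 */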
void sk_unattached_filter_destroy(struct sk_filter *fp)
{
	sk_filter_release(fp);
}
EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, fsize+sizeof(*fp));
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;

	err = __sk_prepare_filter(fp);
	if (err) {
		sk_filter_uncharge(sk, fp);
		return err;
	}

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);
	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
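/*
 * Illustrative only: sk_attach_filter() is reached from sock_setsockopt()
 * when userspace attaches a classic BPF program, roughly:
 *
 *	struct sock_fprog fprog = {
 *		.len	= sizeof(prog) / sizeof(prog[0]),
 *		.filter	= prog,
 *	};
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
 *
 * where prog[] is an array of struct sock_filter as in the example next to
 * sk_run_filter().  SO_DETACH_FILTER takes the corresponding path into
 * sk_detach_filter() below.
 */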
int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);