/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");
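/*
 * Two structures back the classifier: cls_bpf_head is the per-tcf_proto
 * root, holding the RCU-protected list of installed programs plus a
 * generator counter for auto-assigned handles; cls_bpf_prog is a single
 * installed filter, keeping both the compiled (possibly JITed) program
 * and a copy of the raw opcodes so they can be dumped back to userspace.
 */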
struct cls_bpf_head {
        struct list_head plist;
        u32 hgen;
        struct rcu_head rcu;
};
struct cls_bpf_prog {
        struct bpf_prog *filter;
        struct sock_filter *bpf_ops;
        struct tcf_exts exts;
        struct tcf_result res;
        struct list_head link;
        u32 handle;
        u16 bpf_len;
        struct tcf_proto *tp;
        struct rcu_head rcu;
};
static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
        [TCA_BPF_CLASSID]       = { .type = NLA_U32 },
        [TCA_BPF_OPS_LEN]       = { .type = NLA_U16 },
        [TCA_BPF_OPS]           = { .type = NLA_BINARY,
                                    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};
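/*
 * Fast path: called from the packet hot path under rcu_read_lock_bh(),
 * hence the rcu_dereference_bh() below. The BPF return value selects
 * the verdict: 0 means no match (try the next program), -1 keeps the
 * classid configured via TCA_BPF_CLASSID, and any other value overrides
 * the classid with the program's output.
 */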
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                            struct tcf_result *res)
{
        struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
        struct cls_bpf_prog *prog;
        int ret;

        list_for_each_entry_rcu(prog, &head->plist, link) {
                int filter_res = BPF_PROG_RUN(prog->filter, skb);

                if (filter_res == 0)
                        continue;

                *res = prog->res;
                if (filter_res != -1)
                        res->classid = filter_res;

                ret = tcf_exts_exec(skb, &prog->exts, res);
                if (ret < 0)
                        continue;

                return ret;
        }

        return -1;
}
static int cls_bpf_init(struct tcf_proto *tp)
{
        struct cls_bpf_head *head;

        head = kzalloc(sizeof(*head), GFP_KERNEL);
        if (head == NULL)
                return -ENOBUFS;

        INIT_LIST_HEAD_RCU(&head->plist);
        rcu_assign_pointer(tp->root, head);

        return 0;
}
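/*
 * Teardown is RCU-deferred: writers unlink a program from the list and
 * schedule __cls_bpf_delete_prog() via call_rcu(), so a concurrent
 * cls_bpf_classify() still walking the list never touches freed memory.
 */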
static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
        tcf_exts_destroy(&prog->exts);

        bpf_prog_destroy(prog->filter);

        kfree(prog->bpf_ops);
        kfree(prog);
}
static void __cls_bpf_delete_prog(struct rcu_head *rcu)
{
        struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

        cls_bpf_delete_prog(prog->tp, prog);
}
static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
        struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;

        list_del_rcu(&prog->link);
        tcf_unbind_filter(tp, &prog->res);
        call_rcu(&prog->rcu, __cls_bpf_delete_prog);
        return 0;
}
static void cls_bpf_destroy(struct tcf_proto *tp)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog, *tmp;

        list_for_each_entry_safe(prog, tmp, &head->plist, link) {
                list_del_rcu(&prog->link);
                tcf_unbind_filter(tp, &prog->res);
                call_rcu(&prog->rcu, __cls_bpf_delete_prog);
        }

        RCU_INIT_POINTER(tp->root, NULL);
        kfree_rcu(head, rcu);
}
static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog;
        unsigned long ret = 0UL;

        if (head == NULL)
                return 0UL;

        list_for_each_entry(prog, &head->plist, link) {
                if (prog->handle == handle) {
                        ret = (unsigned long) prog;
                        break;
                }
        }

        return ret;
}
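/*
 * Builds a new or replacement program from the TCA_BPF_* attributes:
 * validates any attached actions, bounds-checks and copies the opcode
 * array from the netlink payload, then hands it to bpf_prog_create(),
 * which verifies the classic BPF filter and may JIT-compile it.
 */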
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
                                   struct cls_bpf_prog *prog,
                                   unsigned long base, struct nlattr **tb,
                                   struct nlattr *est, bool ovr)
{
        struct sock_filter *bpf_ops;
        struct tcf_exts exts;
        struct sock_fprog_kern tmp;
        struct bpf_prog *fp;
        u16 bpf_size, bpf_len;
        u32 classid;
        int ret;

        if (!tb[TCA_BPF_OPS_LEN] || !tb[TCA_BPF_OPS] || !tb[TCA_BPF_CLASSID])
                return -EINVAL;

        tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
        ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
        if (ret < 0)
                return ret;
        classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
        bpf_len = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
        if (bpf_len > BPF_MAXINSNS || bpf_len == 0) {
                ret = -EINVAL;
                goto errout;
        }

        bpf_size = bpf_len * sizeof(*bpf_ops);
        bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
        if (bpf_ops == NULL) {
                ret = -ENOMEM;
                goto errout;
        }

        memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);
        tmp.len = bpf_len;
        tmp.filter = bpf_ops;

        ret = bpf_prog_create(&fp, &tmp);
        if (ret)
                goto errout_free;

        prog->bpf_len = bpf_len;
        prog->bpf_ops = bpf_ops;
        prog->filter = fp;
        prog->res.classid = classid;
        prog->tp = tp;

        tcf_bind_filter(tp, &prog->res, base);
        tcf_exts_change(tp, &prog->exts, &exts);

        return 0;

errout_free:
        kfree(bpf_ops);
errout:
        tcf_exts_destroy(&exts);
        return ret;
}
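/*
 * Auto-generated handles come from 1..0x7FFFFFFE; the countdown starting
 * at 0x80000000 bounds the search so an exhausted handle space cannot
 * hang the rtnl path.
 */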
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
                                   struct cls_bpf_head *head)
{
        unsigned int i = 0x80000000;
        u32 handle;

        do {
                if (++head->hgen == 0x7FFFFFFF)
                        head->hgen = 1;
        } while (--i > 0 && cls_bpf_get(tp, head->hgen));

        if (unlikely(i == 0)) {
                pr_err("Insufficient number of handles\n");
                handle = 0;
        } else {
                handle = head->hgen;
        }

        return handle;
}
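/*
 * change() covers both create and replace. A replacement program is
 * fully constructed before being swapped in with list_replace_rcu(),
 * and the old one is freed only after a grace period, so readers always
 * observe either the complete old or the complete new program.
 */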
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                          struct tcf_proto *tp, unsigned long base,
                          u32 handle, struct nlattr **tca,
                          unsigned long *arg, bool ovr)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
        struct nlattr *tb[TCA_BPF_MAX + 1];
        struct cls_bpf_prog *prog;
        int ret;

        if (tca[TCA_OPTIONS] == NULL)
                return -EINVAL;

        ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
        if (ret < 0)
                return ret;

        prog = kzalloc(sizeof(*prog), GFP_KERNEL);
        if (prog == NULL)
                return -ENOBUFS;

        tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);

        if (oldprog) {
                if (handle && oldprog->handle != handle) {
                        ret = -EINVAL;
                        goto errout;
                }
        }

        if (handle == 0)
                prog->handle = cls_bpf_grab_new_handle(tp, head);
        else
                prog->handle = handle;
        if (prog->handle == 0) {
                ret = -EINVAL;
                goto errout;
        }

        ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
        if (ret < 0)
                goto errout;

        if (oldprog) {
                /* list_replace_rcu() takes (old, new) */
                list_replace_rcu(&oldprog->link, &prog->link);
                tcf_unbind_filter(tp, &oldprog->res);
                call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
        } else {
                list_add_rcu(&prog->link, &head->plist);
        }

        *arg = (unsigned long) prog;
        return 0;

errout:
        kfree(prog);
        return ret;
}
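/*
 * dump() mirrors change(): it emits the classid, the opcode count and
 * the raw opcode array (reserved first, then filled via memcpy), plus
 * any attached actions, inside a nested TCA_OPTIONS attribute.
 */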
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
                        struct sk_buff *skb, struct tcmsg *tm)
{
        struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
        struct nlattr *nest, *nla;

        if (prog == NULL)
                return skb->len;

        tm->tcm_handle = prog->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
                goto nla_put_failure;
        if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_len))
                goto nla_put_failure;

        nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_len *
                          sizeof(struct sock_filter));
        if (nla == NULL)
                goto nla_put_failure;

        memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

        if (tcf_exts_dump(skb, &prog->exts) < 0)
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}
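/* Walks all programs under RTNL, honoring the walker's skip/stop protocol. */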
static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog;

        list_for_each_entry(prog, &head->plist, link) {
                if (arg->count < arg->skip)
                        goto skip;
                if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
skip:
                arg->count++;
        }
}
static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
        .kind           =       "bpf",
        .owner          =       THIS_MODULE,
        .classify       =       cls_bpf_classify,
        .init           =       cls_bpf_init,
        .destroy        =       cls_bpf_destroy,
        .get            =       cls_bpf_get,
        .change         =       cls_bpf_change,
        .delete         =       cls_bpf_delete,
        .walk           =       cls_bpf_walk,
        .dump           =       cls_bpf_dump,
};
static int __init cls_bpf_init_mod(void)
{
        return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
        unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);
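/*
 * Illustrative userspace usage (an assumption, not part of this file;
 * exact syntax depends on the iproute2 version): attach a one-instruction
 * classic BPF program "ret #-1" (match everything, keep the configured
 * classid) as a filter:
 *
 *      tc filter add dev em1 parent 1: bpf bytecode '1,6 0 0 4294967295' flowid 1:1
 *
 * The bytecode string is the "<count>,<code> <jt> <jf> <k>,..." format
 * emitted by tools such as bpf_asm or tcpdump -ddd.
 */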