rcu_assign_pointer(*walk, rtnl_dereference(f->next));
}
tcf_unbind_filter(tp, &r->res);
- tcf_exts_destroy(tp, &r->exts);
+ tcf_exts_destroy(&r->exts);
if (f)
kfree_rcu(f, rcu);
return 0;
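
For readers less familiar with the pattern above: the filter is unlinked by
publishing its successor with rcu_assign_pointer(), and the memory is only
reclaimed after a grace period via kfree_rcu(), so concurrent lockless
lookups that still hold a pointer to it can finish safely. A minimal sketch
of the same unlink-and-defer idiom, using a hypothetical my_filter struct
(writers assumed to be serialized by RTNL, as in cls_tcindex):

	struct my_filter {
		struct my_filter __rcu *next;
		u32 key;
		struct rcu_head rcu;	/* needed by kfree_rcu() */
	};

	static void my_filter_unlink(struct my_filter __rcu **walk,
				     struct my_filter *f)
	{
		/* Unlink: new readers now skip 'f' entirely. */
		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
		/* Free only after all pre-existing readers are done. */
		kfree_rcu(f, rcu);
	}
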
if (err < 0)
return err;
+ err = -ENOMEM;
/* tcindex_data attributes must look atomic to classifier/lookup so
* allocate new tcindex data and RCU assign it onto root. Keeping
* perfect hash and hash pointers from old data.
*/
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
- if (!cp) {
- err = -ENOMEM;
+ if (!cp)
goto errout;
- }
	cp->mask = p->mask;
	cp->shift = p->shift;
	cp->hash = p->hash;
	cp->tp = tp;
if (p->perfect) {
+ int i;
+
cp->perfect = kmemdup(p->perfect,
sizeof(*r) * cp->hash, GFP_KERNEL);
if (!cp->perfect)
goto errout;
+ for (i = 0; i < cp->hash; i++)
+ tcf_exts_init(&cp->perfect[i].exts,
+ TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
balloc = 1;
}
cp->h = p->h;
	if (!cp->hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
			cp->hash = (cp->mask >> cp->shift) + 1;
		else
			cp->hash = DEFAULT_HASH_SIZE;
	}
- if (!cp->perfect && cp->h)
+ if (!cp->perfect && !cp->h)
cp->alloc_hash = cp->hash;
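
The "must look atomic to classifier/lookup" comment above is the standard
RCU copy/update idiom: build a fully initialized replacement object off to
the side, then swap it in with a single rcu_assign_pointer() so a lockless
lookup never observes a half-updated structure. A minimal sketch under the
same assumptions (hypothetical my_params struct, writers serialized by
RTNL):

	struct my_params {
		u16 mask;
		int shift;
		struct rcu_head rcu;
	};

	static int my_params_update(struct my_params __rcu **root,
				    u16 mask, int shift)
	{
		struct my_params *old = rtnl_dereference(*root);
		struct my_params *cp = kzalloc(sizeof(*cp), GFP_KERNEL);

		if (!cp)
			return -ENOMEM;
		cp->mask = mask;	/* initialize the copy completely... */
		cp->shift = shift;
		rcu_assign_pointer(*root, cp);	/* ...then publish it */
		if (old)
			kfree_rcu(old, rcu);
		return 0;
	}
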
	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter, the
	 * check below is safer (and also more backwards-compatible).
	 */
f = kzalloc(sizeof(*f), GFP_KERNEL);
if (!f)
goto errout_alloc;
+ f->key = handle;
+ tcindex_filter_result_init(&f->result);
+ f->next = NULL;
}
	if (f) {
struct tcindex_filter *nfp;
struct tcindex_filter __rcu **fp;
- f->key = handle;
- f->result = new_filter_result;
- f->next = NULL;
+ tcf_exts_change(tp, &f->result.exts, &r->exts);
fp = cp->h + (handle % cp->hash);
		for (nfp = rtnl_dereference(*fp);
		     nfp;
		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
				; /* nothing */

		rcu_assign_pointer(*fp, f);
	}
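
The loop above is the writer-side append for an RCU singly linked list:
walk to the last empty 'next' slot (safe because the writer holds RTNL),
then publish the new, fully initialized filter with rcu_assign_pointer()
so it becomes visible to lookups in one step. Sketched with the
hypothetical my_filter struct from earlier:

	static void my_filter_append(struct my_filter __rcu **fp,
				     struct my_filter *f)
	{
		struct my_filter *nfp;

		/* Find the tail slot; rtnl_dereference() is legal here
		 * because the writer holds RTNL.
		 */
		for (nfp = rtnl_dereference(*fp);
		     nfp;
		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
			;
		/* Publish: readers see either the old tail or the new node. */
		rcu_assign_pointer(*fp, f);
	}
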
errout_alloc:
	if (balloc == 1)
		kfree(cp->perfect);
	else if (balloc == 2)
		kfree(cp->h);
errout:
kfree(cp);
- tcf_exts_destroy(tp, &e);
+ tcf_exts_destroy(&e);
return err;
}