/*
 * Fair Queue CoDel discipline
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/flow_keys.h>
#include <net/codel.h>
/*	Fair Queue CoDel.
 *
 * Principles :
 * Packets are classified (internal classifier or external) on flows.
 * This is a stochastic model (since we use a hash, several flows
 * might be hashed to the same slot).
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO).
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */
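
/* Example configuration from user space (illustrative values; the defaults
 * set up in fq_codel_init() are a 10240 packet limit, 1024 flow buckets,
 * ECN enabled, and CoDel's usual 5ms target / 100ms interval):
 *
 *   tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *	target 5ms interval 100ms ecn
 */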
struct fq_codel_flow {
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  flowchain;
	int		  deficit;
	u32		  dropped; /* number of drops (or ECN marks) on this flow */
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */
struct fq_codel_sched_data {
	struct tcf_proto *filter_list;	/* optional external classifier */
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		perturbation;	/* hash perturbation */
	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
	struct codel_params cparams;
	struct codel_stats cstats;
	u32		drop_overlimit;
	u32		new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};
static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  const struct sk_buff *skb)
{
	struct flow_keys keys;
	unsigned int hash;

	skb_flow_dissect(skb, &keys);
	hash = jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src ^ keys.ip_proto,
			    (__force u32)keys.ports, q->perturbation);

	return reciprocal_scale(hash, q->flows_cnt);
}
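
/* Map a packet to a flow index in [1, flows_cnt]. A return value of 0 means
 * classification failed and the packet will be freed by the caller.
 * skb->priority may select a bucket directly, then the optional external
 * classifier is consulted, and finally the stochastic hash above is used.
 */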
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	if (!q->filter_list)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}
/* helper functions : might be changed when/if skb use a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}
/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}
static unsigned int fq_codel_drop(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;

	/* Queue is full! Find the fat flow and drop a packet from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we dont need to handle a complex tree
	 * in fast path (packet queue/enqueue) with many cache misses.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}
	flow = &q->flows[idx];
	skb = dequeue_head(flow);
	len = qdisc_pkt_len(skb);
	q->backlogs[idx] -= len;
	kfree_skb(skb);
	sch->q.qlen--;
	sch->qstats.drops++;
	sch->qstats.backlog -= len;
	flow->dropped++;
	return idx;
}
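
/* Enqueue: classify the packet to a flow bucket, timestamp it for CoDel and
 * append it to that bucket's FIFO. A bucket that was idle is (re)added to
 * the new_flows list with a fresh quantum of credit. If the qdisc-wide
 * packet limit is exceeded, one packet is dropped from the fattest flow.
 */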
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx;
	struct fq_codel_flow *flow;
	int uninitialized_var(ret);

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
		flow->dropped = 0;
	}
	if (++sch->q.qlen <= sch->limit)
		return NET_XMIT_SUCCESS;

	q->drop_overlimit++;
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (fq_codel_drop(sch) == idx)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let upper stack know this */
	qdisc_tree_decrease_qlen(sch, 1);
	return NET_XMIT_SUCCESS;
}
/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we dont need to reduce it here.
 */
static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		sch->q.qlen--;
	}
	return skb;
}
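
/* Dequeue: Deficit Round Robin over the two flow lists. Flows on new_flows
 * are served before flows on old_flows; each flow may send up to q->quantum
 * bytes before its deficit is exhausted and it is moved to the tail of
 * old_flows. Packets are pulled through codel_dequeue() so CoDel can drop
 * or ECN-mark them based on their sojourn time.
 */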
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;
	u32 prev_drop_count, prev_ecn_mark;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	prev_drop_count = q->cstats.drop_count;
	prev_ecn_mark = q->cstats.ecn_mark;

	skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
			    dequeue);

	flow->dropped += q->cstats.drop_count - prev_drop_count;
	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
		q->cstats.drop_count = 0;
	}
	return skb;
}
static void fq_codel_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = fq_codel_dequeue(sch)) != NULL)
		kfree_skb(skb);
}
static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
};
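
/* Apply netlink configuration. TARGET and INTERVAL arrive in microseconds
 * and are converted to codel_time_t (nanoseconds >> CODEL_SHIFT). FLOWS can
 * only be set before the flow table has been allocated; QUANTUM is clamped
 * to a minimum of 256 bytes. If the new LIMIT is smaller than the current
 * queue length, excess packets are dequeued and freed under the tree lock.
 */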
static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (tb[TCA_FQ_CODEL_QUANTUM])
		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		kfree_skb(skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
	q->cstats.drop_count = 0;

	sch_tree_unlock(sch);
	return 0;
}
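
/* The flow and backlog tables can be large (flows_cnt may be up to 65536),
 * so fall back to vmalloc() when a contiguous kzalloc() is not available.
 */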
static void *fq_codel_zalloc(size_t sz)
{
	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

	if (!ptr)
		ptr = vzalloc(sz);
	return ptr;
}

static void fq_codel_free(void *addr)
{
	kvfree(addr);
}
static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	fq_codel_free(q->backlogs);
	fq_codel_free(q->flows);
}
static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->perturbation = prandom_u32();
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;

	if (opt) {
		int err = fq_codel_change(sch, opt);
		if (err)
			return err;
	}

	if (!q->flows) {
		q->flows = fq_codel_zalloc(q->flows_cnt *
					   sizeof(struct fq_codel_flow));
		if (!q->flows)
			return -ENOMEM;
		q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
		if (!q->backlogs) {
			fq_codel_free(q->flows);
			return -ENOMEM;
		}
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}
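
/* Report the current configuration back to user space, in the same units
 * accepted by fq_codel_change() (microseconds for target and interval).
 */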
static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			q->cparams.ecn) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			q->flows_cnt))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}
static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type			= TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;

	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
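
/* fq_codel has no configurable classes. Each flow bucket is exposed as a
 * read-only pseudo class (classid minor 1..flows_cnt) so that filters can
 * target a bucket and tc can dump per-flow statistics.
 */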
static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void fq_codel_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto **fq_codel_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}
static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb = flow->head;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		while (skb) {
			qs.qlen++;
			skb = skb->next;
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = flow->dropped;
	}
	if (gnet_stats_copy_queue(d, &qs) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}
static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain) ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		=	fq_codel_leaf,
	.get		=	fq_codel_get,
	.put		=	fq_codel_put,
	.tcf_chain	=	fq_codel_find_tcf,
	.bind_tcf	=	fq_codel_bind,
	.unbind_tcf	=	fq_codel_put,
	.dump		=	fq_codel_dump_class,
	.dump_stats	=	fq_codel_dump_class_stats,
	.walk		=	fq_codel_walk,
};
static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		=	&fq_codel_class_ops,
	.id		=	"fq_codel",
	.priv_size	=	sizeof(struct fq_codel_sched_data),
	.enqueue	=	fq_codel_enqueue,
	.dequeue	=	fq_codel_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	fq_codel_drop,
	.init		=	fq_codel_init,
	.reset		=	fq_codel_reset,
	.destroy	=	fq_codel_destroy,
	.change		=	fq_codel_change,
	.dump		=	fq_codel_dump,
	.dump_stats	=	fq_codel_dump_stats,
	.owner		=	THIS_MODULE,
};
static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");