/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
/*	Simple Token Bucket Filter.
	=======================================

	Description.
	------------

	A data flow obeys TBF with rate R and depth B if, for any
	time interval t_i...t_f, the number of transmitted bits
	does not exceed B + R*(t_f - t_i).

	Packetized version of this definition:
	the sequence of packets of sizes s_i served at moments t_i
	obeys TBF if, for any i <= k:

	s_i + ... + s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially, and let N(t) grow continuously
	with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in the queue has length S, it may be
	transmitted only at a time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.

	Actually, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size, while the
	other, with rate P (peak rate) and depth M (equal to the link
	MTU), limits bursts on a smaller time scale.

	It is easy to see that P > R and B > M. If P is infinite, this
	double TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as

	lat = max((L-B)/R, (L-M)/P)

	where L is the backlog limit.

	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake
	it up when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by an EOI for some previous
	packet, TBF can stop its activity for 1/HZ.

	This means that, with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10Mbit ethernet and HZ=100, the minimal allowed B is
	~10Kbytes.

	Note that the peak-rate TBF is much tougher: with MTU 1500 and
	HZ=100, P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use Alpha with HZ=1000 :-)

	With classful TBF, limit is just kept for backwards
	compatibility. It is passed to the default bfifo qdisc - if the
	inner qdisc is changed, the limit is no longer effective.
*/
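/* A minimal sketch of the bucket arithmetic described above, in the
 * nanosecond domain this file actually uses. It mirrors the token
 * handling in tbf_dequeue() below but is illustrative only and is not
 * called by the datapath; the name tbf_sketch_can_send and the "last"
 * timestamp parameter are inventions of this sketch, with "now" and
 * "last" assumed to come from ktime_get(). Tokens are nanoseconds of
 * transmission time: a packet of length S at rate R costs S/R seconds
 * of tokens, and idle time refills the bucket up to depth B/R.
 */
static inline bool tbf_sketch_can_send(s64 *tokens, s64 buffer,
				       s64 now, s64 last, s64 pkt_ns)
{
	/* Credit idle time, capped at the bucket depth */
	s64 toks = min_t(s64, now - last, buffer) + *tokens;

	if (toks > buffer)
		toks = buffer;		/* N(t) saturates at B/R */
	toks -= pkt_ns;			/* N(t) jumps by -S/R on send */
	if (toks < 0)
		return false;		/* throttle until toks >= 0 */
	*tokens = toks;
	return true;
}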
struct tbf_sched_data {
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	s64		mtu;
	u32		max_size;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	bool		peak_present;

/* Variables */
	s64		tokens;		/* Current number of B tokens */
	s64		ptokens;	/* Current number of P tokens */
	s64		t_c;		/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};
/* Time to Length, convert time in ns to length in bytes
 * to determine how many bytes can be sent in given time.
 */
static u64 psched_ns_t2l(const struct psched_ratecfg *r,
			 u64 time_in_ns)
{
	/* The formula is :
	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
	 */
	u64 len = time_in_ns * r->rate_bytes_ps;

	do_div(len, NSEC_PER_SEC);

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
		/* 48 payload bytes per 53-byte ATM cell */
		do_div(len, 53);
		len = len * 48;
	}

	if (len > r->overhead)
		len -= r->overhead;
	else
		len = 0;

	return len;
}
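/* Worked example (illustrative, not from the original source): with
 * rate_bytes_ps = 125000000 (1 Gbit/s) and time_in_ns = 1000000 (1 ms),
 * len = 125000000 * 1000000 / 10^9 = 125000 bytes, before the ATM and
 * per-packet overhead adjustments above.
 */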
/*
 * Return length of individual segments of a gso packet,
 * including all headers (MAC, IP, TCP/UDP)
 */
static unsigned int skb_gso_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
		hdr_len += tcp_hdrlen(skb);
	else
		hdr_len += sizeof(struct udphdr);
	return hdr_len + shinfo->gso_size;
}
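/* Worked example (illustrative): for a TCPv4 GSO packet with a
 * 1448-byte gso_size, a 14-byte Ethernet header, a 20-byte IP header
 * and a 32-byte TCP header (with timestamps), hdr_len = 34 + 32 = 66
 * and seglen = 66 + 1448 = 1514 bytes, i.e. one full on-wire Ethernet
 * frame per segment.
 */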
/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs))
		return qdisc_reshape_fail(skb, sch);

	nb = 0;
	while (segs) {
		nskb = segs->next;
		segs->next = NULL;
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		ret = qdisc_enqueue(segs, q->qdisc);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				sch->qstats.drops++;
		} else {
			nb++;
		}
		segs = nskb;
	}
	sch->q.qlen += nb;
	if (nb > 1)
		/* one skb became nb segments: account for the extra nb - 1 */
		qdisc_tree_decrease_qlen(sch, 1 - nb);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb) && skb_gso_seglen(skb) <= q->max_size)
			return tbf_segment(skb, sch);
		return qdisc_reshape_fail(skb, sch);
	}
	ret = qdisc_enqueue(skb, q->qdisc);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;
		return ret;
	}

	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}
static unsigned int tbf_drop(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}
static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_to_ns(ktime_get());
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (q->peak_present) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			sch->q.qlen--;
			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));

		/* Maybe there is a shorter packet in the queue that
		 * could be sent now. That sounds tempting, but it is
		 * wrong in principle: we MUST NOT reorder packets
		 * under these circumstances. Splitting the flow into
		 * independent subflows would be a very good solution;
		 * that is the main idea of all FQ algorithms
		 * (cf. CSZ, HPFQ, HFSC).
		 */
		sch->qstats.overlimits++;
	}
	return NULL;
}
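/* Worked example (illustrative): if the rate bucket is short by
 * toks = -500000 ns worth of tokens and the peak bucket by
 * ptoks = -200000 ns, the watchdog above is armed for now + 500000 ns,
 * i.e. the qdisc sleeps 0.5 ms until enough tokens have accumulated
 * for the head packet.
 */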
static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	q->t_c = ktime_to_ns(ktime_get());
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}
static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]		= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
	[TCA_TBF_BURST]		= { .type = NLA_U32 },
	[TCA_TBF_PBURST]	= { .type = NLA_U32 },
};
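/* Example (illustrative, assuming standard iproute2): a command such as
 *
 *   tc qdisc add dev eth0 root tbf rate 1mbit burst 10kb latency 70ms
 *
 * arrives here as a TCA_TBF_PARMS payload (struct tc_tbf_qopt) carrying
 * rate, limit, buffer and mtu; TCA_TBF_RATE64/TCA_TBF_PRATE64 are only
 * sent for rates that do not fit in the legacy 32-bit fields.
 */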
static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB]));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
					      tb[TCA_TBF_PTAB]));

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);
	if (tb[TCA_TBF_BURST]) {
		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
		buffer = psched_l2t_ns(&rate, max_size);
	} else {
		max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
	}

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equal to rate %llu !\n",
					    peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		if (tb[TCA_TBF_PBURST]) {
			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);
			max_size = min_t(u32, max_size, pburst);
			mtu = psched_l2t_ns(&peak, pburst);
		} else {
			max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
		}
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	if (tb[TCA_TBF_PBURST])
		q->mtu = mtu;
	else
		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	if (tb[TCA_TBF_BURST])
		q->buffer = buffer;
	else
		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	if (qopt->peakrate.rate) {
		memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));
		q->peak_present = true;
	} else {
		q->peak_present = false;
	}

	sch_tree_unlock(sch);
	err = 0;
done:
	return err;
}
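/* Worked example (illustrative): with a rate of 125000 bytes/s (1
 * Mbit/s) and burst 10000 bytes supplied via TCA_TBF_BURST, the code
 * above sets buffer = psched_l2t_ns(&rate, 10000), i.e. 80 ms worth of
 * tokens; an empty bucket then refills completely after 80 ms of idle
 * time.
 */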
static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	q->t_c = ktime_to_ns(ktime_get());
	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	return tbf_change(sch, opt);
}
static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
}
static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (q->peak_present)
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps))
		goto nla_put_failure;
	if (q->peak_present &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
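/* Example (illustrative, assuming standard iproute2): the attributes
 * dumped above are what "tc qdisc show dev eth0" renders as something
 * like
 *
 *   qdisc tbf 8001: root rate 1Mbit burst 10Kb lat 70.0ms
 */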
static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}
static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}
static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}
static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}
static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};
static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	tbf_drop,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};
static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}

module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");