/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
/*	Simple Token Bucket Filter.
	=======================================

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.
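
	As a purely illustrative example: with R = 1 Mbyte/sec and
	B = 10 Kbytes, N(t) saturates at B/R = 10 msec; a 5 Kbyte packet at
	the head of the queue needs S/R = 5 msec of accumulated credit and,
	once transmitted, reduces N(t) by those 5 msec.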

	In practice, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size, while the other,
	with rate P (peak rate) and depth M (equal to the link MTU),
	limits bursts at a smaller time scale.

	It is easy to see that P>R and B>M. If P is infinity, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as

	lat = max ((L-B)/R, (L-M)/P)

	where L is the configured backlog limit.
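
	For instance (again with illustrative numbers): limit L = 100 Kbytes,
	B = 10 Kbytes, R = 1 Mbyte/sec, M = 1.5 Kbytes and P = 10 Mbytes/sec
	give lat = max(90 msec, 9.85 msec) = 90 msec.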

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by an EOI for some previous packet,
	TBF can stop its activity for 1/HZ.

	This means that with depth B, the maximal rate is

	R_crit = B*HZ

	F.e. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.
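	(10 Mbit/sec is 1.25 Mbytes/sec, so B must be at least
	1.25 Mbytes/sec / 100 ticks/sec = 12.5 Kbytes, i.e. on the order of
	10 Kbytes.)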

	Note that the peak-rate TBF is much stricter: with MTU 1500 and
	HZ=100, P_crit = MTU*HZ = 150 Kbytes/sec. So, if you need greater
	peak rates, use a kernel with HZ=1000. :-)

	With classful TBF, the limit is kept only for backwards compatibility.
	It is passed to the default bfifo qdisc; once the inner qdisc is
	changed, the limit is no longer effective.
*/
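
/* Implementation note: the buckets below are accounted in nanoseconds of
 * transmission credit rather than in bytes.  buffer and mtu hold the
 * bucket depths B/R and M/P converted to ns, tokens/ptokens hold the
 * currently accumulated credit, and psched_l2t_ns() converts a packet
 * length into the ns needed to send it at the configured rate, so the
 * dequeue path only has to add and compare times.
 */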

struct tbf_sched_data {
	u32		limit;		/* Maximal length of backlog: bytes */
	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	s64		mtu;		/* Peak-rate bucket depth (M/P): ns */
	u32		max_size;	/* Largest packet size admitted: bytes */
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	bool		peak_present;
	s64		tokens;		/* Current number of B tokens */
	s64		ptokens;	/* Current number of P tokens */
	s64		t_c;		/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};

/*
 * Return length of individual segments of a GSO packet,
 * including all headers (MAC, IP, TCP/UDP)
 */
static unsigned int skb_gso_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
		hdr_len += tcp_hdrlen(skb);
	else
		hdr_len += sizeof(struct udphdr);
	return hdr_len + shinfo->gso_size;
}
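
/* A GSO skb whose individual segments (as reported by skb_gso_seglen()
 * above) fit within q->max_size can still be accepted even though the
 * aggregate skb is larger: tbf_enqueue() hands it to tbf_segment(), which
 * splits it and enqueues each resulting segment on the inner qdisc
 * separately, adjusting the parents' queue length for the extra packets.
 */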

/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_reshape_fail(skb, sch);

	nb = 0;
	while (segs) {
		nskb = segs->next;
		segs->next = NULL;
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		ret = qdisc_enqueue(segs, q->qdisc);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				sch->qstats.drops++;
		} else {
			nb++;
		}
		segs = nskb;
	}
	sch->q.qlen += nb;
	if (nb > 1)
		qdisc_tree_decrease_qlen(sch, 1 - nb);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb) && skb_gso_seglen(skb) <= q->max_size)
			return tbf_segment(skb, sch);
		return qdisc_reshape_fail(skb, sch);
	}
	ret = qdisc_enqueue(skb, q->qdisc);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;
		return ret;
	}

	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static unsigned int tbf_drop(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now, toks, ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_to_ns(ktime_get());
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (q->peak_present) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);
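
		/* toks/ptoks now hold the credit (in ns) that would remain in
		 * each bucket after sending the head packet; if both are
		 * non-negative the packet can go out immediately, otherwise
		 * the watchdog below sleeps until the more depleted bucket
		 * has refilled enough.
		 */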
		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			sch->q.qlen--;
			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds tempting,
		   but it is wrong in principle:
		   we MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		sch->qstats.overlimits++;
	}
	return NULL;
}

static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	q->t_c = ktime_to_ns(ktime_get());
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]		= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
};
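
/* Configuration arrives as netlink attributes: TCA_TBF_PARMS carries the
 * classic struct tc_tbf_qopt (32-bit rates, buffer/mtu in psched ticks),
 * TCA_TBF_RTAB/TCA_TBF_PTAB carry the legacy rate tables, and
 * TCA_TBF_RATE64/TCA_TBF_PRATE64 optionally supply rates too large for
 * the 32-bit fields.
 */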

static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct qdisc_rate_table *rtab = NULL;
	struct qdisc_rate_table *ptab = NULL;
	struct Qdisc *child = NULL;
	int max_size, n;
	u64 rate64 = 0, prate64 = 0;
	int err;

	err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]);
	if (rtab == NULL)
		goto done;

	if (qopt->peakrate.rate) {
		if (qopt->peakrate.rate > qopt->rate.rate)
			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]);
		if (ptab == NULL)
			goto done;
	}

	for (n = 0; n < 256; n++)
		if (rtab->data[n] > qopt->buffer)
			break;
	max_size = (n << qopt->rate.cell_log) - 1;
	if (ptab) {
		int size;

		for (n = 0; n < 256; n++)
			if (ptab->data[n] > qopt->mtu)
				break;
		size = (n << qopt->peakrate.cell_log) - 1;
		if (size < max_size)
			max_size = size;
	}
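
	/* rtab/ptab map a packet size (in units of 2^cell_log bytes) to the
	 * time needed to transmit it, so the loops above find the largest
	 * packet whose transmission time still fits into the configured
	 * buffer (and, for the peak bucket, into mtu); anything bigger can
	 * never gather enough tokens and is rejected or segmented at
	 * enqueue time via max_size.
	 */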

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %u is lower than device %s mtu (%u) !\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}

	q->limit = qopt->limit;
	q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&q->rate, &rtab->rate, rate64);
	if (ptab) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&q->peak, &ptab->rate, prate64);
		q->peak_present = true;
	} else {
		q->peak_present = false;
	}

	sch_tree_unlock(sch);
	err = 0;

done:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ptab)
		qdisc_put_rtab(ptab);
	return err;
}

static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	q->t_c = ktime_to_ns(ktime_get());
	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (q->peak_present)
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
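
	/* The rate in struct tc_ratespec is only 32 bits wide, so rates of
	 * 2^32 bytes/sec or more are additionally exported through the
	 * 64-bit TCA_TBF_RATE64/TCA_TBF_PRATE64 attributes below.
	 */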
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps))
		goto nla_put_failure;
	if (q->peak_present &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (walker->count >= walker->skip)
		if (walker->fn(sch, 1, walker) < 0) {
			walker->stop = 1;
			return;
		}
	walker->count++;
}

static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.id		=	"tbf",
	.cl_ops		=	&tbf_class_ops,
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	tbf_drop,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}

module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");