/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>


/*	Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

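	Illustrative check (numbers assumed, not taken from this file):
	with R = 125000 bytes/sec (1 Mbit/s) and B = 10000 bytes, three
	back-to-back 4000-byte packets within a 16 ms window satisfy the
	bound, since 12000 <= 10000 + 125000*0.016 = 12000.
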
	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.

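	A worked step (assumed numbers): with B/R = 80 ms, an idle gap of
	delta = 30 ms grows N(t) by 30 ms, clamped at 80 ms; dequeuing an
	S = 1500 byte packet at R = 125000 bytes/sec then deducts
	S/R = 12 ms from N(t).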

	Actually, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size, while the other,
	with rate P (peak rate) and depth M (equal to the link MTU),
	limits bursts at a smaller time scale.

	It is easy to see that P>R, and B>M. If P is infinity, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)


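	E.g. (illustrative numbers): reshaping an L = 60000 byte backlog
	with R = 125000 bytes/sec, B = 10000 bytes, P = 1250000 bytes/sec
	and M = 1500 bytes gives
	lat = max((60000-10000)/125000, (60000-1500)/1250000)
	    = max(400 ms, ~47 ms) = 400 ms.
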
	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by an EOI for some previous packet,
	TBF can stop its activity for 1/HZ.


	This means that with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.

	Note that the peak rate TBF is much tougher: with MTU 1500
	P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use Alpha with HZ=1000 :-)

	With classful TBF, limit is just kept for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed the limit is not effective anymore.
*/
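
/* A typical user space configuration (illustrative; the exact syntax
 * belongs to iproute2's tc, not to this file) would be:
 *
 *	tc qdisc add dev eth0 root tbf rate 1mbit burst 10kb latency 70ms
 *
 * where rate and burst land in q->rate and q->buffer below, and tc
 * derives the backlog limit handed to the inner bfifo from latency.
 */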

struct tbf_sched_data {
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		max_size;
	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	s64		mtu;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;

/* Variables */
	s64		tokens;		/* Current number of B tokens */
	s64		ptokens;	/* Current number of P tokens */
	s64		t_c;		/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};


/* Time to Length, convert time in ns to length in bytes
 * to determine how many bytes can be sent in a given time.
 */
static u64 psched_ns_t2l(const struct psched_ratecfg *r,
			 u64 time_in_ns)
{
	/* The formula is :
	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
	 */
	u64 len = time_in_ns * r->rate_bytes_ps;

	do_div(len, NSEC_PER_SEC);

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
		do_div(len, 53);
		len = len * 48;
	}

	if (len > r->overhead)
		len -= r->overhead;
	else
		len = 0;

	return len;
}

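/* Illustrative sanity check (assumed numbers): with
 * r->rate_bytes_ps = 125000 (1 Mbit/s), r->overhead = 0 and a
 * non-ATM linklayer, psched_ns_t2l(r, 8000000) returns
 * 8000000 * 125000 / NSEC_PER_SEC = 1000 bytes.
 */
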
/*
 * Return length of individual segments of a gso packet,
 * including all headers (MAC, IP, TCP/UDP)
 */
static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
	return hdr_len + skb_gso_transport_seglen(skb);
}

/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_reshape_fail(skb, sch);

	nb = 0;
	while (segs) {
		nskb = segs->next;
		segs->next = NULL;
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		ret = qdisc_enqueue(segs, q->qdisc);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				sch->qstats.drops++;
		} else {
			nb++;
		}
		segs = nskb;
	}
	sch->q.qlen += nb;
	if (nb > 1)
		qdisc_tree_decrease_qlen(sch, 1 - nb);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
			return tbf_segment(skb, sch);
		return qdisc_reshape_fail(skb, sch);
	}
	ret = qdisc_enqueue(skb, q->qdisc);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;
		return ret;
	}

	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static unsigned int tbf_drop(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static bool tbf_peak_present(const struct tbf_sched_data *q)
{
	return q->peak.rate_bytes_ps;
}

static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_to_ns(ktime_get());
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (tbf_peak_present(q)) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			sch->q.qlen--;
			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but this is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		sch->qstats.overlimits++;
	}
	return NULL;
}

static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	q->t_c = ktime_to_ns(ktime_get());
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]		= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
	[TCA_TBF_BURST]		= { .type = NLA_U32 },
	[TCA_TBF_PBURST]	= { .type = NLA_U32 },
};

static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB]));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
					      tb[TCA_TBF_PTAB]));

	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

	if (tb[TCA_TBF_BURST]) {
		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
		buffer = psched_l2t_ns(&rate, max_size);
	} else {
		max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
	}

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equal to rate %llu !\n",
					    peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		if (tb[TCA_TBF_PBURST]) {
			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);
			max_size = min_t(u32, max_size, pburst);
			mtu = psched_l2t_ns(&peak, pburst);
		} else {
			max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
		}
	} else {
		memset(&peak, 0, sizeof(peak));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	if (tb[TCA_TBF_PBURST])
		q->mtu = mtu;
	else
		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	if (tb[TCA_TBF_BURST])
		q->buffer = buffer;
	else
		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));

	sch_tree_unlock(sch);
	err = 0;
done:
	return err;
}

static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	q->t_c = ktime_to_ns(ktime_get());
	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (tbf_peak_present(q))
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps))
		goto nla_put_failure;
	if (tbf_peak_present(q) &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	tbf_drop,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");