/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>


/*	Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t) be B/R initially and grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in the queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.



	Actually, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size, while the other,
	with rate P (peak rate) and depth M (equal to the link MTU),
	limits bursts on a smaller time scale.

	It is easy to see that P>R and B>M. If P is infinite, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)


	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not woken up by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.


	This means that, with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10Mbit ethernet and HZ=100, the minimal allowed B is ~10Kbytes.

	Note that the peak rate TBF is much tougher: with MTU 1500,
	P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use alpha with HZ=1000 :-)

	With classful TBF, limit is just kept for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed the limit is not effective anymore.
*/
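
/* A worked latency example (illustrative figures, taking L to be the
 * queue limit in bytes): with R = 125KB/s (1Mbit), B = 10KB,
 * P = 250KB/s, M = 1.5KB and L = 60KB,
 *
 *	lat = max((60-10)/125, (60-1.5)/250) = max(0.40s, 0.234s) = 0.40s
 */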

struct tbf_sched_data {
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		max_size;
	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	s64		mtu;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;

/* Variables */
	s64		tokens;		/* Current number of B tokens */
	s64		ptokens;	/* Current number of P tokens */
	s64		t_c;		/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};


/* Time to Length, convert time in ns to length in bytes
 * to determine how many bytes can be sent in given time.
 */
static u64 psched_ns_t2l(const struct psched_ratecfg *r,
			 u64 time_in_ns)
{
	/* The formula is :
	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
	 */
	u64 len = time_in_ns * r->rate_bytes_ps;

	do_div(len, NSEC_PER_SEC);

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
		do_div(len, 53);
		len = len * 48;
	}

	if (len > r->overhead)
		len -= r->overhead;
	else
		len = 0;

	return len;
}
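
/* Illustrative ATM figures for the 48/53 scaling above: every 53-byte
 * cell carries only 48 payload bytes, so a raw length of 1060 bytes of
 * wire time corresponds to 1060/53 = 20 cells, i.e. 20*48 = 960 usable
 * bytes before the overhead correction.
 */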

/*
 * Return length of individual segments of a gso packet,
 * including all headers (MAC, IP, TCP/UDP)
 */
static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

	return hdr_len + skb_gso_transport_seglen(skb);
}

/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_reshape_fail(skb, sch);

	nb = 0;
	while (segs) {
		nskb = segs->next;
		segs->next = NULL;
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		len += segs->len;
		ret = qdisc_enqueue(segs, q->qdisc);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				qdisc_qstats_drop(sch);
		} else {
			nb++;
		}
		segs = nskb;
	}
	sch->q.qlen += nb;
	if (nb > 1)
		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
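
/* Rough sizing sketch (assumed numbers): a 64KB GSO skb carrying
 * 1448-byte TCP segments is split into roughly 45 packets here, each
 * enqueued and accounted against the inner qdisc individually.
 */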

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
			return tbf_segment(skb, sch);
		return qdisc_reshape_fail(skb, sch);
	}
	ret = qdisc_enqueue(skb, q->qdisc);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			qdisc_qstats_drop(sch);
		return ret;
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static unsigned int tbf_drop(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->qstats.backlog -= len;
		sch->q.qlen--;
		qdisc_qstats_drop(sch);
	}
	return len;
}

static bool tbf_peak_present(const struct tbf_sched_data *q)
{
	return q->peak.rate_bytes_ps;
}

static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_get_ns();
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (tbf_peak_present(q)) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks),
					   true);

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but this is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		qdisc_qstats_overlimit(sch);
	}
	return NULL;
}
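
/* Illustrative token arithmetic (assumed numbers): at rate 125KB/s a
 * 1250-byte packet costs psched_l2t_ns() = 10ms worth of tokens. If
 * only 4ms worth have accumulated, toks ends up at -6ms and the
 * watchdog is scheduled to fire 6ms from now.
 */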

static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	q->t_c = ktime_get_ns();
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]		= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
	[TCA_TBF_BURST]		= { .type = NLA_U32 },
	[TCA_TBF_PBURST]	= { .type = NLA_U32 },
};

static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB]));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
					      tb[TCA_TBF_PTAB]));

	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

	if (tb[TCA_TBF_BURST]) {
		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
		buffer = psched_l2t_ns(&rate, max_size);
	} else {
		max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
	}

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equal to rate %llu !\n",
					    peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		if (tb[TCA_TBF_PBURST]) {
			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);

			max_size = min_t(u32, max_size, pburst);
			mtu = psched_l2t_ns(&peak, pburst);
		} else {
			max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
		}
	} else {
		memset(&peak, 0, sizeof(peak));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
					  q->qdisc->qstats.backlog);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	if (tb[TCA_TBF_PBURST])
		q->mtu = mtu;
	else
		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	if (tb[TCA_TBF_BURST])
		q->buffer = buffer;
	else
		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));

	sch_tree_unlock(sch);
	err = 0;
done:
	return err;
}
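
/* Illustrative buffer-to-burst conversion (assumed numbers): at rate
 * 125KB/s (1Mbit), a 10ms buffer converts via psched_ns_t2l() to a
 * max_size of 125000 * 0.010 = 1250 bytes.
 */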

static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	q->t_c = ktime_get_ns();
	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (tbf_peak_present(q))
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;
	if (tbf_peak_present(q) &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	tbf_drop,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}

module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");
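
/* Example configuration (assuming the iproute2 "tc" front end; device
 * name and values are illustrative):
 *
 *	tc qdisc add dev eth0 root tbf rate 0.5mbit \
 *		burst 5kb latency 70ms peakrate 1mbit minburst 1540
 *
 * This shapes eth0 to 0.5Mbit/s with a 5KB bucket, bounds queueing
 * latency at roughly 70ms, and caps short bursts at 1Mbit/s.
 */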