/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *		original idea by Martin Devera
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>

/* Simple Token Bucket Filter.
   =======================================

   SOURCE.
   -------

   None.

   Description.
   ------------

   A data flow obeys TBF with rate R and depth B if, for any
   time interval t_i...t_f, the number of transmitted bits
   does not exceed B + R*(t_f-t_i).

   Packetized version of this definition:
   the sequence of packets of sizes s_i served at moments t_i
   obeys TBF if, for any i<=k:

   s_i+....+s_k <= B + R*(t_k - t_i)

   Algorithm.
   ----------

   Let N(t_i) be B/R initially, and let N(t) grow continuously with time as:

   N(t+delta) = min{B/R, N(t) + delta}

   If the first packet in the queue has length S, it may be
   transmitted only at the time t_* when S/R <= N(t_*),
   and in this case N(t) jumps:

   N(t_* + 0) = N(t_* - 0) - S/R.


   Actually, QoS requires two TBFs to be applied to a data stream.
   One of them controls the steady-state burst size, while the other,
   with rate P (peak rate) and depth M (equal to link MTU),
   limits bursts on a smaller time scale.

   It is easy to see that P>R and B>M. If P is infinity, this double
   TBF is equivalent to a single one.

   When TBF works in reshaping mode, latency is estimated as:

   lat = max((L-B)/R, (L-M)/P)


   NOTES.
   ------

   If TBF throttles, it starts a watchdog timer, which will wake it up
   when it is ready to transmit.
   Note that the minimal timer resolution is 1/HZ.
   If no new packets arrive during this period,
   or if the device is not awakened by EOI for some previous packet,
   TBF can stop its activity for 1/HZ.


   This means that, with depth B, the maximal rate is

   R_crit = B*HZ

   E.g. for 10Mbit ethernet and HZ=100, the minimal allowed B is ~10Kbytes.

   Note that the peak-rate TBF is much tougher: with MTU 1500,
   P_crit = 150Kbytes/sec. So, if you need greater peak
   rates, use alpha with HZ=1000 :-)

   With classful TBF, limit is kept only for backwards compatibility.
   It is passed to the default bfifo qdisc; if the inner qdisc is
   changed, the limit is no longer effective.
*/

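/* A worked instance of the R_crit bound above (illustrative numbers,
 * not taken from this file): at R = 10Mbit/s = 1.25Mbyte/s and HZ=100,
 * the bucket must hold at least R/HZ = 12.5Kbytes, which matches the
 * ~10Kbytes figure quoted above. A typical iproute2 invocation of this
 * qdisc might look like:
 *
 *	tc qdisc add dev eth0 root tbf rate 10mbit burst 12500 latency 50ms
 *
 * where "burst" is the bucket depth B in bytes and "latency" is used to
 * size the backlog limit.
 */
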
struct tbf_sched_data {
/* Parameters */
        u32             limit;          /* Maximal length of backlog: bytes */
        u32             max_size;
        s64             buffer;         /* Token bucket depth/rate: MUST BE >= MTU/B */
        s64             mtu;
        struct psched_ratecfg rate;
        struct psched_ratecfg peak;

/* Variables */
        s64             tokens;         /* Current number of B tokens */
        s64             ptokens;        /* Current number of P tokens */
        s64             t_c;            /* Time check-point */
        struct Qdisc    *qdisc;         /* Inner qdisc, default - bfifo queue */
        struct qdisc_watchdog watchdog; /* Watchdog timer */
};

/* Time to Length: convert time in ns to length in bytes,
 * to determine how many bytes can be sent in the given time.
 */
static u64 psched_ns_t2l(const struct psched_ratecfg *r,
                         u64 time_in_ns)
{
        /* The formula is:
         * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
         */
        u64 len = time_in_ns * r->rate_bytes_ps;

        do_div(len, NSEC_PER_SEC);

        if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
                /* ATM cells carry 48 payload bytes per 53-byte cell */
                do_div(len, 53);
                len = len * 48;
        }

        if (len > r->overhead)
                len -= r->overhead;
        else
                len = 0;

        return len;
}

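/* For instance (illustrative numbers): with rate_bytes_ps = 125000
 * (1Mbit/s) and time_in_ns = 10^6 (1ms), psched_ns_t2l() yields
 * 10^6 * 125000 / 10^9 = 125 bytes before the linklayer and overhead
 * adjustments.
 */
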
/*
 * Return length of individual segments of a gso packet,
 * including all headers (MAC, IP, TCP/UDP)
 */
static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
{
        unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

        return hdr_len + skb_gso_transport_seglen(skb);
}

/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
{
        struct tbf_sched_data *q = qdisc_priv(sch);
        struct sk_buff *segs, *nskb;
        netdev_features_t features = netif_skb_features(skb);
        unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
        int ret, nb;

        segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

        if (IS_ERR_OR_NULL(segs))
                return qdisc_reshape_fail(skb, sch);

        nb = 0;
        while (segs) {
                nskb = segs->next;
                segs->next = NULL;
                qdisc_skb_cb(segs)->pkt_len = segs->len;
                len += segs->len;
                ret = qdisc_enqueue(segs, q->qdisc);
                if (ret != NET_XMIT_SUCCESS) {
                        if (net_xmit_drop_count(ret))
                                qdisc_qstats_drop(sch);
                } else {
                        nb++;
                }
                segs = nskb;
        }
        sch->q.qlen += nb;
        if (nb > 1)
                qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
        consume_skb(skb);
        return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}

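/* Enqueue: a packet longer than max_size could never accumulate enough
 * tokens to be sent, so it is either software-segmented (if it is a GSO
 * packet whose individual segments fit) or rejected; everything else is
 * handed to the inner qdisc.
 */
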
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct tbf_sched_data *q = qdisc_priv(sch);
        int ret;

        if (qdisc_pkt_len(skb) > q->max_size) {
                if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
                        return tbf_segment(skb, sch);
                return qdisc_reshape_fail(skb, sch);
        }
        ret = qdisc_enqueue(skb, q->qdisc);
        if (ret != NET_XMIT_SUCCESS) {
                if (net_xmit_drop_count(ret))
                        qdisc_qstats_drop(sch);
                return ret;
        }

        sch->q.qlen++;
        return NET_XMIT_SUCCESS;
}

static unsigned int tbf_drop(struct Qdisc *sch)
{
        struct tbf_sched_data *q = qdisc_priv(sch);
        unsigned int len = 0;

        if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
                sch->q.qlen--;
                qdisc_qstats_drop(sch);
        }
        return len;
}

static bool tbf_peak_present(const struct tbf_sched_data *q)
{
        return q->peak.rate_bytes_ps;
}

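/* Dequeue: replenish tokens according to the time elapsed since the
 * last check-point, then release the head packet only if both the rate
 * bucket and (when configured) the peak-rate bucket stay non-negative
 * after being charged for it; otherwise arm the watchdog for the
 * remaining deficit.
 */
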
static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
        struct tbf_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;

        skb = q->qdisc->ops->peek(q->qdisc);

        if (skb) {
                s64 now;
                s64 toks;
                s64 ptoks = 0;
                unsigned int len = qdisc_pkt_len(skb);

                now = ktime_get_ns();
                toks = min_t(s64, now - q->t_c, q->buffer);

                if (tbf_peak_present(q)) {
                        ptoks = toks + q->ptokens;
                        if (ptoks > q->mtu)
                                ptoks = q->mtu;
                        ptoks -= (s64) psched_l2t_ns(&q->peak, len);
                }
                toks += q->tokens;
                if (toks > q->buffer)
                        toks = q->buffer;
                toks -= (s64) psched_l2t_ns(&q->rate, len);

                if ((toks | ptoks) >= 0) {
                        skb = qdisc_dequeue_peeked(q->qdisc);
                        if (unlikely(!skb))
                                return NULL;

                        q->t_c = now;
                        q->tokens = toks;
                        q->ptokens = ptoks;
                        sch->q.qlen--;
                        qdisc_unthrottled(sch);
                        qdisc_bstats_update(sch, skb);
                        return skb;
                }

                qdisc_watchdog_schedule_ns(&q->watchdog,
                                           now + max_t(long, -toks, -ptoks),
                                           true);

                /* Maybe we have a shorter packet in the queue,
                   which can be sent now. It sounds cool,
                   but it is wrong in principle:
                   we MUST NOT reorder packets under these circumstances.

                   Really, if we split the flow into independent
                   subflows, it would be a very good solution.
                   This is the main idea of all FQ algorithms
                   (cf. CSZ, HPFQ, HFSC)
                 */

                qdisc_qstats_overlimit(sch);
        }
        return NULL;
}

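/* Reset: flush the inner qdisc, refill both buckets and cancel any
 * pending watchdog, restarting the token accounting from now.
 */
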
static void tbf_reset(struct Qdisc *sch)
{
        struct tbf_sched_data *q = qdisc_priv(sch);

        qdisc_reset(q->qdisc);
        sch->q.qlen = 0;
        q->t_c = ktime_get_ns();
        q->tokens = q->buffer;
        q->ptokens = q->mtu;
        qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
        [TCA_TBF_PARMS]         = { .len = sizeof(struct tc_tbf_qopt) },
        [TCA_TBF_RTAB]          = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
        [TCA_TBF_PTAB]          = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
        [TCA_TBF_RATE64]        = { .type = NLA_U64 },
        [TCA_TBF_PRATE64]       = { .type = NLA_U64 },
        [TCA_TBF_BURST]         = { .type = NLA_U32 },
        [TCA_TBF_PBURST]        = { .type = NLA_U32 },
};

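/* (Re)configure the qdisc from netlink attributes: precompute the rate
 * and peak-rate conversions, derive max_size/buffer/mtu from each other
 * where only one of the pair was supplied, and swap the new parameters
 * in under sch_tree_lock().
 */
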
static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
        int err;
        struct tbf_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_TBF_MAX + 1];
        struct tc_tbf_qopt *qopt;
        struct Qdisc *child = NULL;
        struct psched_ratecfg rate;
        struct psched_ratecfg peak;
        u64 max_size;
        s64 buffer, mtu;
        u64 rate64 = 0, prate64 = 0;

        err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy);
        if (err < 0)
                return err;

        err = -EINVAL;
        if (tb[TCA_TBF_PARMS] == NULL)
                goto done;

        qopt = nla_data(tb[TCA_TBF_PARMS]);
        if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
                qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
                                              tb[TCA_TBF_RTAB]));

        if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
                qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
                                              tb[TCA_TBF_PTAB]));

        buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
        mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

        if (tb[TCA_TBF_RATE64])
                rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
        psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

        if (tb[TCA_TBF_BURST]) {
                max_size = nla_get_u32(tb[TCA_TBF_BURST]);
                buffer = psched_l2t_ns(&rate, max_size);
        } else {
                max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
        }

        if (qopt->peakrate.rate) {
                if (tb[TCA_TBF_PRATE64])
                        prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
                psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
                if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
                        pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equal to rate %llu !\n",
                                            peak.rate_bytes_ps, rate.rate_bytes_ps);
                        err = -EINVAL;
                        goto done;
                }

                if (tb[TCA_TBF_PBURST]) {
                        u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);
                        max_size = min_t(u32, max_size, pburst);
                        mtu = psched_l2t_ns(&peak, pburst);
                } else {
                        max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
                }
        } else {
                memset(&peak, 0, sizeof(peak));
        }

        if (max_size < psched_mtu(qdisc_dev(sch)))
                pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
                                    max_size, qdisc_dev(sch)->name,
                                    psched_mtu(qdisc_dev(sch)));

        if (!max_size) {
                err = -EINVAL;
                goto done;
        }

        if (q->qdisc != &noop_qdisc) {
                err = fifo_set_limit(q->qdisc, qopt->limit);
                if (err)
                        goto done;
        } else if (qopt->limit > 0) {
                child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
                if (IS_ERR(child)) {
                        err = PTR_ERR(child);
                        goto done;
                }
        }

        sch_tree_lock(sch);
        if (child) {
                qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
                                          q->qdisc->qstats.backlog);
                qdisc_destroy(q->qdisc);
                q->qdisc = child;
        }
        q->limit = qopt->limit;
        if (tb[TCA_TBF_PBURST])
                q->mtu = mtu;
        else
                q->mtu = PSCHED_TICKS2NS(qopt->mtu);
        q->max_size = max_size;
        if (tb[TCA_TBF_BURST])
                q->buffer = buffer;
        else
                q->buffer = PSCHED_TICKS2NS(qopt->buffer);
        q->tokens = q->buffer;
        q->ptokens = q->mtu;

        memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
        memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));

        sch_tree_unlock(sch);
        err = 0;
done:
        return err;
}

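/* init defers all parameter handling to tbf_change(); until a valid
 * configuration arrives, the inner qdisc is noop_qdisc.
 */
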
static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct tbf_sched_data *q = qdisc_priv(sch);

        if (opt == NULL)
                return -EINVAL;

        q->t_c = ktime_get_ns();
        qdisc_watchdog_init(&q->watchdog, sch);
        q->qdisc = &noop_qdisc;

        return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
        struct tbf_sched_data *q = qdisc_priv(sch);

        qdisc_watchdog_cancel(&q->watchdog);
        qdisc_destroy(q->qdisc);
}

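/* Dump the configuration back to user space. Rates that do not fit the
 * legacy 32-bit tc_ratespec (>= 2^32 bytes/s) are additionally exported
 * via the 64-bit TCA_TBF_RATE64/TCA_TBF_PRATE64 attributes.
 */
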
static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct tbf_sched_data *q = qdisc_priv(sch);
        struct nlattr *nest;
        struct tc_tbf_qopt opt;

        sch->qstats.backlog = q->qdisc->qstats.backlog;
        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        opt.limit = q->limit;
        psched_ratecfg_getrate(&opt.rate, &q->rate);
        if (tbf_peak_present(q))
                psched_ratecfg_getrate(&opt.peakrate, &q->peak);
        else
                memset(&opt.peakrate, 0, sizeof(opt.peakrate));
        opt.mtu = PSCHED_NS2TICKS(q->mtu);
        opt.buffer = PSCHED_NS2TICKS(q->buffer);
        if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
        if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
            nla_put_u64(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps))
                goto nla_put_failure;
        if (tbf_peak_present(q) &&
            q->peak.rate_bytes_ps >= (1ULL << 32) &&
            nla_put_u64(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps))
                goto nla_put_failure;

        return nla_nest_end(skb, nest);

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
                          struct sk_buff *skb, struct tcmsg *tcm)
{
        struct tbf_sched_data *q = qdisc_priv(sch);

        tcm->tcm_handle |= TC_H_MIN(1);
        tcm->tcm_info = q->qdisc->handle;

        return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                     struct Qdisc **old)
{
        struct tbf_sched_data *q = qdisc_priv(sch);

        if (new == NULL)
                new = &noop_qdisc;

        *old = qdisc_replace(sch, new, &q->qdisc);
        return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct tbf_sched_data *q = qdisc_priv(sch);

        return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
        return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

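/* TBF exposes exactly one class (the inner qdisc), so the walker visits
 * a single pseudo-class with handle minor 1.
 */
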
static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
        if (!walker->stop) {
                if (walker->count >= walker->skip)
                        if (walker->fn(sch, 1, walker) < 0) {
                                walker->stop = 1;
                                return;
                        }
                walker->count++;
        }
}

static const struct Qdisc_class_ops tbf_class_ops = {
        .graft          =       tbf_graft,
        .leaf           =       tbf_leaf,
        .get            =       tbf_get,
        .put            =       tbf_put,
        .walk           =       tbf_walk,
        .dump           =       tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
        .next           =       NULL,
        .cl_ops         =       &tbf_class_ops,
        .id             =       "tbf",
        .priv_size      =       sizeof(struct tbf_sched_data),
        .enqueue        =       tbf_enqueue,
        .dequeue        =       tbf_dequeue,
        .peek           =       qdisc_peek_dequeued,
        .drop           =       tbf_drop,
        .init           =       tbf_init,
        .reset          =       tbf_reset,
        .destroy        =       tbf_destroy,
        .change         =       tbf_change,
        .dump           =       tbf_dump,
        .owner          =       THIS_MODULE,
};

static int __init tbf_module_init(void)
{
        return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
        unregister_qdisc(&tbf_qdisc_ops);
}

module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");