// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>


/*	Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.



	Actually, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size, while the other,
	with rate P (peak rate) and depth M (equal to the link MTU),
	limits bursts on a smaller time scale.

	It is easy to see that P > R and B > M. If P is infinite, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)


	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by an EOI for some previous packet,
	TBF can stop its activity for 1/HZ.


	This means that with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.

	Note that the peak rate TBF is much tougher: with MTU 1500
	P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use alpha with HZ=1000 :-)

	With classful TBF, limit is just kept for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed the limit is no longer effective.
*/

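/* Worked example of the latency bound above (figures are illustrative,
 * not taken from the code): with rate R = 125000 bytes/s (1 Mbit/s),
 * bucket depth B = 10000 bytes and a backlog limit L = 20000 bytes,
 *
 *	lat = (L - B)/R = 10000/125000 s = 80 ms
 *
 * (no peak rate configured, so the (L-M)/P term does not apply).
 * A typical user space configuration that ends up in tbf_change() below
 * would look roughly like:
 *
 *	tc qdisc add dev eth0 root tbf rate 1mbit burst 10kb latency 80ms
 *
 * where "burst" becomes the bucket depth and tc translates "latency"
 * into the byte limit of the inner bfifo queue.
 */
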
96
cc7ec456 97struct tbf_sched_data {
1da177e4
LT
98/* Parameters */
99 u32 limit; /* Maximal length of backlog: bytes */
a135e598 100 u32 max_size;
b757c933
JP
101 s64 buffer; /* Token bucket depth/rate: MUST BE >= MTU/B */
102 s64 mtu;
b757c933
JP
103 struct psched_ratecfg rate;
104 struct psched_ratecfg peak;
1da177e4
LT
105
106/* Variables */
b757c933
JP
107 s64 tokens; /* Current number of B tokens */
108 s64 ptokens; /* Current number of P tokens */
109 s64 t_c; /* Time check-point */
1da177e4 110 struct Qdisc *qdisc; /* Inner qdisc, default - bfifo queue */
f7f593e3 111 struct qdisc_watchdog watchdog; /* Watchdog timer */
1da177e4
LT
112};
113

/* Time to Length, convert time in ns to length in bytes
 * to determine how many bytes can be sent in given time.
 */
static u64 psched_ns_t2l(const struct psched_ratecfg *r,
			 u64 time_in_ns)
{
	/* The formula is:
	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
	 */
	u64 len = time_in_ns * r->rate_bytes_ps;

	do_div(len, NSEC_PER_SEC);

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
		do_div(len, 53);
		len = len * 48;
	}

	if (len > r->overhead)
		len -= r->overhead;
	else
		len = 0;

	return len;
}

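/* Example of the conversion above (numbers chosen only for illustration):
 * with rate_bytes_ps = 125000 (1 Mbit/s) and time_in_ns = 8000000 (8 ms),
 * len = 8000000 * 125000 / NSEC_PER_SEC = 1000 bytes. For an ATM link
 * layer the result is first divided by 53 and then multiplied by 48
 * (cell payload ratio), giving 864 bytes with integer division, before
 * the configured overhead is subtracted.
 */
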
static void tbf_offload_change(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_tbf_qopt_offload qopt;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	qopt.command = TC_TBF_REPLACE;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.replace_params.rate = q->rate;
	qopt.replace_params.max_size = q->max_size;
	qopt.replace_params.qstats = &sch->qstats;

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TBF, &qopt);
}

static void tbf_offload_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_tbf_qopt_offload qopt;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	qopt.command = TC_TBF_DESTROY;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TBF, &qopt);
}

static int tbf_offload_dump(struct Qdisc *sch)
{
	struct tc_tbf_qopt_offload qopt;

	qopt.command = TC_TBF_STATS;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.stats.bstats = &sch->bstats;
	qopt.stats.qstats = &sch->qstats;

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_TBF, &qopt);
}

static void tbf_offload_graft(struct Qdisc *sch, struct Qdisc *new,
			      struct Qdisc *old, struct netlink_ext_ack *extack)
{
	struct tc_tbf_qopt_offload graft_offload = {
		.handle		= sch->handle,
		.parent		= sch->parent,
		.child_handle	= new->handle,
		.command	= TC_TBF_GRAFT,
	};

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
				   TC_SETUP_QDISC_TBF, &graft_offload, extack);
}

/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_drop(skb, sch, to_free);

	nb = 0;
	skb_list_walk_safe(segs, segs, nskb) {
		skb_mark_not_on_list(segs);
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		len += segs->len;
		ret = qdisc_enqueue(segs, q->qdisc, to_free);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				qdisc_qstats_drop(sch);
		} else {
			nb++;
		}
	}
	sch->q.qlen += nb;
	if (nb > 1)
		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}

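/* Rough illustration of the segmentation path (sizes chosen only as an
 * example): a 45000 byte GSO super-packet with a 1500 byte MSS becomes
 * 30 individual segments here, each enqueued on the inner qdisc by
 * itself, so the token bucket paces 30 MTU-sized packets instead of
 * having to drop one oversized burst.
 */
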
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = qdisc_pkt_len(skb);
	int ret;

	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb) &&
		    skb_gso_validate_mac_len(skb, q->max_size))
			return tbf_segment(skb, sch, to_free);
		return qdisc_drop(skb, sch, to_free);
	}
	ret = qdisc_enqueue(skb, q->qdisc, to_free);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			qdisc_qstats_drop(sch);
		return ret;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static bool tbf_peak_present(const struct tbf_sched_data *q)
{
	return q->peak.rate_bytes_ps;
}

static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_get_ns();
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (tbf_peak_present(q)) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but it is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		qdisc_qstats_overlimit(sch);
	}
	return NULL;
}

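/* Worked example of the token arithmetic above (illustrative numbers):
 * tokens are accounted in nanoseconds of transmission time. At a rate of
 * 125000 bytes/s (1 Mbit/s) a 1500 byte packet costs
 * psched_l2t_ns() = 1500 / 125000 s = 12 ms worth of tokens, and a buffer
 * of 10000 bytes corresponds to 80 ms, so the bucket refills completely
 * after 80 ms of idle link. If toks ends up negative, the watchdog is
 * armed for exactly the missing time, e.g. toks = -4000000 ns means the
 * qdisc sleeps for 4 ms before trying again.
 */
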
static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	q->t_c = ktime_get_ns();
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]	= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
	[TCA_TBF_BURST] = { .type = NLA_U32 },
	[TCA_TBF_PBURST] = { .type = NLA_U32 },
};

static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct Qdisc *old = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested_deprecated(tb, TCA_TBF_MAX, opt, tbf_policy,
					  NULL);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB],
					      NULL));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
					      tb[TCA_TBF_PTAB],
					      NULL));

	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

	if (tb[TCA_TBF_BURST]) {
		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
		buffer = psched_l2t_ns(&rate, max_size);
	} else {
		max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
	}

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equal to rate %llu !\n",
					    peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		if (tb[TCA_TBF_PBURST]) {
			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);
			max_size = min_t(u32, max_size, pburst);
			mtu = psched_l2t_ns(&peak, pburst);
		} else {
			max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
		}
	} else {
		memset(&peak, 0, sizeof(peak));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit,
					 extack);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_flush_backlog(q->qdisc);
		old = q->qdisc;
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	if (tb[TCA_TBF_PBURST])
		q->mtu = mtu;
	else
		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	if (tb[TCA_TBF_BURST])
		q->buffer = buffer;
	else
		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));

	sch_tree_unlock(sch);
	qdisc_put(old);
	err = 0;

	tbf_offload_change(sch);
done:
	return err;
}

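/* Example of the two burst encodings handled above (values illustrative):
 * an older tc sends only TCA_TBF_PARMS, so max_size is derived from the
 * tick-based buffer via psched_ns_t2l(); a newer tc may additionally send
 * TCA_TBF_BURST (e.g. 10000 bytes), in which case that byte value is used
 * directly and the buffer is recomputed from it with psched_l2t_ns().
 */
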
static int tbf_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	if (!opt)
		return -EINVAL;

	q->t_c = ktime_get_ns();

	return tbf_change(sch, opt, extack);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	tbf_offload_destroy(sch);
	qdisc_put(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;
	int err;

	err = tbf_offload_dump(sch);
	if (err)
		return err;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (tbf_peak_present(q))
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;
	if (tbf_peak_present(q) &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);

	tbf_offload_graft(sch, new, *old, extack);
	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long tbf_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		tc_qdisc_stats_dump(sch, 1, walker);
	}
}

static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.find		=	tbf_find,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");