// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_etf.c  Earliest TxTime First queueing discipline.
 *
 * Authors:	Jesus Sanchez-Palencia <jesus.sanchez-palencia@intel.com>
 *		Vinicius Costa Gomes <vinicius.gomes@intel.com>
 */
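
/* Example configuration (illustrative only; the device name, parent handle
 * and parameter values below are assumptions that depend on the setup):
 *
 *	tc qdisc add dev eth0 parent 100:1 etf clockid CLOCK_TAI \
 *		delta 300000 offload
 *
 * Packets must carry a transmission time (txtime) set by the sending
 * socket; see the SO_TXTIME sketch further below.
 */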

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/rbtree.h>
#include <linux/skbuff.h>
#include <linux/posix-timers.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/sock.h>

#define DEADLINE_MODE_IS_ON(x) ((x)->flags & TC_ETF_DEADLINE_MODE_ON)
#define OFFLOAD_IS_ON(x) ((x)->flags & TC_ETF_OFFLOAD_ON)

struct etf_sched_data {
	bool offload;		/* Sorting is offloaded to the netdevice. */
	bool deadline_mode;	/* txtime is a deadline, not a launch time. */
	int clockid;
	int queue;		/* Index of the TX queue we are attached to. */
	s32 delta;		/* Dequeue/wakeup slack before txtime, in ns. */
	ktime_t last;		/* The txtime of the last skb sent to the netdevice. */
	struct rb_root_cached head;
	struct qdisc_watchdog watchdog;
	ktime_t (*get_time)(void);
};

static const struct nla_policy etf_policy[TCA_ETF_MAX + 1] = {
	[TCA_ETF_PARMS]	= { .len = sizeof(struct tc_etf_qopt) },
};

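/* For reference, the netlink payload is struct tc_etf_qopt from
 * include/uapi/linux/pkt_sched.h, which (paraphrased) carries the signed
 * 32-bit delta and clockid plus the flags word tested by the *_IS_ON()
 * macros above.
 */
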
static inline int validate_input_params(struct tc_etf_qopt *qopt,
					struct netlink_ext_ack *extack)
{
	/* Check that the parameters comply with the following rules:
	 *	* Clockid and delta must be valid.
	 *
	 *	* Dynamic clockids are not supported.
	 *
	 *	* Delta must be a non-negative integer.
	 *
	 * Also note that for the HW offload case, we expect the system
	 * clock to have been synchronized with the PHC.
	 */
	if (qopt->clockid < 0) {
		NL_SET_ERR_MSG(extack, "Dynamic clockids are not supported");
		return -ENOTSUPP;
	}

	if (qopt->clockid != CLOCK_TAI) {
		NL_SET_ERR_MSG(extack, "Invalid clockid. CLOCK_TAI must be used");
		return -EINVAL;
	}

	if (qopt->delta < 0) {
		NL_SET_ERR_MSG(extack, "Delta must be positive");
		return -EINVAL;
	}

	return 0;
}

static bool is_packet_valid(struct Qdisc *sch, struct sk_buff *nskb)
{
	struct etf_sched_data *q = qdisc_priv(sch);
	ktime_t txtime = nskb->tstamp;
	struct sock *sk = nskb->sk;
	ktime_t now;

	if (!sk)
		return false;

	if (!sock_flag(sk, SOCK_TXTIME))
		return false;

	/* We don't perform crosstimestamping.
	 * Drop if packet's clockid differs from qdisc's.
	 */
	if (sk->sk_clockid != q->clockid)
		return false;

	if (sk->sk_txtime_deadline_mode != q->deadline_mode)
		return false;

	now = q->get_time();
	if (ktime_before(txtime, now) || ktime_before(txtime, q->last))
		return false;

	return true;
}
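
/* For reference, a minimal userspace sketch (illustrative; error handling
 * omitted) of the socket setup that the checks above require:
 *
 *	struct sock_txtime st = {
 *		.clockid = CLOCK_TAI,
 *		.flags = 0,	// optionally SOF_TXTIME_DEADLINE_MODE
 *				// and/or SOF_TXTIME_REPORT_ERRORS
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_TXTIME, &st, sizeof(st));
 *
 * The per-packet txtime is then supplied with an SCM_TXTIME cmsg, which
 * the stack copies into skb->tstamp before the skb reaches this qdisc.
 */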

static struct sk_buff *etf_peek_timesortedlist(struct Qdisc *sch)
{
	struct etf_sched_data *q = qdisc_priv(sch);
	struct rb_node *p;

	p = rb_first_cached(&q->head);
	if (!p)
		return NULL;

	return rb_to_skb(p);
}

static void reset_watchdog(struct Qdisc *sch)
{
	struct etf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = etf_peek_timesortedlist(sch);
	ktime_t next;

	if (!skb) {
		qdisc_watchdog_cancel(&q->watchdog);
		return;
	}

	next = ktime_sub_ns(skb->tstamp, q->delta);
	qdisc_watchdog_schedule_ns(&q->watchdog, ktime_to_ns(next));
}

static void report_sock_error(struct sk_buff *skb, u32 err, u8 code)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *clone;
	ktime_t txtime = skb->tstamp;

	if (!skb->sk || !(skb->sk->sk_txtime_report_errors))
		return;

	clone = skb_clone(skb, GFP_ATOMIC);
	if (!clone)
		return;

	serr = SKB_EXT_ERR(clone);
	serr->ee.ee_errno = err;
	serr->ee.ee_origin = SO_EE_ORIGIN_TXTIME;
	serr->ee.ee_type = 0;
	serr->ee.ee_code = code;
	serr->ee.ee_pad = 0;
	serr->ee.ee_data = (txtime >> 32); /* high part of tstamp */
	serr->ee.ee_info = txtime; /* low part of tstamp */

	if (sock_queue_err_skb(skb->sk, clone))
		kfree_skb(clone);
}
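
/* Userspace reads these notifications from the socket error queue, e.g.
 * via recvmsg(fd, &msg, MSG_ERRQUEUE), and can reassemble the txtime as
 * (a sketch, assuming a struct sock_extended_err *ee taken from the cmsg
 * data):
 *
 *	__u64 txtime = ((__u64)ee->ee_data << 32) | ee->ee_info;
 *
 * where ee->ee_origin == SO_EE_ORIGIN_TXTIME identifies this qdisc (or
 * the offloading device) as the source of the error.
 */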

static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch,
				      struct sk_buff **to_free)
{
	struct etf_sched_data *q = qdisc_priv(sch);
	struct rb_node **p = &q->head.rb_root.rb_node, *parent = NULL;
	ktime_t txtime = nskb->tstamp;
	bool leftmost = true;

	if (!is_packet_valid(sch, nskb)) {
		report_sock_error(nskb, EINVAL,
				  SO_EE_CODE_TXTIME_INVALID_PARAM);
		return qdisc_drop(nskb, sch, to_free);
	}

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = rb_to_skb(parent);
		if (ktime_after(txtime, skb->tstamp)) {
			p = &parent->rb_right;
			leftmost = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color_cached(&nskb->rbnode, &q->head, leftmost);

	qdisc_qstats_backlog_inc(sch, nskb);
	sch->q.qlen++;

	/* Now we may need to re-arm the qdisc watchdog for the next packet. */
	reset_watchdog(sch);

	return NET_XMIT_SUCCESS;
}

static void timesortedlist_drop(struct Qdisc *sch, struct sk_buff *skb,
				ktime_t now)
{
	struct etf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *to_free = NULL;
	struct sk_buff *tmp = NULL;

	skb_rbtree_walk_from_safe(skb, tmp) {
		if (ktime_after(skb->tstamp, now))
			break;

		rb_erase_cached(&skb->rbnode, &q->head);

		/* The rbnode field in the skb aliases next, prev and dev;
		 * now that we are done with the rbnode, reset them.
		 */
		skb->next = NULL;
		skb->prev = NULL;
		skb->dev = qdisc_dev(sch);

		report_sock_error(skb, ECANCELED, SO_EE_CODE_TXTIME_MISSED);

		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_drop(skb, sch, &to_free);
		qdisc_qstats_overlimit(sch);
		sch->q.qlen--;
	}

	kfree_skb_list(to_free);
}

static void timesortedlist_remove(struct Qdisc *sch, struct sk_buff *skb)
{
	struct etf_sched_data *q = qdisc_priv(sch);

	rb_erase_cached(&skb->rbnode, &q->head);

	/* The rbnode field in the skb aliases next, prev and dev;
	 * now that we are done with the rbnode, reset them.
	 */
	skb->next = NULL;
	skb->prev = NULL;
	skb->dev = qdisc_dev(sch);

	qdisc_qstats_backlog_dec(sch, skb);

	qdisc_bstats_update(sch, skb);

	q->last = skb->tstamp;

	sch->q.qlen--;
}

static struct sk_buff *etf_dequeue_timesortedlist(struct Qdisc *sch)
{
	struct etf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	ktime_t now, next;

	skb = etf_peek_timesortedlist(sch);
	if (!skb)
		return NULL;

	now = q->get_time();

	/* Drop if packet has expired while in queue. */
	if (ktime_before(skb->tstamp, now)) {
		timesortedlist_drop(sch, skb, now);
		skb = NULL;
		goto out;
	}

	/* When in deadline mode, dequeue as soon as possible and change the
	 * txtime from deadline to now, the time of the actual dequeue.
	 */
	if (q->deadline_mode) {
		timesortedlist_remove(sch, skb);
		skb->tstamp = now;
		goto out;
	}

	next = ktime_sub_ns(skb->tstamp, q->delta);

	/* Dequeue only if now is within the [txtime - delta, txtime] range. */
	if (ktime_after(now, next))
		timesortedlist_remove(sch, skb);
	else
		skb = NULL;

out:
	/* Now we may need to re-arm the qdisc watchdog for the next packet. */
	reset_watchdog(sch);

	return skb;
}

static void etf_disable_offload(struct net_device *dev,
				struct etf_sched_data *q)
{
	struct tc_etf_qopt_offload etf = { };
	const struct net_device_ops *ops;
	int err;

	if (!q->offload)
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_setup_tc)
		return;

	etf.queue = q->queue;
	etf.enable = 0;

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETF, &etf);
	if (err < 0)
		pr_warn("Couldn't disable ETF offload for queue %d\n",
			etf.queue);
}

static int etf_enable_offload(struct net_device *dev, struct etf_sched_data *q,
			      struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_etf_qopt_offload etf = { };
	int err;

	if (q->offload)
		return 0;

	if (!ops->ndo_setup_tc) {
		NL_SET_ERR_MSG(extack, "Specified device does not support ETF offload");
		return -EOPNOTSUPP;
	}

	etf.queue = q->queue;
	etf.enable = 1;

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETF, &etf);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Specified device failed to setup ETF hardware offload");
		return err;
	}

	return 0;
}
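
/* On the driver side, a device advertising ETF offload handles the
 * TC_SETUP_QDISC_ETF type in its ndo_setup_tc() callback and receives the
 * struct tc_etf_qopt_offload built above, using etf.queue and etf.enable
 * to (de)configure time-based transmission on the given hardware queue.
 */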

static int etf_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct etf_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct nlattr *tb[TCA_ETF_MAX + 1];
	struct tc_etf_qopt *qopt;
	int err;

	if (!opt) {
		NL_SET_ERR_MSG(extack,
			       "Missing ETF qdisc options which are mandatory");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_ETF_MAX, opt, etf_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_ETF_PARMS]) {
		NL_SET_ERR_MSG(extack, "Missing mandatory ETF parameters");
		return -EINVAL;
	}

	qopt = nla_data(tb[TCA_ETF_PARMS]);

	pr_debug("delta %d clockid %d offload %s deadline %s\n",
		 qopt->delta, qopt->clockid,
		 OFFLOAD_IS_ON(qopt) ? "on" : "off",
		 DEADLINE_MODE_IS_ON(qopt) ? "on" : "off");

	err = validate_input_params(qopt, extack);
	if (err < 0)
		return err;

	q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);

	if (OFFLOAD_IS_ON(qopt)) {
		err = etf_enable_offload(dev, q, extack);
		if (err < 0)
			return err;
	}

	/* Everything went OK; save the parameters used. */
	q->delta = qopt->delta;
	q->clockid = qopt->clockid;
	q->offload = OFFLOAD_IS_ON(qopt);
	q->deadline_mode = DEADLINE_MODE_IS_ON(qopt);

	switch (q->clockid) {
	case CLOCK_REALTIME:
		q->get_time = ktime_get_real;
		break;
	case CLOCK_MONOTONIC:
		q->get_time = ktime_get;
		break;
	case CLOCK_BOOTTIME:
		q->get_time = ktime_get_boottime;
		break;
	case CLOCK_TAI:
		q->get_time = ktime_get_clocktai;
		break;
	default:
		NL_SET_ERR_MSG(extack, "Clockid is not supported");
		return -ENOTSUPP;
	}

	qdisc_watchdog_init_clockid(&q->watchdog, sch, q->clockid);

	return 0;
}

static void timesortedlist_clear(struct Qdisc *sch)
{
	struct etf_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first_cached(&q->head);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);

		rb_erase_cached(&skb->rbnode, &q->head);
		rtnl_kfree_skbs(skb, skb);
		sch->q.qlen--;
	}
}

static void etf_reset(struct Qdisc *sch)
{
	struct etf_sched_data *q = qdisc_priv(sch);

	/* Only cancel watchdog if it's been initialized. */
	if (q->watchdog.qdisc == sch)
		qdisc_watchdog_cancel(&q->watchdog);

	/* No matter which mode we are on, it's safe to clear both lists. */
	timesortedlist_clear(sch);
	__qdisc_reset_queue(&sch->q);

	sch->qstats.backlog = 0;
	sch->q.qlen = 0;

	q->last = 0;
}

static void etf_destroy(struct Qdisc *sch)
{
	struct etf_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);

	/* Only cancel watchdog if it's been initialized. */
	if (q->watchdog.qdisc == sch)
		qdisc_watchdog_cancel(&q->watchdog);

	etf_disable_offload(dev, q);
}

static int etf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct etf_sched_data *q = qdisc_priv(sch);
	struct tc_etf_qopt opt = { };
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	opt.delta = q->delta;
	opt.clockid = q->clockid;
	if (q->offload)
		opt.flags |= TC_ETF_OFFLOAD_ON;

	if (q->deadline_mode)
		opt.flags |= TC_ETF_DEADLINE_MODE_ON;

	if (nla_put(skb, TCA_ETF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct Qdisc_ops etf_qdisc_ops __read_mostly = {
	.id		=	"etf",
	.priv_size	=	sizeof(struct etf_sched_data),
	.enqueue	=	etf_enqueue_timesortedlist,
	.dequeue	=	etf_dequeue_timesortedlist,
	.peek		=	etf_peek_timesortedlist,
	.init		=	etf_init,
	.reset		=	etf_reset,
	.destroy	=	etf_destroy,
	.dump		=	etf_dump,
	.owner		=	THIS_MODULE,
};

static int __init etf_module_init(void)
{
	return register_qdisc(&etf_qdisc_ops);
}

static void __exit etf_module_exit(void)
{
	unregister_qdisc(&etf_qdisc_ops);
}
module_init(etf_module_init)
module_exit(etf_module_exit)
MODULE_LICENSE("GPL");