net/sched: pass netlink extack to mqprio and taprio offload
[linux-block.git] / include/net/pkt_sched.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H

#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <net/sch_generic.h>
#include <net/net_namespace.h>
#include <uapi/linux/pkt_sched.h>

#define DEFAULT_TX_QUEUE_LEN	1000
#define STAB_SIZE_LOG_MAX	30

struct qdisc_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};

static inline void *qdisc_priv(struct Qdisc *q)
{
	return &q->privdata;
}

static inline struct Qdisc *qdisc_from_priv(void *priv)
{
	return container_of(priv, struct Qdisc, privdata);
}
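
/* Illustrative sketch (not part of this header): a qdisc implementation
 * typically declares its per-instance state as a private struct and reaches
 * it through qdisc_priv(); qdisc_from_priv() maps such a pointer back to the
 * owning Qdisc. The struct and callback names below are hypothetical.
 *
 *	struct my_sched_data {
 *		u32 limit;
 *	};
 *
 *	static int my_init(struct Qdisc *sch, struct nlattr *opt,
 *			   struct netlink_ext_ack *extack)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *
 *		q->limit = 1000;
 *		return 0;
 *	}
 *
 * The size of the private area is whatever the Qdisc_ops reports in
 * .priv_size (e.g. .priv_size = sizeof(struct my_sched_data)).
 */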

/*
   Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth

   Normal IP packet size ~ 512 bytes, hence:

   0.5 Kbyte / 1 Mbyte/sec = 0.5 msec, so we need a 50 usec timer for
   10 Mbit ethernet.

   10 msec resolution -> < 50 Kbit/sec.

   The result: a [34]86 is not a good choice for a QoS router :-(

   Things are not so bad, because we may use an artificial
   clock evaluated by integration of the network data flow
   in the most critical places.
 */

typedef u64	psched_time_t;
typedef long	psched_tdiff_t;

/* Avoid doing a 64-bit divide */
#define PSCHED_SHIFT			6
#define PSCHED_TICKS2NS(x)		((s64)(x) << PSCHED_SHIFT)
#define PSCHED_NS2TICKS(x)		((x) >> PSCHED_SHIFT)

#define PSCHED_TICKS_PER_SEC		PSCHED_NS2TICKS(NSEC_PER_SEC)
#define PSCHED_PASTPERFECT		0

static inline psched_time_t psched_get_time(void)
{
	return PSCHED_NS2TICKS(ktime_get_ns());
}
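
/* Worked example (added for clarity): with PSCHED_SHIFT == 6, one psched
 * tick is 2^6 = 64 ns, so PSCHED_TICKS_PER_SEC is NSEC_PER_SEC >> 6 =
 * 15625000 ticks, and e.g. PSCHED_TICKS2NS(15625) is 15625 << 6 =
 * 1000000 ns = 1 ms. The shift keeps both conversions free of 64-bit
 * divisions.
 */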

struct qdisc_watchdog {
	struct hrtimer	timer;
	struct Qdisc	*qdisc;
};

void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
				 clockid_t clockid);
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);

void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
				      u64 delta_ns);

static inline void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd,
					      u64 expires)
{
	return qdisc_watchdog_schedule_range_ns(wd, expires, 0ULL);
}

static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
					   psched_time_t expires)
{
	qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
}

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
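
/* Usage sketch (illustrative only, hypothetical qdisc callbacks): a
 * non-work-conserving qdisc arms the watchdog so that ->dequeue() is retried
 * once the next packet becomes eligible, roughly:
 *
 *	static int my_init(struct Qdisc *sch, struct nlattr *opt,
 *			   struct netlink_ext_ack *extack)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *
 *		qdisc_watchdog_init(&q->watchdog, sch);
 *		return 0;
 *	}
 *
 * In ->dequeue(), when the head packet may not be sent yet:
 *
 *	qdisc_watchdog_schedule_ns(&q->watchdog, next_tx_time_ns);
 *	return NULL;
 *
 * and in ->reset()/->destroy():
 *
 *	qdisc_watchdog_cancel(&q->watchdog);
 */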

extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;
extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;

int fifo_set_limit(struct Qdisc *q, unsigned int limit);
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
			       unsigned int limit,
			       struct netlink_ext_ack *extack);

int register_qdisc(struct Qdisc_ops *qops);
void unregister_qdisc(struct Qdisc_ops *qops);
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);

void qdisc_hash_add(struct Qdisc *q, bool invisible);
void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab,
					struct netlink_ext_ack *extack);
void qdisc_put_rtab(struct qdisc_rate_table *tab);
void qdisc_put_stab(struct qdisc_size_table *tab);
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		     struct net_device *dev, struct netdev_queue *txq,
		     spinlock_t *root_lock, bool validate);

void __qdisc_run(struct Qdisc *q);

static inline void qdisc_run(struct Qdisc *q)
{
	if (qdisc_run_begin(q)) {
		__qdisc_run(q);
		qdisc_run_end(q);
	}
}
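
/* Note (sketch, not from the original header): qdisc_run_begin() ensures
 * only one CPU dequeues from a given qdisc at a time, so a caller that has
 * already enqueued under the appropriate qdisc lock (as the core transmit
 * path does) can simply invoke:
 *
 *	qdisc_run(q);
 *
 * Packets queued meanwhile by other CPUs are drained by whichever CPU
 * currently owns the run state.
 */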

/* Calculate the maximal size of a packet seen by the hard_start_xmit
 * routine of this device.
 */
static inline unsigned int psched_mtu(const struct net_device *dev)
{
	return dev->mtu + dev->hard_header_len;
}
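
/* Example (added for illustration): on a standard Ethernet device with
 * mtu == 1500 and hard_header_len == 14, psched_mtu() yields 1514 bytes,
 * i.e. the largest frame the device's xmit routine can be handed,
 * link-layer header included.
 */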

static inline struct net *qdisc_net(struct Qdisc *q)
{
	return dev_net(q->dev_queue->dev);
}

struct tc_query_caps_base {
	enum tc_setup_type type;
	void *caps;
};
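
/* Sketch (illustrative, not part of this header): qdiscs discover hardware
 * capabilities by passing a tc_query_caps_base through ndo_setup_tc() with
 * the TC_QUERY_CAPS command, along these lines:
 *
 *	struct tc_taprio_caps caps = {};
 *	struct tc_query_caps_base base = {
 *		.type = TC_SETUP_QDISC_TAPRIO,
 *		.caps = &caps,
 *	};
 *
 *	if (dev->netdev_ops->ndo_setup_tc)
 *		dev->netdev_ops->ndo_setup_tc(dev, TC_QUERY_CAPS, &base);
 *
 * (sch_api.c provides a qdisc_offload_query_caps() helper wrapping this.)
 */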

struct tc_cbs_qopt_offload {
	u8 enable;
	s32 queue;
	s32 hicredit;
	s32 locredit;
	s32 idleslope;
	s32 sendslope;
};

struct tc_etf_qopt_offload {
	u8 enable;
	s32 queue;
};

struct tc_mqprio_caps {
	bool validate_queue_counts:1;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	struct netlink_ext_ack *extack;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};
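
/* Driver-side sketch (hypothetical driver, illustrative only): a driver
 * handling TC_SETUP_QDISC_MQPRIO receives this structure as type_data and
 * can report failures through the extack carried in it:
 *
 *	static int my_setup_mqprio(struct net_device *dev,
 *				   struct tc_mqprio_qopt_offload *mqprio)
 *	{
 *		if (mqprio->qopt.num_tc > MY_MAX_TCS) {
 *			NL_SET_ERR_MSG_MOD(mqprio->extack,
 *					   "Too many traffic classes");
 *			return -EOPNOTSUPP;
 *		}
 *		...
 *		return 0;
 *	}
 *
 * (MY_MAX_TCS and my_setup_mqprio are hypothetical names.)
 */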

struct tc_taprio_caps {
	bool supports_queue_max_sdu:1;
	bool gate_mask_per_txq:1;
	/* Device expects lower TXQ numbers to have higher priority over higher
	 * TXQs, regardless of their TC mapping. DO NOT USE FOR NEW DRIVERS,
	 * INSTEAD ENFORCE A PROPER TC:TXQ MAPPING COMING FROM USER SPACE.
	 */
	bool broken_mqprio:1;
};

struct tc_taprio_sched_entry {
	u8 command; /* TC_TAPRIO_CMD_* */

	/* The gate_mask in the offloading side refers to traffic classes */
	u32 gate_mask;
	u32 interval;
};

struct tc_taprio_qopt_offload {
	struct tc_mqprio_qopt_offload mqprio;
	struct netlink_ext_ack *extack;
	u8 enable;
	ktime_t base_time;
	u64 cycle_time;
	u64 cycle_time_extension;
	u32 max_sdu[TC_MAX_QUEUE];

	size_t num_entries;
	struct tc_taprio_sched_entry entries[];
};
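
/* Driver-side sketch (hypothetical, illustrative only): when taprio is
 * offloaded, the driver walks the schedule entries and programs the gate
 * control list; gate_mask bits refer to traffic classes unless the driver
 * advertises gate_mask_per_txq in tc_taprio_caps:
 *
 *	static int my_setup_taprio(struct net_device *dev,
 *				   struct tc_taprio_qopt_offload *offload)
 *	{
 *		size_t i;
 *
 *		if (!offload->enable)
 *			return my_taprio_disable(dev);
 *
 *		for (i = 0; i < offload->num_entries; i++) {
 *			const struct tc_taprio_sched_entry *e =
 *				&offload->entries[i];
 *
 *			// program e->gate_mask for e->interval ns
 *		}
 *		return 0;
 *	}
 *
 * Errors can again be reported via offload->extack.
 */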

#if IS_ENABLED(CONFIG_NET_SCH_TAPRIO)

/* Reference counting */
struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload);
void taprio_offload_free(struct tc_taprio_qopt_offload *offload);

#else

/* Reference counting */
static inline struct tc_taprio_qopt_offload *
taprio_offload_get(struct tc_taprio_qopt_offload *offload)
{
	return NULL;
}

static inline void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
{
}

#endif
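
/* Usage sketch (illustrative): taprio hands the offload structure to the
 * driver during ndo_setup_tc(); a driver that needs to keep it around after
 * that call returns takes an additional reference with taprio_offload_get()
 * and drops it later with taprio_offload_free(), e.g.:
 *
 *	priv->taprio = taprio_offload_get(offload);
 *	...
 *	taprio_offload_free(priv->taprio);
 *	priv->taprio = NULL;
 *
 * (priv here is a hypothetical driver-private structure.)
 */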

/* Ensure skb_mstamp_ns, which might have been populated with the txtime, is
 * not mistaken for a software timestamp, because this will otherwise prevent
 * the dispatch of hardware timestamps to the socket.
 */
static inline void skb_txtime_consumed(struct sk_buff *skb)
{
	skb->tstamp = ktime_set(0, 0);
}
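
/* Usage note (sketch): paths that treat skb->tstamp as a transmission time,
 * e.g. an ETF/taprio launch-time transmit routine, are expected to call
 * skb_txtime_consumed(skb) once the value has been used, so the leftover
 * txtime is not later misinterpreted as a software timestamp.
 */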

struct tc_skb_cb {
	struct qdisc_skb_cb qdisc_cb;

	u16 mru;
	u8 post_ct:1;
	u8 post_ct_snat:1;
	u8 post_ct_dnat:1;
	u16 zone; /* Only valid if post_ct = true */
};

static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
{
	struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;

	BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
	return cb;
}
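
/* Sketch (illustrative): tc code that needs per-packet state beyond
 * qdisc_skb_cb stores it here; for instance, the conntrack action can record
 * the zone it processed the packet in:
 *
 *	tc_skb_cb(skb)->post_ct = true;
 *	tc_skb_cb(skb)->zone = zone_id;
 *
 * and a later classifier can read tc_skb_cb(skb)->zone, which is only
 * meaningful while post_ct is set. (zone_id is a hypothetical local.)
 */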

static inline bool tc_qdisc_stats_dump(struct Qdisc *sch,
				       unsigned long cl,
				       struct qdisc_walker *arg)
{
	if (arg->count >= arg->skip && arg->fn(sch, cl, arg) < 0) {
		arg->stop = 1;
		return false;
	}

	arg->count++;
	return true;
}
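
/* Usage sketch (illustrative, hypothetical class iteration): a classful
 * qdisc's ->walk() callback typically loops over its classes and lets this
 * helper handle the skip/count/stop bookkeeping:
 *
 *	static void my_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 *	{
 *		unsigned long cl;
 *
 *		if (arg->stop)
 *			return;
 *
 *		for (cl = 1; cl <= MY_NUM_CLASSES; cl++) {
 *			if (!tc_qdisc_stats_dump(sch, cl, arg))
 *				return;
 *		}
 *	}
 *
 * (MY_NUM_CLASSES is a hypothetical per-qdisc constant.)
 */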

#endif