Commit | Line | Data |
---|---|---|
9952f691 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
92651940 AD |
2 | /* |
3 | * Copyright (c) 2008, Intel Corporation. | |
4 | * | |
92651940 AD |
5 | * Author: Alexander Duyck <alexander.h.duyck@intel.com> |
6 | */ | |
7 | ||
8 | #include <linux/module.h> | |
5a0e3ad6 | 9 | #include <linux/slab.h> |
92651940 AD |
10 | #include <linux/types.h> |
11 | #include <linux/kernel.h> | |
12 | #include <linux/string.h> | |
13 | #include <linux/errno.h> | |
14 | #include <linux/skbuff.h> | |
15 | #include <net/netlink.h> | |
16 | #include <net/pkt_sched.h> | |
cf1facda | 17 | #include <net/pkt_cls.h> |
92651940 AD |
18 | |
/* Per-qdisc state: one child qdisc per hardware transmit band. */
struct multiq_sched_data {
	u16 bands;		/* number of active bands */
	u16 max_bands;		/* capacity of queues[] (set at init from num_tx_queues) */
	u16 curband;		/* last band serviced; drives round-robin dequeue */
	struct tcf_proto __rcu *filter_list;	/* classifier chain, RCU-protected */
	struct tcf_block *block;		/* tcf block backing filter_list */
	struct Qdisc **queues;	/* max_bands child qdiscs; unused slots hold &noop_qdisc */
};
27 | ||
28 | ||
/* Pick the child qdisc for @skb.
 *
 * The attached classifier chain runs first (under RCU BH read side).  With
 * CONFIG_NET_CLS_ACT, STOLEN/QUEUED/TRAP results return NULL with *qerr
 * marked as a non-drop "stolen" success, and SHOT returns NULL with *qerr
 * left as a bypass drop.  Otherwise the band comes from the skb's queue
 * mapping, clamped to band 0 when out of range.
 */
static struct Qdisc *
multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	u32 band;
	struct tcf_result res;
	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	err = tcf_classify(skb, NULL, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
	switch (err) {
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		fallthrough;
	case TC_ACT_SHOT:
		return NULL;
	}
#endif
	/* The classifier result itself is not used for band selection; the
	 * device's queue mapping decides the band.
	 */
	band = skb_get_queue_mapping(skb);

	if (band >= q->bands)
		return q->queues[0];

	return q->queues[band];
}
58 | ||
/* Enqueue @skb on the child qdisc chosen by multiq_classify().
 *
 * A NULL classification (packet shot/stolen by an action) frees the skb via
 * to_free and charges a drop only for the bypass case.  On successful child
 * enqueue the parent qlen is bumped; child failures that count as drops are
 * reflected in the parent's drop statistics.
 */
static int
multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
	       struct sk_buff **to_free)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = multiq_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {

		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	/* Congestion notifications (e.g. NET_XMIT_CN) are not drops. */
	if (net_xmit_drop_count(ret))
		qdisc_qstats_drop(sch);
	return ret;
}
86 | ||
92651940 AD |
/* Round-robin dequeue across bands.
 *
 * Advances q->curband before each attempt so successive calls start at the
 * band after the last one serviced, giving per-band fairness.  Bands whose
 * hardware subqueue is stopped are skipped to avoid head-of-line blocking.
 * Returns the dequeued skb, or NULL if every band is empty or stopped.
 */
static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		q->curband++;
		if (q->curband >= q->bands)
			q->curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!netif_xmit_stopped(
		    netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
			qdisc = q->queues[q->curband];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				qdisc_bstats_update(sch, skb);
				sch->q.qlen--;
				return skb;
			}
		}
	}
	return NULL;

}
117 | ||
8e3af978 JP |
118 | static struct sk_buff *multiq_peek(struct Qdisc *sch) |
119 | { | |
120 | struct multiq_sched_data *q = qdisc_priv(sch); | |
121 | unsigned int curband = q->curband; | |
122 | struct Qdisc *qdisc; | |
123 | struct sk_buff *skb; | |
124 | int band; | |
125 | ||
126 | for (band = 0; band < q->bands; band++) { | |
127 | /* cycle through bands to ensure fairness */ | |
128 | curband++; | |
129 | if (curband >= q->bands) | |
130 | curband = 0; | |
131 | ||
132 | /* Check that target subqueue is available before | |
f30ab418 | 133 | * pulling an skb to avoid head-of-line blocking. |
8e3af978 | 134 | */ |
73466498 TH |
135 | if (!netif_xmit_stopped( |
136 | netdev_get_tx_queue(qdisc_dev(sch), curband))) { | |
8e3af978 JP |
137 | qdisc = q->queues[curband]; |
138 | skb = qdisc->ops->peek(qdisc); | |
139 | if (skb) | |
140 | return skb; | |
141 | } | |
142 | } | |
143 | return NULL; | |
144 | ||
145 | } | |
146 | ||
92651940 AD |
147 | static void |
148 | multiq_reset(struct Qdisc *sch) | |
149 | { | |
150 | u16 band; | |
151 | struct multiq_sched_data *q = qdisc_priv(sch); | |
152 | ||
153 | for (band = 0; band < q->bands; band++) | |
154 | qdisc_reset(q->queues[band]); | |
92651940 AD |
155 | q->curband = 0; |
156 | } | |
157 | ||
158 | static void | |
159 | multiq_destroy(struct Qdisc *sch) | |
160 | { | |
161 | int band; | |
162 | struct multiq_sched_data *q = qdisc_priv(sch); | |
163 | ||
6529eaba | 164 | tcf_block_put(q->block); |
92651940 | 165 | for (band = 0; band < q->bands; band++) |
86bd446b | 166 | qdisc_put(q->queues[band]); |
92651940 AD |
167 | |
168 | kfree(q->queues); | |
169 | } | |
170 | ||
2030721c AA |
171 | static int multiq_tune(struct Qdisc *sch, struct nlattr *opt, |
172 | struct netlink_ext_ack *extack) | |
92651940 AD |
173 | { |
174 | struct multiq_sched_data *q = qdisc_priv(sch); | |
175 | struct tc_multiq_qopt *qopt; | |
c2999f7f VB |
176 | struct Qdisc **removed; |
177 | int i, n_removed = 0; | |
92651940 AD |
178 | |
179 | if (!netif_is_multiqueue(qdisc_dev(sch))) | |
149490f1 | 180 | return -EOPNOTSUPP; |
92651940 AD |
181 | if (nla_len(opt) < sizeof(*qopt)) |
182 | return -EINVAL; | |
183 | ||
184 | qopt = nla_data(opt); | |
185 | ||
186 | qopt->bands = qdisc_dev(sch)->real_num_tx_queues; | |
187 | ||
c2999f7f VB |
188 | removed = kmalloc(sizeof(*removed) * (q->max_bands - q->bands), |
189 | GFP_KERNEL); | |
190 | if (!removed) | |
191 | return -ENOMEM; | |
192 | ||
92651940 AD |
193 | sch_tree_lock(sch); |
194 | q->bands = qopt->bands; | |
195 | for (i = q->bands; i < q->max_bands; i++) { | |
f07d1501 | 196 | if (q->queues[i] != &noop_qdisc) { |
b94c8afc | 197 | struct Qdisc *child = q->queues[i]; |
e5f0e8f8 | 198 | |
b94c8afc | 199 | q->queues[i] = &noop_qdisc; |
c2999f7f VB |
200 | qdisc_purge_queue(child); |
201 | removed[n_removed++] = child; | |
92651940 AD |
202 | } |
203 | } | |
204 | ||
205 | sch_tree_unlock(sch); | |
206 | ||
c2999f7f VB |
207 | for (i = 0; i < n_removed; i++) |
208 | qdisc_put(removed[i]); | |
209 | kfree(removed); | |
210 | ||
92651940 AD |
211 | for (i = 0; i < q->bands; i++) { |
212 | if (q->queues[i] == &noop_qdisc) { | |
b94c8afc | 213 | struct Qdisc *child, *old; |
3511c913 | 214 | child = qdisc_create_dflt(sch->dev_queue, |
92651940 AD |
215 | &pfifo_qdisc_ops, |
216 | TC_H_MAKE(sch->handle, | |
a38a9882 | 217 | i + 1), extack); |
92651940 AD |
218 | if (child) { |
219 | sch_tree_lock(sch); | |
b94c8afc PM |
220 | old = q->queues[i]; |
221 | q->queues[i] = child; | |
49b49971 JK |
222 | if (child != &noop_qdisc) |
223 | qdisc_hash_add(child, true); | |
92651940 | 224 | |
c2999f7f VB |
225 | if (old != &noop_qdisc) |
226 | qdisc_purge_queue(old); | |
92651940 | 227 | sch_tree_unlock(sch); |
c2999f7f | 228 | qdisc_put(old); |
92651940 AD |
229 | } |
230 | } | |
231 | } | |
232 | return 0; | |
233 | } | |
234 | ||
e63d7dfd AA |
/* Initialize the qdisc: acquire a tcf block for filters, size the child
 * array to the device's maximum TX queue count (all slots start as
 * &noop_qdisc), then run multiq_tune() to populate the active bands.
 *
 * On error paths the core calls ->destroy(), which releases q->block and
 * any allocated state.  Returns 0 or a negative errno.
 */
static int multiq_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int i, err;

	q->queues = NULL;

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	q->max_bands = qdisc_dev(sch)->num_tx_queues;

	q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
	if (!q->queues)
		return -ENOBUFS;
	for (i = 0; i < q->max_bands; i++)
		q->queues[i] = &noop_qdisc;

	return multiq_tune(sch, opt, extack);
}
260 | ||
261 | static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb) | |
262 | { | |
263 | struct multiq_sched_data *q = qdisc_priv(sch); | |
264 | unsigned char *b = skb_tail_pointer(skb); | |
265 | struct tc_multiq_qopt opt; | |
266 | ||
267 | opt.bands = q->bands; | |
268 | opt.max_bands = q->max_bands; | |
269 | ||
1b34ec43 DM |
270 | if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) |
271 | goto nla_put_failure; | |
92651940 AD |
272 | |
273 | return skb->len; | |
274 | ||
275 | nla_put_failure: | |
276 | nlmsg_trim(skb, b); | |
277 | return -1; | |
278 | } | |
279 | ||
280 | static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | |
653d6fd6 | 281 | struct Qdisc **old, struct netlink_ext_ack *extack) |
92651940 AD |
282 | { |
283 | struct multiq_sched_data *q = qdisc_priv(sch); | |
284 | unsigned long band = arg - 1; | |
285 | ||
92651940 AD |
286 | if (new == NULL) |
287 | new = &noop_qdisc; | |
288 | ||
86a7996c | 289 | *old = qdisc_replace(sch, new, &q->queues[band]); |
92651940 AD |
290 | return 0; |
291 | } | |
292 | ||
293 | static struct Qdisc * | |
294 | multiq_leaf(struct Qdisc *sch, unsigned long arg) | |
295 | { | |
296 | struct multiq_sched_data *q = qdisc_priv(sch); | |
297 | unsigned long band = arg - 1; | |
298 | ||
92651940 AD |
299 | return q->queues[band]; |
300 | } | |
301 | ||
143976ce | 302 | static unsigned long multiq_find(struct Qdisc *sch, u32 classid) |
92651940 AD |
303 | { |
304 | struct multiq_sched_data *q = qdisc_priv(sch); | |
305 | unsigned long band = TC_H_MIN(classid); | |
306 | ||
307 | if (band - 1 >= q->bands) | |
308 | return 0; | |
309 | return band; | |
310 | } | |
311 | ||
312 | static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent, | |
313 | u32 classid) | |
314 | { | |
143976ce | 315 | return multiq_find(sch, classid); |
92651940 AD |
316 | } |
317 | ||
318 | ||
143976ce | 319 | static void multiq_unbind(struct Qdisc *q, unsigned long cl) |
92651940 | 320 | { |
92651940 AD |
321 | } |
322 | ||
92651940 AD |
323 | static int multiq_dump_class(struct Qdisc *sch, unsigned long cl, |
324 | struct sk_buff *skb, struct tcmsg *tcm) | |
325 | { | |
326 | struct multiq_sched_data *q = qdisc_priv(sch); | |
327 | ||
92651940 | 328 | tcm->tcm_handle |= TC_H_MIN(cl); |
cc7ec456 | 329 | tcm->tcm_info = q->queues[cl - 1]->handle; |
92651940 AD |
330 | return 0; |
331 | } | |
332 | ||
333 | static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl, | |
334 | struct gnet_dump *d) | |
335 | { | |
336 | struct multiq_sched_data *q = qdisc_priv(sch); | |
337 | struct Qdisc *cl_q; | |
338 | ||
339 | cl_q = q->queues[cl - 1]; | |
29cbcd85 | 340 | if (gnet_stats_copy_basic(d, cl_q->cpu_bstats, &cl_q->bstats, true) < 0 || |
5dd431b6 | 341 | qdisc_qstats_copy(d, cl_q) < 0) |
92651940 AD |
342 | return -1; |
343 | ||
344 | return 0; | |
345 | } | |
346 | ||
347 | static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |
348 | { | |
349 | struct multiq_sched_data *q = qdisc_priv(sch); | |
350 | int band; | |
351 | ||
352 | if (arg->stop) | |
353 | return; | |
354 | ||
355 | for (band = 0; band < q->bands; band++) { | |
e046fa89 | 356 | if (!tc_qdisc_stats_dump(sch, band + 1, arg)) |
92651940 | 357 | break; |
92651940 AD |
358 | } |
359 | } | |
360 | ||
cbaacc4e AA |
361 | static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl, |
362 | struct netlink_ext_ack *extack) | |
92651940 AD |
363 | { |
364 | struct multiq_sched_data *q = qdisc_priv(sch); | |
365 | ||
366 | if (cl) | |
367 | return NULL; | |
6529eaba | 368 | return q->block; |
92651940 AD |
369 | } |
370 | ||
/* Class operations wired into the tc class API for "multiq". */
static const struct Qdisc_class_ops multiq_class_ops = {
	.graft		=	multiq_graft,
	.leaf		=	multiq_leaf,
	.find		=	multiq_find,
	.walk		=	multiq_walk,
	.tcf_block	=	multiq_tcf_block,
	.bind_tcf	=	multiq_bind,
	.unbind_tcf	=	multiq_unbind,
	.dump		=	multiq_dump_class,
	.dump_stats	=	multiq_dump_class_stats,
};
382 | ||
/* Qdisc operations registered under the "multiq" id. */
static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&multiq_class_ops,
	.id		=	"multiq",
	.priv_size	=	sizeof(struct multiq_sched_data),
	.enqueue	=	multiq_enqueue,
	.dequeue	=	multiq_dequeue,
	.peek		=	multiq_peek,
	.init		=	multiq_init,
	.reset		=	multiq_reset,
	.destroy	=	multiq_destroy,
	.change		=	multiq_tune,
	.dump		=	multiq_dump,
	.owner		=	THIS_MODULE,
};
398 | ||
/* Module entry: register the "multiq" qdisc with the tc core. */
static int __init multiq_module_init(void)
{
	return register_qdisc(&multiq_qdisc_ops);
}
403 | ||
/* Module exit: unregister the "multiq" qdisc. */
static void __exit multiq_module_exit(void)
{
	unregister_qdisc(&multiq_qdisc_ops);
}
408 | ||
/* Standard module hookup and license declaration. */
module_init(multiq_module_init)
module_exit(multiq_module_exit)

MODULE_LICENSE("GPL");