net/sched/sch_prio.c
/*
 * net/sched/sch_prio.c	Simple 3-band priority "scheduler".
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 * Fixes:	19990609: J Hadi Salim <hadi@nortelnetworks.com>:
 *		Init --  EINVAL when opt undefined
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

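/* Per-qdisc state: the number of bands, the attached classifier block, the
 * priority-to-band map and one child qdisc per band.
 */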
struct prio_sched_data {
	int bands;
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	u8  prio2band[TC_PRIO_MAX+1];
	struct Qdisc *queues[TCQ_PRIO_BANDS];
};

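/* Pick the child qdisc for an skb: consult the attached tc filters first
 * (unless skb->priority already addresses this qdisc), otherwise fall back
 * to the prio2band map; an out-of-range result uses the queue that
 * priority 0 maps to.
 */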
static struct Qdisc *
prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	u32 band = skb->priority;
	struct tcf_result res;
	struct tcf_proto *fl;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	if (TC_H_MAJ(skb->priority) != sch->handle) {
		fl = rcu_dereference_bh(q->filter_list);
		err = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
		switch (err) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if (!fl || err < 0) {
			if (TC_H_MAJ(band))
				band = 0;
			return q->queues[q->prio2band[band & TC_PRIO_MAX]];
		}
		band = res.classid;
	}
	band = TC_H_MIN(band) - 1;
	if (band >= q->bands)
		return q->queues[q->prio2band[0]];

	return q->queues[band];
}

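/* Classify the packet, enqueue it on the chosen child qdisc and account the
 * parent's qlen/backlog; drops from the classifier stage are counted here.
 */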
static int
prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = prio_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	if (net_xmit_drop_count(ret))
		qdisc_qstats_drop(sch);
	return ret;
}

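/* Peek at the head packet of the highest-priority non-empty band. */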
static struct sk_buff *prio_peek(struct Qdisc *sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < q->bands; prio++) {
		struct Qdisc *qdisc = q->queues[prio];
		struct sk_buff *skb = qdisc->ops->peek(qdisc);
		if (skb)
			return skb;
	}
	return NULL;
}

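/* Strict-priority dequeue: band 0 is always drained before band 1, and so on. */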
static struct sk_buff *prio_dequeue(struct Qdisc *sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < q->bands; prio++) {
		struct Qdisc *qdisc = q->queues[prio];
		struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			return skb;
		}
	}
	return NULL;
}

static void
prio_reset(struct Qdisc *sch)
{
	int prio;
	struct prio_sched_data *q = qdisc_priv(sch);

	for (prio = 0; prio < q->bands; prio++)
		qdisc_reset(q->queues[prio]);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

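/* Mirror a configuration change (or a teardown when qopt is NULL) to the
 * hardware via ndo_setup_tc(TC_SETUP_QDISC_PRIO); returns -EOPNOTSUPP when
 * the device cannot offload.
 */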
static int prio_offload(struct Qdisc *sch, struct tc_prio_qopt *qopt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_prio_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (qopt) {
		opt.command = TC_PRIO_REPLACE;
		opt.replace_params.bands = qopt->bands;
		memcpy(&opt.replace_params.priomap, qopt->priomap,
		       TC_PRIO_MAX + 1);
		opt.replace_params.qstats = &sch->qstats;
	} else {
		opt.command = TC_PRIO_DESTROY;
	}

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_PRIO, &opt);
}

static void
prio_destroy(struct Qdisc *sch)
{
	int prio;
	struct prio_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	prio_offload(sch, NULL);
	for (prio = 0; prio < q->bands; prio++)
		qdisc_destroy(q->queues[prio]);
}

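/* Validate a TCA_OPTIONS change, pre-allocate any new per-band child qdiscs
 * outside the tree lock, then commit the new band count and priomap and
 * destroy the bands that are no longer used.
 */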
static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *queues[TCQ_PRIO_BANDS];
	int oldbands = q->bands, i;
	struct tc_prio_qopt *qopt;

	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
		return -EINVAL;

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= qopt->bands)
			return -EINVAL;
	}

	/* Before commit, make sure we can allocate all new qdiscs */
	for (i = oldbands; i < qopt->bands; i++) {
		queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					      TC_H_MAKE(sch->handle, i + 1),
					      extack);
		if (!queues[i]) {
			while (i > oldbands)
				qdisc_destroy(queues[--i]);
			return -ENOMEM;
		}
	}

	prio_offload(sch, qopt);
	sch_tree_lock(sch);
	q->bands = qopt->bands;
	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);

	for (i = q->bands; i < oldbands; i++) {
		struct Qdisc *child = q->queues[i];

		qdisc_tree_reduce_backlog(child, child->q.qlen,
					  child->qstats.backlog);
		qdisc_destroy(child);
	}

	for (i = oldbands; i < q->bands; i++) {
		q->queues[i] = queues[i];
		if (q->queues[i] != &noop_qdisc)
			qdisc_hash_add(q->queues[i], true);
	}

	sch_tree_unlock(sch);
	return 0;
}

static int prio_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int err;

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	return prio_tune(sch, opt, extack);
}

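/* Refresh hardware statistics and the TCQ_F_OFFLOADED flag; a device that
 * does not offload this qdisc is not treated as an error.
 */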
static int prio_dump_offload(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_prio_qopt_offload hw_stats = {
		.command = TC_PRIO_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats = {
				.bstats = &sch->bstats,
				.qstats = &sch->qstats,
			},
		},
	};
	int err;

	sch->flags &= ~TCQ_F_OFFLOADED;
	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return 0;

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_PRIO,
					    &hw_stats);
	if (err == -EOPNOTSUPP)
		return 0;

	if (!err)
		sch->flags |= TCQ_F_OFFLOADED;

	return err;
}

static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_prio_qopt opt;
	int err;

	opt.bands = q->bands;
	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);

	err = prio_dump_offload(sch);
	if (err)
		goto nla_put_failure;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

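/* Replace one band's child qdisc and propagate the graft to an offloading
 * driver; offload failures are only reported when at least one of the
 * qdiscs involved is actually offloaded.
 */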
static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		      struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct tc_prio_qopt_offload graft_offload;
	struct net_device *dev = qdisc_dev(sch);
	unsigned long band = arg - 1;
	bool any_qdisc_is_offloaded;
	int err;

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->queues[band]);

	if (!tc_can_offload(dev))
		return 0;

	graft_offload.handle = sch->handle;
	graft_offload.parent = sch->parent;
	graft_offload.graft_params.band = band;
	graft_offload.graft_params.child_handle = new->handle;
	graft_offload.command = TC_PRIO_GRAFT;

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_PRIO,
					    &graft_offload);

	/* Don't report error if the graft is part of destroy operation. */
	if (err && new != &noop_qdisc) {
		/* Don't report error if the parent, the old child and the new
		 * one are not offloaded.
		 */
		any_qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
		any_qdisc_is_offloaded |= new->flags & TCQ_F_OFFLOADED;
		if (*old)
			any_qdisc_is_offloaded |= (*old)->flags &
						   TCQ_F_OFFLOADED;

		if (any_qdisc_is_offloaded)
			NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
	}

	return 0;
}

static struct Qdisc *
prio_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	return q->queues[band];
}

static unsigned long prio_find(struct Qdisc *sch, u32 classid)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	if (band - 1 >= q->bands)
		return 0;
	return band;
}

static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	return prio_find(sch, classid);
}

static void prio_unbind(struct Qdisc *q, unsigned long cl)
{
}

static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
			   struct tcmsg *tcm)
{
	struct prio_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = q->queues[cl-1]->handle;
	return 0;
}

static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				 struct gnet_dump *d)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl_q->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
		return -1;

	return 0;
}

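/* Walk the class space for dumps: one class per band, numbered from 1. */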
static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	if (arg->stop)
		return;

	for (prio = 0; prio < q->bands; prio++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, prio + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl,
					struct netlink_ext_ack *extack)
{
	struct prio_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static const struct Qdisc_class_ops prio_class_ops = {
	.graft		=	prio_graft,
	.leaf		=	prio_leaf,
	.find		=	prio_find,
	.walk		=	prio_walk,
	.tcf_block	=	prio_tcf_block,
	.bind_tcf	=	prio_bind,
	.unbind_tcf	=	prio_unbind,
	.dump		=	prio_dump_class,
	.dump_stats	=	prio_dump_class_stats,
};

static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&prio_class_ops,
	.id		=	"prio",
	.priv_size	=	sizeof(struct prio_sched_data),
	.enqueue	=	prio_enqueue,
	.dequeue	=	prio_dequeue,
	.peek		=	prio_peek,
	.init		=	prio_init,
	.reset		=	prio_reset,
	.destroy	=	prio_destroy,
	.change		=	prio_tune,
	.dump		=	prio_dump,
	.owner		=	THIS_MODULE,
};

static int __init prio_module_init(void)
{
	return register_qdisc(&prio_qdisc_ops);
}

static void __exit prio_module_exit(void)
{
	unregister_qdisc(&prio_qdisc_ops);
}

module_init(prio_module_init)
module_exit(prio_module_exit)

MODULE_LICENSE("GPL");