/*
 * net/sched/gen_estimator.c	Simple rate estimator.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *		Jamal Hadi Salim - moved it to net/core and reshuffled
 *		names to make it usable in the general net subsystem.
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/gen_stats.h>

/*
   This code is NOT intended to be used for statistics collection;
   its purpose is to provide a base for statistical multiplexing
   for controlled load service.
   If you need only statistics, run a user level daemon which
   periodically reads byte counters.

   Unfortunately, rate estimation is not a very easy task.
   F.e. I did not find a simple way to estimate the current peak rate
   and even failed to formulate the problem 8)8)

   So I preferred not to build an estimator into the scheduler,
   but to run this task separately.
   Ideally, it should be kernel thread(s), but for now it runs
   from timers, which puts an apparent upper bound on the number of rated
   flows, has minimal overhead for small numbers of flows, and is enough
   to handle controlled load service and sets of aggregates.

   We measure rate over A=(1<<interval) seconds and evaluate EWMA:

   avrate = avrate*(1-W) + rate*W

   where W is chosen as a negative power of 2: W = 2^(-ewma_log)

   The resulting time constant is:

   T = A/(-ln(1-W))


   NOTES.

   * avbps and avpps are scaled by 2^5.
   * both values are reported as 32 bit unsigned values. bps can
     overflow for fast links : max speed being 34360Mbit/sec
   * Minimal interval is HZ/4=250msec (it is the greatest common divisor
     for HZ=100 and HZ=1024 8)), maximal interval
     is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals
     are too expensive, longer ones can be implemented
     at user level painlessly.
 */
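/* A worked example of the formulas above (illustrative numbers only):
 * with interval = 0 the measurement period is A = 1<<0 = 1 sec, and
 * with ewma_log = 2 the weight is W = 2^(-2) = 0.25. The time constant
 * is then
 *
 *	T = A/(-ln(1-W)) = 1/(-ln(0.75)) ~= 3.5 sec
 *
 * i.e. after a step change in the true rate, the estimate closes about
 * 63% of the gap to the new rate within roughly 3.5 seconds.
 */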

#define EST_MAX_INTERVAL	5
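/* As gen_new_estimator() below enforces, parm->interval is restricted
 * to [-2, 3]; idx = parm->interval + 2 selects one of the
 * EST_MAX_INTERVAL + 1 timer lists, each firing every (HZ/4) << idx
 * jiffies, i.e. from 250 msec (idx 0) up to 8 sec (idx 5).
 */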

struct gen_estimator
{
	struct list_head	list;
	struct gnet_stats_basic_packed	*bstats;
	struct gnet_stats_rate_est64	*rate_est;
	spinlock_t		*stats_lock;
	seqcount_t		*running;
	int			ewma_log;
	u32			last_packets;
	unsigned long		avpps;		/* scaled by 2^5 */
	u64			last_bytes;
	u64			avbps;		/* scaled by 2^5 */
	struct rcu_head		e_rcu;
	struct rb_node		node;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct rcu_head		head;
};

struct gen_estimator_head
{
	struct timer_list	timer;
	struct list_head	list;
};

static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
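/* Each estimator lives on two structures at once: a per-interval RCU
 * list (elist[idx].list, walked by est_timer()) and a global rb-tree
 * (est_root, keyed by the bstats pointer, used for lookup and removal).
 */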

/* Protects against NULL dereference */
static DEFINE_RWLOCK(est_lock);

/* Protects against soft lockup during large deletion */
static struct rb_root est_root = RB_ROOT;
static DEFINE_SPINLOCK(est_tree_lock);

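/* Timer callback for one interval class: re-samples every estimator on
 * elist[idx] and updates its EWMA. The list is walked under
 * rcu_read_lock(); each estimator's counters are copied under its
 * stats_lock (if any), and est_lock guards e->bstats against a
 * concurrent gen_kill_estimator(). The timer re-arms itself every
 * (HZ/4) << idx jiffies while the list is non-empty.
 */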
static void est_timer(unsigned long arg)
{
	int idx = (int)arg;
	struct gen_estimator *e;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &elist[idx].list, list) {
		struct gnet_stats_basic_packed b = {0};
		unsigned long rate;
		u64 brate;

		if (e->stats_lock)
			spin_lock(e->stats_lock);
		read_lock(&est_lock);
		if (e->bstats == NULL)
			goto skip;

		__gnet_stats_copy_basic(e->running, &b, e->cpu_bstats, e->bstats);

		brate = (b.bytes - e->last_bytes)<<(7 - idx);
		e->last_bytes = b.bytes;
		e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
		WRITE_ONCE(e->rate_est->bps, (e->avbps + 0xF) >> 5);

		rate = b.packets - e->last_packets;
		rate <<= (7 - idx);
		e->last_packets = b.packets;
		e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
		WRITE_ONCE(e->rate_est->pps, (e->avpps + 0xF) >> 5);
skip:
		read_unlock(&est_lock);
		if (e->stats_lock)
			spin_unlock(e->stats_lock);
	}

	if (!list_empty(&elist[idx].list))
		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
	rcu_read_unlock();
}

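/* Link a new estimator into est_root, ordered by its bstats pointer.
 * Equal keys are allowed and are placed in the left subtree, which is
 * what lets gen_find_node() walk left through duplicates.
 */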
static void gen_add_node(struct gen_estimator *est)
{
	struct rb_node **p = &est_root.rb_node, *parent = NULL;

	while (*p) {
		struct gen_estimator *e;

		parent = *p;
		e = rb_entry(parent, struct gen_estimator, node);

		if (est->bstats > e->bstats)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&est->node, parent, p);
	rb_insert_color(&est->node, &est_root);
}

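/* Find the estimator matching both bstats and rate_est. On a bstats
 * tie with a rate_est mismatch the search descends left, since that is
 * where gen_add_node() parks duplicates.
 */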
static
struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats,
				    const struct gnet_stats_rate_est64 *rate_est)
{
	struct rb_node *p = est_root.rb_node;

	while (p) {
		struct gen_estimator *e;

		e = rb_entry(p, struct gen_estimator, node);

		if (bstats > e->bstats)
			p = p->rb_right;
		else if (bstats < e->bstats || rate_est != e->rate_est)
			p = p->rb_left;
		else
			return e;
	}
	return NULL;
}

/**
 * gen_new_estimator - create a new rate estimator
 * @bstats: basic statistics
 * @cpu_bstats: bstats per cpu
 * @rate_est: rate estimator statistics
 * @stats_lock: statistics lock
 * @running: qdisc running seqcount
 * @opt: rate estimator configuration TLV
 *
 * Creates a new rate estimator with &bstats as source and &rate_est
 * as destination. A new timer with the interval specified in the
 * configuration TLV is created. Upon each interval, the latest statistics
 * will be read from &bstats and the estimated rate will be stored in
 * &rate_est with the statistics lock grabbed during this period.
 *
 * Returns 0 on success or a negative error code.
 */
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
		      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
		      struct gnet_stats_rate_est64 *rate_est,
		      spinlock_t *stats_lock,
		      seqcount_t *running,
		      struct nlattr *opt)
{
	struct gen_estimator *est;
	struct gnet_estimator *parm = nla_data(opt);
	struct gnet_stats_basic_packed b = {0};
	int idx;

	if (nla_len(opt) < sizeof(*parm))
		return -EINVAL;

	if (parm->interval < -2 || parm->interval > 3)
		return -EINVAL;

	est = kzalloc(sizeof(*est), GFP_KERNEL);
	if (est == NULL)
		return -ENOBUFS;

	__gnet_stats_copy_basic(running, &b, cpu_bstats, bstats);

	idx = parm->interval + 2;
	est->bstats = bstats;
	est->rate_est = rate_est;
	est->stats_lock = stats_lock;
	est->running = running;
	est->ewma_log = parm->ewma_log;
	est->last_bytes = b.bytes;
	est->avbps = rate_est->bps<<5;
	est->last_packets = b.packets;
	est->avpps = rate_est->pps<<5;	/* scaled by 2^5, matching est_timer() */
	est->cpu_bstats = cpu_bstats;

	spin_lock_bh(&est_tree_lock);
	if (!elist[idx].timer.function) {
		INIT_LIST_HEAD(&elist[idx].list);
		setup_timer(&elist[idx].timer, est_timer, idx);
	}

	if (list_empty(&elist[idx].list))
		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));

	list_add_rcu(&est->list, &elist[idx].list);
	gen_add_node(est);
	spin_unlock_bh(&est_tree_lock);

	return 0;
}
EXPORT_SYMBOL(gen_new_estimator);

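/* Example (sketch only; "q" and "est_opt" are illustrative names, not
 * taken from any particular caller): a user wanting A = 1 sec
 * measurement periods with weight W = 2^(-2) would pass a TLV
 * wrapping a struct gnet_estimator such as
 *
 *	struct gnet_estimator parm = {
 *		.interval = 0,	(A = 1<<0 = 1 sec, so idx = 2)
 *		.ewma_log = 2,	(W = 2^(-2) = 0.25)
 *	};
 *
 * and then call
 *
 *	err = gen_new_estimator(&q->bstats, NULL, &q->rate_est,
 *				&q->lock, NULL, est_opt);
 *
 * where est_opt is the nlattr carrying parm. Passing NULL for
 * cpu_bstats is handled by __gnet_stats_copy_basic(), and the
 * kernel-doc for gen_replace_estimator() below notes that running may
 * be NULL as well.
 */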
/**
 * gen_kill_estimator - remove a rate estimator
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 *
 * Removes the rate estimator specified by &bstats and &rate_est.
 *
 * Note: Caller should respect an RCU grace period before freeing stats_lock
 */
void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_rate_est64 *rate_est)
{
	struct gen_estimator *e;

	spin_lock_bh(&est_tree_lock);
	while ((e = gen_find_node(bstats, rate_est))) {
		rb_erase(&e->node, &est_root);

		write_lock(&est_lock);
		e->bstats = NULL;	/* makes est_timer() skip this entry */
		write_unlock(&est_lock);

		list_del_rcu(&e->list);
		kfree_rcu(e, e_rcu);
	}
	spin_unlock_bh(&est_tree_lock);
}
EXPORT_SYMBOL(gen_kill_estimator);

/**
 * gen_replace_estimator - replace rate estimator configuration
 * @bstats: basic statistics
 * @cpu_bstats: bstats per cpu
 * @rate_est: rate estimator statistics
 * @stats_lock: statistics lock
 * @running: qdisc running seqcount (might be NULL)
 * @opt: rate estimator configuration TLV
 *
 * Replaces the configuration of a rate estimator by calling
 * gen_kill_estimator() and gen_new_estimator().
 *
 * Returns 0 on success or a negative error code.
 */
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
			  struct gnet_stats_basic_cpu __percpu *cpu_bstats,
			  struct gnet_stats_rate_est64 *rate_est,
			  spinlock_t *stats_lock,
			  seqcount_t *running, struct nlattr *opt)
{
	gen_kill_estimator(bstats, rate_est);
	return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, running, opt);
}
EXPORT_SYMBOL(gen_replace_estimator);

/**
 * gen_estimator_active - test if estimator is currently in use
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 *
 * Returns true if estimator is active, and false if not.
 */
bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
			  const struct gnet_stats_rate_est64 *rate_est)
{
	bool res;

	ASSERT_RTNL();

	spin_lock_bh(&est_tree_lock);
	res = gen_find_node(bstats, rate_est) != NULL;
	spin_unlock_bh(&est_tree_lock);

	return res;
}
EXPORT_SYMBOL(gen_estimator_active);