Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * net/sched/gen_estimator.c Simple rate estimator. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or | |
5 | * modify it under the terms of the GNU General Public License | |
6 | * as published by the Free Software Foundation; either version | |
7 | * 2 of the License, or (at your option) any later version. | |
8 | * | |
9 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> | |
10 | * | |
11 | * Changes: | |
12 | * Jamal Hadi Salim - moved it to net/core and reshuffled | |
13 | * names to make it usable in the general net subsystem. | |
14 | */ | |
15 | ||
16 | #include <asm/uaccess.h> | |
1977f032 | 17 | #include <linux/bitops.h> |
1da177e4 LT |
18 | #include <linux/module.h> |
19 | #include <linux/types.h> | |
20 | #include <linux/kernel.h> | |
21 | #include <linux/jiffies.h> | |
22 | #include <linux/string.h> | |
23 | #include <linux/mm.h> | |
24 | #include <linux/socket.h> | |
25 | #include <linux/sockios.h> | |
26 | #include <linux/in.h> | |
27 | #include <linux/errno.h> | |
28 | #include <linux/interrupt.h> | |
29 | #include <linux/netdevice.h> | |
30 | #include <linux/skbuff.h> | |
31 | #include <linux/rtnetlink.h> | |
32 | #include <linux/init.h> | |
4db0acf3 | 33 | #include <linux/rbtree.h> |
5a0e3ad6 | 34 | #include <linux/slab.h> |
1da177e4 LT |
35 | #include <net/sock.h> |
36 | #include <net/gen_stats.h> | |
37 | ||
38 | /* | |
39 | This code is NOT intended to be used for statistics collection; | |
40 | its purpose is to provide a base for statistical multiplexing | |
41 | for controlled load service. | |
42 | If you need only statistics, run a user-level daemon which | |
43 | periodically reads byte counters. | |
44 | ||
45 | Unfortunately, rate estimation is not a very easy task. | |
46 | For example, I did not find a simple way to estimate the current peak rate | |
47 | and even failed to formulate the problem 8)8) | |
48 | ||
49 | So I preferred not to build an estimator into the scheduler, | |
50 | but to run this task separately. | |
51 | Ideally, it should be kernel thread(s), but for now it runs | |
52 | from timers, which puts an apparent upper bound on the number of | |
53 | rated flows, has minimal overhead when that number is small, and | |
54 | is enough to handle controlled load service and sets of aggregates. | |
55 | ||
56 | We measure rate over A=(1<<interval) seconds and evaluate EWMA: | |
57 | ||
58 | avrate = avrate*(1-W) + rate*W | |
59 | ||
60 | where W is chosen as a negative power of 2: W = 2^(-ewma_log) | |
61 | ||
62 | The resulting time constant is: | |
63 | ||
64 | T = A/(-ln(1-W)) | |
65 | ||
66 | ||
67 | NOTES. | |
68 | ||
32f675bb | 69 | * avbps and avpps are scaled by 2^5. |
511e11e3 ED |
70 | * both values are reported as 32 bit unsigned values. bps can |
71 | overflow for fast links: the maximum representable speed is 34360 Mbit/sec | |
1da177e4 LT |
72 | * Minimal interval is HZ/4=250msec (it is the greatest common divisor |
73 | for HZ=100 and HZ=1024 8)), maximal interval | |
74 | is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals | |
75 | are too expensive, longer ones can be implemented | |
76 | at user level painlessly. | |
77 | */ | |
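In fixed point, the EWMA above is updated with shifts only, since W = 2^(-ewma_log); this is exactly what est_timer() does further down. A minimal userspace sketch (illustration only, not kernel code; the sample rates are invented) of that update and of the 2^5 descaling described in the NOTES:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t avbps = 0;        /* EWMA estimate, kept scaled by 2^5 */
	const int ewma_log = 3;    /* W = 2^(-3) = 1/8 */

	/* Invented per-interval byte rates, already scaled by 2^5,
	 * i.e. what est_timer() calls "brate". */
	uint64_t brate[] = { 1000u << 5, 2000u << 5, 1500u << 5 };

	for (unsigned int i = 0; i < 3; i++) {
		/* avbps += rate*W - avbps*W  ==  avbps*(1-W) + rate*W */
		avbps += (brate[i] >> ewma_log) - (avbps >> ewma_log);
		printf("interval %u: bps = %llu\n", i,
		       (unsigned long long)((avbps + 0xF) >> 5));
	}
	return 0;
}
```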
78 | ||
79 | #define EST_MAX_INTERVAL 5 | |
80 | ||
81 | struct gen_estimator | |
82 | { | |
0929c2dd | 83 | struct list_head list; |
c1a8f1f1 | 84 | struct gnet_stats_basic_packed *bstats; |
45203a3b | 85 | struct gnet_stats_rate_est64 *rate_est; |
1da177e4 | 86 | spinlock_t *stats_lock; |
1da177e4 | 87 | int ewma_log; |
32f675bb ED |
88 | u32 last_packets; |
89 | unsigned long avpps; | |
1da177e4 | 90 | u64 last_bytes; |
511e11e3 | 91 | u64 avbps; |
0929c2dd | 92 | struct rcu_head e_rcu; |
4db0acf3 | 93 | struct rb_node node; |
22e0f8b9 JF |
94 | struct gnet_stats_basic_cpu __percpu *cpu_bstats; |
95 | struct rcu_head head; | |
1da177e4 LT |
96 | }; |
97 | ||
98 | struct gen_estimator_head | |
99 | { | |
100 | struct timer_list timer; | |
0929c2dd | 101 | struct list_head list; |
1da177e4 LT |
102 | }; |
103 | ||
104 | static struct gen_estimator_head elist[EST_MAX_INTERVAL+1]; | |
105 | ||
deb3abf1 | 106 | /* Protects against NULL dereference */ |
1da177e4 LT |
107 | static DEFINE_RWLOCK(est_lock); |
108 | ||
4db0acf3 JP |
109 | /* Protects against soft lockup during large deletion */ |
110 | static struct rb_root est_root = RB_ROOT; | |
ae638c47 | 111 | static DEFINE_SPINLOCK(est_tree_lock); |
4db0acf3 | 112 | |
1da177e4 LT |
113 | static void est_timer(unsigned long arg) |
114 | { | |
115 | int idx = (int)arg; | |
116 | struct gen_estimator *e; | |
117 | ||
0929c2dd RZ |
118 | rcu_read_lock(); |
119 | list_for_each_entry_rcu(e, &elist[idx].list, list) { | |
22e0f8b9 | 120 | struct gnet_stats_basic_packed b = {0}; |
32f675bb | 121 | unsigned long rate; |
511e11e3 | 122 | u64 brate; |
1da177e4 LT |
123 | |
124 | spin_lock(e->stats_lock); | |
0929c2dd RZ |
125 | read_lock(&est_lock); |
126 | if (e->bstats == NULL) | |
127 | goto skip; | |
128 | ||
22e0f8b9 JF |
129 | __gnet_stats_copy_basic(&b, e->cpu_bstats, e->bstats); |
130 | ||
131 | brate = (b.bytes - e->last_bytes)<<(7 - idx); | |
132 | e->last_bytes = b.bytes; | |
a1dcb662 | 133 | e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log); |
1da177e4 LT |
134 | e->rate_est->bps = (e->avbps+0xF)>>5; |
135 | ||
32f675bb ED |
136 | rate = b.packets - e->last_packets; |
137 | rate <<= (7 - idx); | |
22e0f8b9 | 138 | e->last_packets = b.packets; |
a1dcb662 | 139 | e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log); |
32f675bb | 140 | e->rate_est->pps = (e->avpps + 0xF) >> 5; |
0929c2dd RZ |
141 | skip: |
142 | read_unlock(&est_lock); | |
1da177e4 LT |
143 | spin_unlock(e->stats_lock); |
144 | } | |
145 | ||
0929c2dd | 146 | if (!list_empty(&elist[idx].list)) |
789675e2 | 147 | mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx)); |
0929c2dd | 148 | rcu_read_unlock(); |
1da177e4 LT |
149 | } |
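The shift in est_timer() is worth unpacking: the timer for slot idx fires every (HZ/4) << idx jiffies, i.e. every 2^(idx-2) seconds, and the running averages are kept scaled by 2^5 (see the NOTES above), which is where (7 - idx) comes from:

```c
/*
 * delta bytes per 2^(idx-2) seconds, scaled by 2^5:
 *
 *     brate = (delta / 2^(idx-2)) * 2^5
 *           = delta << (5 - (idx - 2))
 *           = delta << (7 - idx)
 *
 * e.g. idx == 2 is a 1-second interval, so brate = delta << 5.
 * Reporting then drops the scaling:  bps = (avbps + 0xF) >> 5.
 */
```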
150 | ||
4db0acf3 JP |
151 | static void gen_add_node(struct gen_estimator *est) |
152 | { | |
153 | struct rb_node **p = &est_root.rb_node, *parent = NULL; | |
154 | ||
155 | while (*p) { | |
156 | struct gen_estimator *e; | |
157 | ||
158 | parent = *p; | |
159 | e = rb_entry(parent, struct gen_estimator, node); | |
160 | ||
161 | if (est->bstats > e->bstats) | |
162 | p = &parent->rb_right; | |
163 | else | |
164 | p = &parent->rb_left; | |
165 | } | |
166 | rb_link_node(&est->node, parent, p); | |
167 | rb_insert_color(&est->node, &est_root); | |
168 | } | |
169 | ||
244e6c2d | 170 | static |
c1a8f1f1 | 171 | struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats, |
45203a3b | 172 | const struct gnet_stats_rate_est64 *rate_est) |
4db0acf3 JP |
173 | { |
174 | struct rb_node *p = est_root.rb_node; | |
175 | ||
176 | while (p) { | |
177 | struct gen_estimator *e; | |
178 | ||
179 | e = rb_entry(p, struct gen_estimator, node); | |
180 | ||
181 | if (bstats > e->bstats) | |
182 | p = p->rb_right; | |
183 | else if (bstats < e->bstats || rate_est != e->rate_est) | |
184 | p = p->rb_left; | |
185 | else | |
186 | return e; | |
187 | } | |
188 | return NULL; | |
189 | } | |
190 | ||
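A note on the ordering used by these two helpers: the rbtree is keyed on the address of the bstats structure, gen_add_node() sends equal keys to the left, and gen_find_node() mirrors that by descending left whenever bstats matches but rate_est does not. Summarised as a comment (derived from the code above):

```c
/*
 * Ordering contract between gen_add_node() and gen_find_node():
 *
 *   insert:  est->bstats >  e->bstats              -> right subtree
 *            est->bstats <= e->bstats              -> left subtree
 *                                                     (equal keys go left)
 *
 *   lookup:  bstats >  e->bstats                   -> descend right
 *            bstats <  e->bstats
 *              or rate_est != e->rate_est          -> descend left
 *            both pointers match                   -> found
 *
 * The tree is keyed on the numeric value of the bstats pointer, so
 * several estimators may share one key; gen_kill_estimator() below
 * therefore loops, erasing one matching node per lookup, until
 * gen_find_node() returns NULL.
 */
```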
1da177e4 LT |
191 | /** |
192 | * gen_new_estimator - create a new rate estimator | |
193 | * @bstats: basic statistics | |
194 | * @rate_est: rate estimator statistics | |
195 | * @stats_lock: statistics lock | |
196 | * @opt: rate estimator configuration TLV | |
197 | * | |
198 | * Creates a new rate estimator with &bstats as source and &rate_est | |
199 | * as destination. A new timer with the interval specified in the | |
200 | * configuration TLV is created. Upon each interval, the latest statistics | |
201 | * will be read from &bstats and the estimated rate will be stored in | |
e793c0f7 | 202 | * &rate_est with the statistics lock grabbed during this period. |
4ec93edb | 203 | * |
1da177e4 | 204 | * Returns 0 on success or a negative error code. |
0929c2dd | 205 | * |
1da177e4 | 206 | */ |
c1a8f1f1 | 207 | int gen_new_estimator(struct gnet_stats_basic_packed *bstats, |
22e0f8b9 | 208 | struct gnet_stats_basic_cpu __percpu *cpu_bstats, |
45203a3b | 209 | struct gnet_stats_rate_est64 *rate_est, |
0929c2dd | 210 | spinlock_t *stats_lock, |
1e90474c | 211 | struct nlattr *opt) |
1da177e4 LT |
212 | { |
213 | struct gen_estimator *est; | |
1e90474c | 214 | struct gnet_estimator *parm = nla_data(opt); |
22e0f8b9 | 215 | struct gnet_stats_basic_packed b = {0}; |
0929c2dd | 216 | int idx; |
1da177e4 | 217 | |
1e90474c | 218 | if (nla_len(opt) < sizeof(*parm)) |
1da177e4 LT |
219 | return -EINVAL; |
220 | ||
221 | if (parm->interval < -2 || parm->interval > 3) | |
222 | return -EINVAL; | |
223 | ||
77d04bd9 | 224 | est = kzalloc(sizeof(*est), GFP_KERNEL); |
1da177e4 LT |
225 | if (est == NULL) |
226 | return -ENOBUFS; | |
227 | ||
22e0f8b9 JF |
228 | __gnet_stats_copy_basic(&b, cpu_bstats, bstats); |
229 | ||
0929c2dd | 230 | idx = parm->interval + 2; |
1da177e4 LT |
231 | est->bstats = bstats; |
232 | est->rate_est = rate_est; | |
233 | est->stats_lock = stats_lock; | |
234 | est->ewma_log = parm->ewma_log; | |
22e0f8b9 | 235 | est->last_bytes = b.bytes; |
1da177e4 | 236 | est->avbps = rate_est->bps<<5; |
22e0f8b9 | 237 | est->last_packets = b.packets; |
1da177e4 | 238 | est->avpps = rate_est->pps<<5;
22e0f8b9 | 239 | est->cpu_bstats = cpu_bstats; |
1da177e4 | 240 | |
0b5d404e | 241 | spin_lock_bh(&est_tree_lock); |
0929c2dd RZ |
242 | if (!elist[idx].timer.function) { |
243 | INIT_LIST_HEAD(&elist[idx].list); | |
244 | setup_timer(&elist[idx].timer, est_timer, idx); | |
1da177e4 | 245 | } |
0929c2dd RZ |
246 | |
247 | if (list_empty(&elist[idx].list)) | |
789675e2 | 248 | mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx)); |
0929c2dd RZ |
249 | |
250 | list_add_rcu(&est->list, &elist[idx].list); | |
4db0acf3 | 251 | gen_add_node(est); |
0b5d404e | 252 | spin_unlock_bh(&est_tree_lock); |
4db0acf3 | 253 | |
1da177e4 LT |
254 | return 0; |
255 | } | |
c1b56878 | 256 | EXPORT_SYMBOL(gen_new_estimator); |
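To ground the kernel-doc above, here is a hedged sketch of a caller. Every name below (struct my_rated_object, my_attach_estimator, tca_rate) is invented for illustration; the sketch also assumes that passing a NULL cpu_bstats is acceptable when the counters are not per-CPU, which this file does not itself guarantee.

```c
#include <linux/spinlock.h>
#include <net/gen_stats.h>
#include <net/netlink.h>

struct my_rated_object {				/* invented */
	struct gnet_stats_basic_packed	bstats;		/* byte/packet counters */
	struct gnet_stats_rate_est64	rate_est;	/* filled in by est_timer() */
	spinlock_t			lock;		/* lock protecting bstats */
};

/* 'tca_rate' would be a TCA_RATE attribute carrying a struct
 * gnet_estimator; gen_new_estimator() rejects it unless the interval
 * is in [-2, 3] and the attribute is at least sizeof(*parm) long. */
static int my_attach_estimator(struct my_rated_object *obj,
			       struct nlattr *tca_rate)
{
	if (!tca_rate)
		return 0;	/* no estimator requested */

	return gen_new_estimator(&obj->bstats, NULL, &obj->rate_est,
				 &obj->lock, tca_rate);
}
```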
1da177e4 LT |
257 | |
258 | /** | |
259 | * gen_kill_estimator - remove a rate estimator | |
260 | * @bstats: basic statistics | |
261 | * @rate_est: rate estimator statistics | |
262 | * | |
4db0acf3 | 263 | * Removes the rate estimator specified by &bstats and &rate_est. |
0929c2dd | 264 | * |
c7de2cf0 | 265 | * Note : Caller should respect an RCU grace period before freeing stats_lock |
1da177e4 | 266 | */ |
c1a8f1f1 | 267 | void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, |
45203a3b | 268 | struct gnet_stats_rate_est64 *rate_est) |
1da177e4 | 269 | { |
4db0acf3 | 270 | struct gen_estimator *e; |
0929c2dd | 271 | |
0b5d404e | 272 | spin_lock_bh(&est_tree_lock); |
4db0acf3 JP |
273 | while ((e = gen_find_node(bstats, rate_est))) { |
274 | rb_erase(&e->node, &est_root); | |
1da177e4 | 275 | |
9ca7f876 | 276 | write_lock(&est_lock); |
4db0acf3 | 277 | e->bstats = NULL; |
9ca7f876 | 278 | write_unlock(&est_lock); |
1da177e4 | 279 | |
4db0acf3 | 280 | list_del_rcu(&e->list); |
dad178fc | 281 | kfree_rcu(e, e_rcu); |
1da177e4 | 282 | } |
0b5d404e | 283 | spin_unlock_bh(&est_tree_lock); |
1da177e4 | 284 | } |
c1b56878 | 285 | EXPORT_SYMBOL(gen_kill_estimator); |
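The "RCU grace period" note above is load-bearing: est_timer() iterates the per-interval list under rcu_read_lock() and takes each entry's stats_lock before checking whether bstats has been cleared. A hedged teardown sketch, reusing the invented my_rated_object from the previous example:

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>

static void my_destroy(struct my_rated_object *obj)
{
	/* Unlink every estimator bound to these counters. */
	gen_kill_estimator(&obj->bstats, &obj->rate_est);

	/* A concurrent est_timer() run may still hold a pointer to the
	 * removed entry and be about to take obj->lock, so wait out a
	 * grace period before freeing the memory that contains the
	 * lock (call_rcu() on the enclosing object would also work). */
	synchronize_rcu();
	kfree(obj);
}
```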
1da177e4 LT |
286 | |
287 | /** | |
96750162 | 288 | * gen_replace_estimator - replace rate estimator configuration |
1da177e4 LT |
289 | * @bstats: basic statistics |
290 | * @rate_est: rate estimator statistics | |
291 | * @stats_lock: statistics lock | |
292 | * @opt: rate estimator configuration TLV | |
293 | * | |
294 | * Replaces the configuration of a rate estimator by calling | |
295 | * gen_kill_estimator() and gen_new_estimator(). | |
4ec93edb | 296 | * |
1da177e4 LT |
297 | * Returns 0 on success or a negative error code. |
298 | */ | |
c1a8f1f1 | 299 | int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, |
22e0f8b9 | 300 | struct gnet_stats_basic_cpu __percpu *cpu_bstats, |
45203a3b | 301 | struct gnet_stats_rate_est64 *rate_est, |
1e90474c | 302 | spinlock_t *stats_lock, struct nlattr *opt) |
1da177e4 | 303 | { |
96750162 | 304 | gen_kill_estimator(bstats, rate_est); |
22e0f8b9 | 305 | return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, opt); |
1da177e4 | 306 | } |
c1b56878 SH |
307 | EXPORT_SYMBOL(gen_replace_estimator); |
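For completeness, a reconfiguration path with the same invented names; gen_replace_estimator() simply tears the old estimator down and attaches a fresh one with the new parameters:

```c
/* Hypothetical "change" handler: swap in new estimator parameters for
 * an object that may or may not already have an estimator attached. */
static int my_change_rate_est(struct my_rated_object *obj,
			      struct nlattr *tca_rate)
{
	if (!tca_rate)
		return 0;	/* leave the current estimator untouched */

	return gen_replace_estimator(&obj->bstats, NULL, &obj->rate_est,
				     &obj->lock, tca_rate);
}
```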
308 | ||
309 | /** | |
310 | * gen_estimator_active - test if estimator is currently in use | |
244e6c2d | 311 | * @bstats: basic statistics |
c1b56878 SH |
312 | * @rate_est: rate estimator statistics |
313 | * | |
244e6c2d | 314 | * Returns true if estimator is active, and false if not. |
c1b56878 | 315 | */ |
c1a8f1f1 | 316 | bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats, |
45203a3b | 317 | const struct gnet_stats_rate_est64 *rate_est) |
c1b56878 | 318 | { |
ae638c47 ED |
319 | bool res; |
320 | ||
c1b56878 | 321 | ASSERT_RTNL(); |
1da177e4 | 322 | |
0b5d404e | 323 | spin_lock_bh(&est_tree_lock); |
ae638c47 | 324 | res = gen_find_node(bstats, rate_est) != NULL; |
0b5d404e | 325 | spin_unlock_bh(&est_tree_lock); |
ae638c47 ED |
326 | |
327 | return res; | |
c1b56878 SH |
328 | } |
329 | EXPORT_SYMBOL(gen_estimator_active); |
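And a read-side sketch for gen_estimator_active(); since the function asserts the RTNL lock, the hypothetical caller below is assumed to run under rtnl_lock():

```c
/* Report a rate only if an estimator is actually maintaining it.
 * Caller must hold RTNL (see ASSERT_RTNL() above). */
static bool my_get_rate(const struct my_rated_object *obj,
			struct gnet_stats_rate_est64 *out)
{
	if (!gen_estimator_active(&obj->bstats, &obj->rate_est))
		return false;

	*out = obj->rate_est;
	return true;
}
```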