// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/threads.h>
#include <linux/preempt.h>
#include <linux/irqflags.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/device-mapper.h>

#include "dm-core.h"
#include "dm-stats.h"

#define DM_MSG_PREFIX "stats"

static int dm_stat_need_rcu_barrier;

/*
 * Using 64-bit values to avoid overflow (which is a
 * problem that block/genhd.c's IO accounting has).
 */
struct dm_stat_percpu {
	unsigned long long sectors[2];
	unsigned long long ios[2];
	unsigned long long merges[2];
	unsigned long long ticks[2];
	unsigned long long io_ticks[2];
	unsigned long long io_ticks_total;
	unsigned long long time_in_queue;
	unsigned long long *histogram;
};
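
/*
 * Per-area state shared by all CPUs: the in-flight counts (indexed by
 * READ/WRITE like the arrays above), the timestamp of the last
 * dm_stat_round(), and a scratch dm_stat_percpu used when summing the
 * per-CPU counters for reporting.
 */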
35 | ||
36 | struct dm_stat_shared { | |
37 | atomic_t in_flight[2]; | |
c96aec34 | 38 | unsigned long long stamp; |
fd2ed4d2 MP |
39 | struct dm_stat_percpu tmp; |
40 | }; | |
41 | ||
42 | struct dm_stat { | |
43 | struct list_head list_entry; | |
44 | int id; | |
c96aec34 | 45 | unsigned stat_flags; |
fd2ed4d2 MP |
46 | size_t n_entries; |
47 | sector_t start; | |
48 | sector_t end; | |
49 | sector_t step; | |
dfcfac3e MP |
50 | unsigned n_histogram_entries; |
51 | unsigned long long *histogram_boundaries; | |
fd2ed4d2 MP |
52 | const char *program_id; |
53 | const char *aux_data; | |
54 | struct rcu_head rcu_head; | |
55 | size_t shared_alloc_size; | |
56 | size_t percpu_alloc_size; | |
dfcfac3e | 57 | size_t histogram_alloc_size; |
fd2ed4d2 | 58 | struct dm_stat_percpu *stat_percpu[NR_CPUS]; |
b18ae8dd | 59 | struct dm_stat_shared stat_shared[]; |
fd2ed4d2 MP |
60 | }; |
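
/*
 * When STAT_PRECISE_TIMESTAMPS is set on a region, times are taken
 * from ktime_get() in nanoseconds instead of jiffies; see
 * dm_stat_round() and dm_stat_for_entry() below.
 */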
61 | ||
c96aec34 MP |
62 | #define STAT_PRECISE_TIMESTAMPS 1 |
63 | ||
fd2ed4d2 MP |
64 | struct dm_stats_last_position { |
65 | sector_t last_sector; | |
66 | unsigned last_rw; | |
67 | }; | |
68 | ||
69 | /* | |
70 | * A typo on the command line could possibly make the kernel run out of memory | |
71 | * and crash. To prevent the crash we account all used memory. We fail if we | |
72 | * exhaust 1/4 of all memory or 1/2 of vmalloc space. | |
73 | */ | |
74 | #define DM_STATS_MEMORY_FACTOR 4 | |
75 | #define DM_STATS_VMALLOC_FACTOR 2 | |
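
/*
 * For example, on a machine with 8 GiB of RAM the accounting caps the
 * statistics memory at 2 GiB (1/4 of memory); on 32-bit kernels the
 * vmalloc-space check is typically the stricter of the two limits.
 */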
76 | ||
77 | static DEFINE_SPINLOCK(shared_memory_lock); | |
78 | ||
79 | static unsigned long shared_memory_amount; | |
80 | ||
81 | static bool __check_shared_memory(size_t alloc_size) | |
82 | { | |
83 | size_t a; | |
84 | ||
85 | a = shared_memory_amount + alloc_size; | |
86 | if (a < shared_memory_amount) | |
87 | return false; | |
ca79b0c2 | 88 | if (a >> PAGE_SHIFT > totalram_pages() / DM_STATS_MEMORY_FACTOR) |
fd2ed4d2 MP |
89 | return false; |
90 | #ifdef CONFIG_MMU | |
91 | if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR) | |
92 | return false; | |
93 | #endif | |
94 | return true; | |
95 | } | |
96 | ||
97 | static bool check_shared_memory(size_t alloc_size) | |
98 | { | |
99 | bool ret; | |
100 | ||
101 | spin_lock_irq(&shared_memory_lock); | |
102 | ||
103 | ret = __check_shared_memory(alloc_size); | |
104 | ||
105 | spin_unlock_irq(&shared_memory_lock); | |
106 | ||
107 | return ret; | |
108 | } | |
109 | ||
110 | static bool claim_shared_memory(size_t alloc_size) | |
111 | { | |
112 | spin_lock_irq(&shared_memory_lock); | |
113 | ||
114 | if (!__check_shared_memory(alloc_size)) { | |
115 | spin_unlock_irq(&shared_memory_lock); | |
116 | return false; | |
117 | } | |
118 | ||
119 | shared_memory_amount += alloc_size; | |
120 | ||
121 | spin_unlock_irq(&shared_memory_lock); | |
122 | ||
123 | return true; | |
124 | } | |
125 | ||
126 | static void free_shared_memory(size_t alloc_size) | |
127 | { | |
128 | unsigned long flags; | |
129 | ||
130 | spin_lock_irqsave(&shared_memory_lock, flags); | |
131 | ||
132 | if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) { | |
133 | spin_unlock_irqrestore(&shared_memory_lock, flags); | |
134 | DMCRIT("Memory usage accounting bug."); | |
135 | return; | |
136 | } | |
137 | ||
138 | shared_memory_amount -= alloc_size; | |
139 | ||
140 | spin_unlock_irqrestore(&shared_memory_lock, flags); | |
141 | } | |
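
/*
 * Allocate zeroed memory charged against the shared accounting above;
 * returns NULL if either the claim or the allocation itself fails.
 */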
142 | ||
143 | static void *dm_kvzalloc(size_t alloc_size, int node) | |
144 | { | |
145 | void *p; | |
146 | ||
147 | if (!claim_shared_memory(alloc_size)) | |
148 | return NULL; | |
149 | ||
a7c3e901 | 150 | p = kvzalloc_node(alloc_size, GFP_KERNEL | __GFP_NOMEMALLOC, node); |
fd2ed4d2 MP |
151 | if (p) |
152 | return p; | |
153 | ||
154 | free_shared_memory(alloc_size); | |
155 | ||
156 | return NULL; | |
157 | } | |
158 | ||
159 | static void dm_kvfree(void *ptr, size_t alloc_size) | |
160 | { | |
161 | if (!ptr) | |
162 | return; | |
163 | ||
164 | free_shared_memory(alloc_size); | |
165 | ||
0f24b79b | 166 | kvfree(ptr); |
fd2ed4d2 MP |
167 | } |
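
/*
 * All per-entry histogram pointers of a region point into a single
 * allocation per CPU (plus one shared block), so freeing the entry-0
 * pointer below releases the whole block.
 */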
168 | ||
169 | static void dm_stat_free(struct rcu_head *head) | |
170 | { | |
171 | int cpu; | |
172 | struct dm_stat *s = container_of(head, struct dm_stat, rcu_head); | |
173 | ||
60858318 | 174 | kfree(s->histogram_boundaries); |
fd2ed4d2 MP |
175 | kfree(s->program_id); |
176 | kfree(s->aux_data); | |
dfcfac3e MP |
177 | for_each_possible_cpu(cpu) { |
178 | dm_kvfree(s->stat_percpu[cpu][0].histogram, s->histogram_alloc_size); | |
fd2ed4d2 | 179 | dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size); |
dfcfac3e MP |
180 | } |
181 | dm_kvfree(s->stat_shared[0].tmp.histogram, s->histogram_alloc_size); | |
fd2ed4d2 MP |
182 | dm_kvfree(s, s->shared_alloc_size); |
183 | } | |
184 | ||
185 | static int dm_stat_in_flight(struct dm_stat_shared *shared) | |
186 | { | |
187 | return atomic_read(&shared->in_flight[READ]) + | |
188 | atomic_read(&shared->in_flight[WRITE]); | |
189 | } | |
190 | ||
191 | void dm_stats_init(struct dm_stats *stats) | |
192 | { | |
193 | int cpu; | |
194 | struct dm_stats_last_position *last; | |
195 | ||
196 | mutex_init(&stats->mutex); | |
197 | INIT_LIST_HEAD(&stats->list); | |
0cdb90f0 | 198 | stats->precise_timestamps = false; |
fd2ed4d2 MP |
199 | stats->last = alloc_percpu(struct dm_stats_last_position); |
200 | for_each_possible_cpu(cpu) { | |
201 | last = per_cpu_ptr(stats->last, cpu); | |
202 | last->last_sector = (sector_t)ULLONG_MAX; | |
203 | last->last_rw = UINT_MAX; | |
204 | } | |
205 | } | |
206 | ||
207 | void dm_stats_cleanup(struct dm_stats *stats) | |
208 | { | |
209 | size_t ni; | |
210 | struct dm_stat *s; | |
211 | struct dm_stat_shared *shared; | |
212 | ||
213 | while (!list_empty(&stats->list)) { | |
214 | s = container_of(stats->list.next, struct dm_stat, list_entry); | |
215 | list_del(&s->list_entry); | |
216 | for (ni = 0; ni < s->n_entries; ni++) { | |
217 | shared = &s->stat_shared[ni]; | |
218 | if (WARN_ON(dm_stat_in_flight(shared))) { | |
219 | DMCRIT("leaked in-flight counter at index %lu " | |
220 | "(start %llu, end %llu, step %llu): reads %d, writes %d", | |
221 | (unsigned long)ni, | |
222 | (unsigned long long)s->start, | |
223 | (unsigned long long)s->end, | |
224 | (unsigned long long)s->step, | |
225 | atomic_read(&shared->in_flight[READ]), | |
226 | atomic_read(&shared->in_flight[WRITE])); | |
227 | } | |
bfe2b014 | 228 | cond_resched(); |
fd2ed4d2 MP |
229 | } |
230 | dm_stat_free(&s->rcu_head); | |
231 | } | |
232 | free_percpu(stats->last); | |
d5ffebdd | 233 | mutex_destroy(&stats->mutex); |
fd2ed4d2 MP |
234 | } |
235 | ||
0cdb90f0 MS |
236 | static void dm_stats_recalc_precise_timestamps(struct dm_stats *stats) |
237 | { | |
238 | struct list_head *l; | |
239 | struct dm_stat *tmp_s; | |
240 | bool precise_timestamps = false; | |
241 | ||
242 | list_for_each(l, &stats->list) { | |
243 | tmp_s = container_of(l, struct dm_stat, list_entry); | |
244 | if (tmp_s->stat_flags & STAT_PRECISE_TIMESTAMPS) { | |
245 | precise_timestamps = true; | |
246 | break; | |
247 | } | |
248 | } | |
249 | stats->precise_timestamps = precise_timestamps; | |
250 | } | |
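
/*
 * Create a region: [start, end) is split into n_entries areas of step
 * sectors (the last area may be shorter).  The device is suspended and
 * resumed around the list insertion so the new counters start from a
 * quiescent state.
 */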
251 | ||
fd2ed4d2 | 252 | static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end, |
c96aec34 | 253 | sector_t step, unsigned stat_flags, |
dfcfac3e MP |
254 | unsigned n_histogram_entries, |
255 | unsigned long long *histogram_boundaries, | |
c96aec34 | 256 | const char *program_id, const char *aux_data, |
fd2ed4d2 MP |
257 | void (*suspend_callback)(struct mapped_device *), |
258 | void (*resume_callback)(struct mapped_device *), | |
259 | struct mapped_device *md) | |
260 | { | |
261 | struct list_head *l; | |
262 | struct dm_stat *s, *tmp_s; | |
263 | sector_t n_entries; | |
264 | size_t ni; | |
265 | size_t shared_alloc_size; | |
266 | size_t percpu_alloc_size; | |
dfcfac3e | 267 | size_t histogram_alloc_size; |
fd2ed4d2 MP |
268 | struct dm_stat_percpu *p; |
269 | int cpu; | |
270 | int ret_id; | |
271 | int r; | |
272 | ||
273 | if (end < start || !step) | |
274 | return -EINVAL; | |
275 | ||
276 | n_entries = end - start; | |
277 | if (dm_sector_div64(n_entries, step)) | |
278 | n_entries++; | |
279 | ||
280 | if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1)) | |
281 | return -EOVERFLOW; | |
282 | ||
fb16c799 | 283 | shared_alloc_size = struct_size(s, stat_shared, n_entries); |
fd2ed4d2 MP |
284 | if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries) |
285 | return -EOVERFLOW; | |
286 | ||
287 | percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu); | |
288 | if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries) | |
289 | return -EOVERFLOW; | |
290 | ||
dfcfac3e MP |
291 | histogram_alloc_size = (n_histogram_entries + 1) * (size_t)n_entries * sizeof(unsigned long long); |
292 | if (histogram_alloc_size / (n_histogram_entries + 1) != (size_t)n_entries * sizeof(unsigned long long)) | |
293 | return -EOVERFLOW; | |
294 | ||
295 | if (!check_shared_memory(shared_alloc_size + histogram_alloc_size + | |
296 | num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size))) | |
fd2ed4d2 MP |
297 | return -ENOMEM; |
298 | ||
299 | s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE); | |
300 | if (!s) | |
301 | return -ENOMEM; | |
302 | ||
c96aec34 | 303 | s->stat_flags = stat_flags; |
fd2ed4d2 MP |
304 | s->n_entries = n_entries; |
305 | s->start = start; | |
306 | s->end = end; | |
307 | s->step = step; | |
308 | s->shared_alloc_size = shared_alloc_size; | |
309 | s->percpu_alloc_size = percpu_alloc_size; | |
dfcfac3e MP |
310 | s->histogram_alloc_size = histogram_alloc_size; |
311 | ||
312 | s->n_histogram_entries = n_histogram_entries; | |
313 | s->histogram_boundaries = kmemdup(histogram_boundaries, | |
314 | s->n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL); | |
315 | if (!s->histogram_boundaries) { | |
316 | r = -ENOMEM; | |
317 | goto out; | |
318 | } | |
fd2ed4d2 MP |
319 | |
320 | s->program_id = kstrdup(program_id, GFP_KERNEL); | |
321 | if (!s->program_id) { | |
322 | r = -ENOMEM; | |
323 | goto out; | |
324 | } | |
325 | s->aux_data = kstrdup(aux_data, GFP_KERNEL); | |
326 | if (!s->aux_data) { | |
327 | r = -ENOMEM; | |
328 | goto out; | |
329 | } | |
330 | ||
331 | for (ni = 0; ni < n_entries; ni++) { | |
332 | atomic_set(&s->stat_shared[ni].in_flight[READ], 0); | |
333 | atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0); | |
bfe2b014 | 334 | cond_resched(); |
fd2ed4d2 MP |
335 | } |
336 | ||
dfcfac3e MP |
337 | if (s->n_histogram_entries) { |
338 | unsigned long long *hi; | |
339 | hi = dm_kvzalloc(s->histogram_alloc_size, NUMA_NO_NODE); | |
340 | if (!hi) { | |
341 | r = -ENOMEM; | |
342 | goto out; | |
343 | } | |
344 | for (ni = 0; ni < n_entries; ni++) { | |
345 | s->stat_shared[ni].tmp.histogram = hi; | |
346 | hi += s->n_histogram_entries + 1; | |
bfe2b014 | 347 | cond_resched(); |
dfcfac3e MP |
348 | } |
349 | } | |
350 | ||
fd2ed4d2 MP |
351 | for_each_possible_cpu(cpu) { |
352 | p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu)); | |
353 | if (!p) { | |
354 | r = -ENOMEM; | |
355 | goto out; | |
356 | } | |
357 | s->stat_percpu[cpu] = p; | |
dfcfac3e MP |
358 | if (s->n_histogram_entries) { |
359 | unsigned long long *hi; | |
360 | hi = dm_kvzalloc(s->histogram_alloc_size, cpu_to_node(cpu)); | |
361 | if (!hi) { | |
362 | r = -ENOMEM; | |
363 | goto out; | |
364 | } | |
365 | for (ni = 0; ni < n_entries; ni++) { | |
366 | p[ni].histogram = hi; | |
367 | hi += s->n_histogram_entries + 1; | |
bfe2b014 | 368 | cond_resched(); |
dfcfac3e MP |
369 | } |
370 | } | |
fd2ed4d2 MP |
371 | } |
372 | ||
373 | /* | |
374 | * Suspend/resume to make sure there is no i/o in flight, | |
375 | * so that newly created statistics will be exact. | |
376 | * | |
377 | * (note: we couldn't suspend earlier because we must not | |
378 | * allocate memory while suspended) | |
379 | */ | |
380 | suspend_callback(md); | |
381 | ||
382 | mutex_lock(&stats->mutex); | |
383 | s->id = 0; | |
384 | list_for_each(l, &stats->list) { | |
385 | tmp_s = container_of(l, struct dm_stat, list_entry); | |
386 | if (WARN_ON(tmp_s->id < s->id)) { | |
387 | r = -EINVAL; | |
388 | goto out_unlock_resume; | |
389 | } | |
390 | if (tmp_s->id > s->id) | |
391 | break; | |
392 | if (unlikely(s->id == INT_MAX)) { | |
393 | r = -ENFILE; | |
394 | goto out_unlock_resume; | |
395 | } | |
396 | s->id++; | |
397 | } | |
398 | ret_id = s->id; | |
399 | list_add_tail_rcu(&s->list_entry, l); | |
0cdb90f0 MS |
400 | |
401 | dm_stats_recalc_precise_timestamps(stats); | |
402 | ||
442761fd MS |
403 | if (!static_key_enabled(&stats_enabled.key)) |
404 | static_branch_enable(&stats_enabled); | |
405 | ||
fd2ed4d2 MP |
406 | mutex_unlock(&stats->mutex); |
407 | ||
408 | resume_callback(md); | |
409 | ||
410 | return ret_id; | |
411 | ||
412 | out_unlock_resume: | |
413 | mutex_unlock(&stats->mutex); | |
414 | resume_callback(md); | |
415 | out: | |
416 | dm_stat_free(&s->rcu_head); | |
417 | return r; | |
418 | } | |
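
/*
 * The region list is kept sorted by id (see the insertion loop in
 * dm_stats_create() above), so the lookup can stop at the first
 * larger id.
 */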
419 | ||
420 | static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id) | |
421 | { | |
422 | struct dm_stat *s; | |
423 | ||
424 | list_for_each_entry(s, &stats->list, list_entry) { | |
425 | if (s->id > id) | |
426 | break; | |
427 | if (s->id == id) | |
428 | return s; | |
429 | } | |
430 | ||
431 | return NULL; | |
432 | } | |
433 | ||
434 | static int dm_stats_delete(struct dm_stats *stats, int id) | |
435 | { | |
436 | struct dm_stat *s; | |
437 | int cpu; | |
438 | ||
439 | mutex_lock(&stats->mutex); | |
440 | ||
441 | s = __dm_stats_find(stats, id); | |
442 | if (!s) { | |
443 | mutex_unlock(&stats->mutex); | |
444 | return -ENOENT; | |
445 | } | |
446 | ||
447 | list_del_rcu(&s->list_entry); | |
0cdb90f0 MS |
448 | |
449 | dm_stats_recalc_precise_timestamps(stats); | |
450 | ||
fd2ed4d2 MP |
451 | mutex_unlock(&stats->mutex); |
452 | ||
453 | /* | |
454 | * vfree can't be called from RCU callback | |
455 | */ | |
456 | for_each_possible_cpu(cpu) | |
dfcfac3e MP |
457 | if (is_vmalloc_addr(s->stat_percpu) || |
458 | is_vmalloc_addr(s->stat_percpu[cpu][0].histogram)) | |
fd2ed4d2 | 459 | goto do_sync_free; |
dfcfac3e MP |
460 | if (is_vmalloc_addr(s) || |
461 | is_vmalloc_addr(s->stat_shared[0].tmp.histogram)) { | |
fd2ed4d2 MP |
462 | do_sync_free: |
463 | synchronize_rcu_expedited(); | |
464 | dm_stat_free(&s->rcu_head); | |
465 | } else { | |
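		/*
		 * Defer the freeing to an RCU callback and flag that
		 * dm_statistics_exit() must wait for pending callbacks
		 * with rcu_barrier().
		 */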
		WRITE_ONCE(dm_stat_need_rcu_barrier, 1);
		call_rcu(&s->rcu_head, dm_stat_free);
	}
	return 0;
}
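
/*
 * A sketch of one @stats_list output line, with illustrative values:
 * "0: 0+409600 4096 - -" describes region 0 covering sectors
 * 0..409599 in 4096-sector areas with default program_id and aux_data.
 */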
471 | ||
472 | static int dm_stats_list(struct dm_stats *stats, const char *program, | |
473 | char *result, unsigned maxlen) | |
474 | { | |
475 | struct dm_stat *s; | |
476 | sector_t len; | |
477 | unsigned sz = 0; | |
478 | ||
479 | /* | |
480 | * Output format: | |
481 | * <region_id>: <start_sector>+<length> <step> <program_id> <aux_data> | |
482 | */ | |
483 | ||
484 | mutex_lock(&stats->mutex); | |
485 | list_for_each_entry(s, &stats->list, list_entry) { | |
486 | if (!program || !strcmp(program, s->program_id)) { | |
487 | len = s->end - s->start; | |
bd49784f | 488 | DMEMIT("%d: %llu+%llu %llu %s %s", s->id, |
fd2ed4d2 MP |
489 | (unsigned long long)s->start, |
490 | (unsigned long long)len, | |
491 | (unsigned long long)s->step, | |
492 | s->program_id, | |
493 | s->aux_data); | |
bd49784f MP |
494 | if (s->stat_flags & STAT_PRECISE_TIMESTAMPS) |
495 | DMEMIT(" precise_timestamps"); | |
496 | if (s->n_histogram_entries) { | |
497 | unsigned i; | |
498 | DMEMIT(" histogram:"); | |
499 | for (i = 0; i < s->n_histogram_entries; i++) { | |
500 | if (i) | |
501 | DMEMIT(","); | |
502 | DMEMIT("%llu", s->histogram_boundaries[i]); | |
503 | } | |
504 | } | |
505 | DMEMIT("\n"); | |
fd2ed4d2 | 506 | } |
bfe2b014 | 507 | cond_resched(); |
fd2ed4d2 MP |
508 | } |
509 | mutex_unlock(&stats->mutex); | |
510 | ||
511 | return 1; | |
512 | } | |
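
/*
 * Fold the time elapsed since the last stamp into the time-based
 * counters: io_ticks[rw] while I/O of that kind is in flight,
 * io_ticks_total while anything is in flight, and time_in_queue
 * weighted by the number of in-flight I/Os.  Callers run with
 * preemption or interrupts disabled.
 */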
513 | ||
c96aec34 MP |
514 | static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared, |
515 | struct dm_stat_percpu *p) | |
fd2ed4d2 MP |
516 | { |
517 | /* | |
518 | * This is racy, but so is part_round_stats_single. | |
519 | */ | |
c96aec34 MP |
520 | unsigned long long now, difference; |
521 | unsigned in_flight_read, in_flight_write; | |
522 | ||
523 | if (likely(!(s->stat_flags & STAT_PRECISE_TIMESTAMPS))) | |
524 | now = jiffies; | |
525 | else | |
526 | now = ktime_to_ns(ktime_get()); | |
fd2ed4d2 | 527 | |
c96aec34 | 528 | difference = now - shared->stamp; |
fd2ed4d2 MP |
529 | if (!difference) |
530 | return; | |
c96aec34 | 531 | |
fd2ed4d2 MP |
532 | in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]); |
533 | in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]); | |
534 | if (in_flight_read) | |
535 | p->io_ticks[READ] += difference; | |
536 | if (in_flight_write) | |
537 | p->io_ticks[WRITE] += difference; | |
538 | if (in_flight_read + in_flight_write) { | |
539 | p->io_ticks_total += difference; | |
540 | p->time_in_queue += (in_flight_read + in_flight_write) * difference; | |
541 | } | |
542 | shared->stamp = now; | |
543 | } | |
544 | ||
545 | static void dm_stat_for_entry(struct dm_stat *s, size_t entry, | |
528ec5ab | 546 | int idx, sector_t len, |
c96aec34 MP |
547 | struct dm_stats_aux *stats_aux, bool end, |
548 | unsigned long duration_jiffies) | |
fd2ed4d2 | 549 | { |
fd2ed4d2 MP |
550 | struct dm_stat_shared *shared = &s->stat_shared[entry]; |
551 | struct dm_stat_percpu *p; | |
552 | ||
553 | /* | |
bbf3f8cb | 554 | * For strict correctness we should use local_irq_save/restore |
fd2ed4d2 MP |
555 | * instead of preempt_disable/enable. |
556 | * | |
bbf3f8cb MP |
557 | * preempt_disable/enable is racy if the driver finishes bios |
558 | * from non-interrupt context as well as from interrupt context | |
559 | * or from more different interrupts. | |
fd2ed4d2 | 560 | * |
bbf3f8cb MP |
561 | * On 64-bit architectures the race only results in not counting some |
562 | * events, so it is acceptable. On 32-bit architectures the race could | |
563 | * cause the counter going off by 2^32, so we need to do proper locking | |
564 | * there. | |
fd2ed4d2 MP |
565 | * |
566 | * part_stat_lock()/part_stat_unlock() have this race too. | |
567 | */ | |
bbf3f8cb MP |
568 | #if BITS_PER_LONG == 32 |
569 | unsigned long flags; | |
570 | local_irq_save(flags); | |
571 | #else | |
fd2ed4d2 | 572 | preempt_disable(); |
bbf3f8cb | 573 | #endif |
fd2ed4d2 MP |
574 | p = &s->stat_percpu[smp_processor_id()][entry]; |
575 | ||
576 | if (!end) { | |
c96aec34 | 577 | dm_stat_round(s, shared, p); |
fd2ed4d2 MP |
578 | atomic_inc(&shared->in_flight[idx]); |
579 | } else { | |
dfcfac3e | 580 | unsigned long long duration; |
c96aec34 | 581 | dm_stat_round(s, shared, p); |
fd2ed4d2 MP |
582 | atomic_dec(&shared->in_flight[idx]); |
583 | p->sectors[idx] += len; | |
584 | p->ios[idx] += 1; | |
c96aec34 | 585 | p->merges[idx] += stats_aux->merged; |
dfcfac3e | 586 | if (!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)) { |
c96aec34 | 587 | p->ticks[idx] += duration_jiffies; |
dfcfac3e MP |
588 | duration = jiffies_to_msecs(duration_jiffies); |
589 | } else { | |
c96aec34 | 590 | p->ticks[idx] += stats_aux->duration_ns; |
dfcfac3e MP |
591 | duration = stats_aux->duration_ns; |
592 | } | |
593 | if (s->n_histogram_entries) { | |
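			/*
			 * Binary search for the histogram bin: bin i
			 * counts durations d with
			 * boundaries[i - 1] <= d < boundaries[i]; bin 0
			 * is everything below the first boundary and the
			 * last bin everything at or above the last one.
			 */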
			unsigned lo = 0, hi = s->n_histogram_entries + 1;
			while (lo + 1 < hi) {
				unsigned mid = (lo + hi) / 2;
				if (s->histogram_boundaries[mid - 1] > duration) {
					hi = mid;
				} else {
					lo = mid;
				}
			}
			p->histogram[lo]++;
		}
	}

#if BITS_PER_LONG == 32
	local_irq_restore(flags);
#else
	preempt_enable();
#endif
}
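
/*
 * Clip the bio to the region and walk it area by area, charging each
 * fragment to the corresponding entry via dm_stat_for_entry().
 */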
614 | ||
528ec5ab | 615 | static void __dm_stat_bio(struct dm_stat *s, int bi_rw, |
fd2ed4d2 | 616 | sector_t bi_sector, sector_t end_sector, |
c96aec34 | 617 | bool end, unsigned long duration_jiffies, |
fd2ed4d2 MP |
618 | struct dm_stats_aux *stats_aux) |
619 | { | |
620 | sector_t rel_sector, offset, todo, fragment_len; | |
621 | size_t entry; | |
622 | ||
623 | if (end_sector <= s->start || bi_sector >= s->end) | |
624 | return; | |
625 | if (unlikely(bi_sector < s->start)) { | |
626 | rel_sector = 0; | |
627 | todo = end_sector - s->start; | |
628 | } else { | |
629 | rel_sector = bi_sector - s->start; | |
630 | todo = end_sector - bi_sector; | |
631 | } | |
632 | if (unlikely(end_sector > s->end)) | |
633 | todo -= (end_sector - s->end); | |
634 | ||
635 | offset = dm_sector_div64(rel_sector, s->step); | |
636 | entry = rel_sector; | |
637 | do { | |
638 | if (WARN_ON_ONCE(entry >= s->n_entries)) { | |
639 | DMCRIT("Invalid area access in region id %d", s->id); | |
640 | return; | |
641 | } | |
642 | fragment_len = todo; | |
643 | if (fragment_len > s->step - offset) | |
644 | fragment_len = s->step - offset; | |
645 | dm_stat_for_entry(s, entry, bi_rw, fragment_len, | |
c96aec34 | 646 | stats_aux, end, duration_jiffies); |
fd2ed4d2 MP |
647 | todo -= fragment_len; |
648 | entry++; | |
649 | offset = 0; | |
650 | } while (unlikely(todo != 0)); | |
651 | } | |
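
/*
 * Entry point from DM core: account one bio event (start or
 * completion) against every region it overlaps.  "merged" is a
 * heuristic: a bio is considered merged when it begins where the
 * previous bio seen on this CPU ended, with the same direction.
 */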
652 | ||
653 | void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw, | |
654 | sector_t bi_sector, unsigned bi_sectors, bool end, | |
8d394bc4 | 655 | unsigned long start_time, |
c96aec34 | 656 | struct dm_stats_aux *stats_aux) |
fd2ed4d2 MP |
657 | { |
658 | struct dm_stat *s; | |
659 | sector_t end_sector; | |
660 | struct dm_stats_last_position *last; | |
c96aec34 | 661 | bool got_precise_time; |
8d394bc4 | 662 | unsigned long duration_jiffies = 0; |
fd2ed4d2 MP |
663 | |
664 | if (unlikely(!bi_sectors)) | |
665 | return; | |
666 | ||
667 | end_sector = bi_sector + bi_sectors; | |
668 | ||
669 | if (!end) { | |
670 | /* | |
671 | * A race condition can at worst result in the merged flag being | |
672 | * misrepresented, so we don't have to disable preemption here. | |
673 | */ | |
1f125e76 | 674 | last = raw_cpu_ptr(stats->last); |
fd2ed4d2 | 675 | stats_aux->merged = |
6aa7de05 | 676 | (bi_sector == (READ_ONCE(last->last_sector) && |
528ec5ab | 677 | ((bi_rw == WRITE) == |
6aa7de05 | 678 | (READ_ONCE(last->last_rw) == WRITE)) |
fd2ed4d2 | 679 | )); |
6aa7de05 MR |
680 | WRITE_ONCE(last->last_sector, end_sector); |
681 | WRITE_ONCE(last->last_rw, bi_rw); | |
8d394bc4 MS |
682 | } else |
683 | duration_jiffies = jiffies - start_time; | |
fd2ed4d2 MP |
684 | |
685 | rcu_read_lock(); | |
686 | ||
c96aec34 MP |
687 | got_precise_time = false; |
688 | list_for_each_entry_rcu(s, &stats->list, list_entry) { | |
689 | if (s->stat_flags & STAT_PRECISE_TIMESTAMPS && !got_precise_time) { | |
0cdb90f0 MS |
690 | /* start (!end) duration_ns is set by DM core's alloc_io() */ |
691 | if (end) | |
c96aec34 MP |
692 | stats_aux->duration_ns = ktime_to_ns(ktime_get()) - stats_aux->duration_ns; |
693 | got_precise_time = true; | |
694 | } | |
695 | __dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration_jiffies, stats_aux); | |
696 | } | |
fd2ed4d2 MP |
697 | |
698 | rcu_read_unlock(); | |
699 | } | |
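
/*
 * Snapshot one area: run a final dm_stat_round() on the local CPU,
 * then sum all per-CPU counters into shared->tmp.
 */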
700 | ||
701 | static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared, | |
702 | struct dm_stat *s, size_t x) | |
703 | { | |
704 | int cpu; | |
705 | struct dm_stat_percpu *p; | |
706 | ||
707 | local_irq_disable(); | |
708 | p = &s->stat_percpu[smp_processor_id()][x]; | |
c96aec34 | 709 | dm_stat_round(s, shared, p); |
fd2ed4d2 MP |
710 | local_irq_enable(); |
711 | ||
dfcfac3e MP |
712 | shared->tmp.sectors[READ] = 0; |
713 | shared->tmp.sectors[WRITE] = 0; | |
714 | shared->tmp.ios[READ] = 0; | |
715 | shared->tmp.ios[WRITE] = 0; | |
716 | shared->tmp.merges[READ] = 0; | |
717 | shared->tmp.merges[WRITE] = 0; | |
718 | shared->tmp.ticks[READ] = 0; | |
719 | shared->tmp.ticks[WRITE] = 0; | |
720 | shared->tmp.io_ticks[READ] = 0; | |
721 | shared->tmp.io_ticks[WRITE] = 0; | |
722 | shared->tmp.io_ticks_total = 0; | |
723 | shared->tmp.time_in_queue = 0; | |
724 | ||
725 | if (s->n_histogram_entries) | |
726 | memset(shared->tmp.histogram, 0, (s->n_histogram_entries + 1) * sizeof(unsigned long long)); | |
727 | ||
fd2ed4d2 MP |
728 | for_each_possible_cpu(cpu) { |
729 | p = &s->stat_percpu[cpu][x]; | |
6aa7de05 MR |
730 | shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]); |
731 | shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]); | |
732 | shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]); | |
733 | shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]); | |
734 | shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]); | |
735 | shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]); | |
736 | shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]); | |
737 | shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]); | |
738 | shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]); | |
739 | shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]); | |
740 | shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total); | |
741 | shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue); | |
dfcfac3e MP |
742 | if (s->n_histogram_entries) { |
743 | unsigned i; | |
744 | for (i = 0; i < s->n_histogram_entries + 1; i++) | |
6aa7de05 | 745 | shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]); |
dfcfac3e | 746 | } |
fd2ed4d2 MP |
747 | } |
748 | } | |
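
/*
 * Clearing works by subtracting the snapshot in shared->tmp from the
 * local CPU's counters, so the per-CPU sums return to zero without
 * touching other CPUs' structures.
 */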
749 | ||
750 | static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end, | |
751 | bool init_tmp_percpu_totals) | |
752 | { | |
753 | size_t x; | |
754 | struct dm_stat_shared *shared; | |
755 | struct dm_stat_percpu *p; | |
756 | ||
757 | for (x = idx_start; x < idx_end; x++) { | |
758 | shared = &s->stat_shared[x]; | |
759 | if (init_tmp_percpu_totals) | |
760 | __dm_stat_init_temporary_percpu_totals(shared, s, x); | |
761 | local_irq_disable(); | |
762 | p = &s->stat_percpu[smp_processor_id()][x]; | |
763 | p->sectors[READ] -= shared->tmp.sectors[READ]; | |
764 | p->sectors[WRITE] -= shared->tmp.sectors[WRITE]; | |
765 | p->ios[READ] -= shared->tmp.ios[READ]; | |
766 | p->ios[WRITE] -= shared->tmp.ios[WRITE]; | |
767 | p->merges[READ] -= shared->tmp.merges[READ]; | |
768 | p->merges[WRITE] -= shared->tmp.merges[WRITE]; | |
769 | p->ticks[READ] -= shared->tmp.ticks[READ]; | |
770 | p->ticks[WRITE] -= shared->tmp.ticks[WRITE]; | |
771 | p->io_ticks[READ] -= shared->tmp.io_ticks[READ]; | |
772 | p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE]; | |
773 | p->io_ticks_total -= shared->tmp.io_ticks_total; | |
774 | p->time_in_queue -= shared->tmp.time_in_queue; | |
775 | local_irq_enable(); | |
dfcfac3e MP |
776 | if (s->n_histogram_entries) { |
777 | unsigned i; | |
778 | for (i = 0; i < s->n_histogram_entries + 1; i++) { | |
779 | local_irq_disable(); | |
780 | p = &s->stat_percpu[smp_processor_id()][x]; | |
781 | p->histogram[i] -= shared->tmp.histogram[i]; | |
782 | local_irq_enable(); | |
783 | } | |
784 | } | |
bfe2b014 | 785 | cond_resched(); |
fd2ed4d2 MP |
786 | } |
787 | } | |
788 | ||
789 | static int dm_stats_clear(struct dm_stats *stats, int id) | |
790 | { | |
791 | struct dm_stat *s; | |
792 | ||
793 | mutex_lock(&stats->mutex); | |
794 | ||
795 | s = __dm_stats_find(stats, id); | |
796 | if (!s) { | |
797 | mutex_unlock(&stats->mutex); | |
798 | return -ENOENT; | |
799 | } | |
800 | ||
801 | __dm_stat_clear(s, 0, s->n_entries, true); | |
802 | ||
803 | mutex_unlock(&stats->mutex); | |
804 | ||
805 | return 1; | |
806 | } | |
807 | ||
808 | /* | |
809 | * This is like jiffies_to_msec, but works for 64-bit values. | |
810 | */ | |
static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long long j)
{
	unsigned long long result;
	unsigned mult;

	if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
		return j;

	result = 0;
	if (j)
		result = jiffies_to_msecs(j & 0x3fffff);
	if (j >= 1 << 22) {
		mult = jiffies_to_msecs(1 << 22);
		result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff);
	}
	if (j >= 1ULL << 44)
		result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44);

	return result;
}

static int dm_stats_print(struct dm_stats *stats, int id,
			  size_t idx_start, size_t idx_len,
			  bool clear, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_stat *s;
	size_t x;
	sector_t start, end, step;
	size_t idx_end;
	struct dm_stat_shared *shared;

	/*
	 * Output format:
	 *   <start_sector>+<length> counters
	 */

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	idx_end = idx_start + idx_len;
	if (idx_end < idx_start ||
	    idx_end > s->n_entries)
		idx_end = s->n_entries;

	if (idx_start > idx_end)
		idx_start = idx_end;

	step = s->step;
	start = s->start + (step * idx_start);

	for (x = idx_start; x < idx_end; x++, start = end) {
		shared = &s->stat_shared[x];
		end = start + step;
		if (unlikely(end > s->end))
			end = s->end;

		__dm_stat_init_temporary_percpu_totals(shared, s, x);

		DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu",
		       (unsigned long long)start,
		       (unsigned long long)step,
		       shared->tmp.ios[READ],
		       shared->tmp.merges[READ],
		       shared->tmp.sectors[READ],
		       dm_jiffies_to_msec64(s, shared->tmp.ticks[READ]),
		       shared->tmp.ios[WRITE],
		       shared->tmp.merges[WRITE],
		       shared->tmp.sectors[WRITE],
		       dm_jiffies_to_msec64(s, shared->tmp.ticks[WRITE]),
		       dm_stat_in_flight(shared),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks_total),
		       dm_jiffies_to_msec64(s, shared->tmp.time_in_queue),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[READ]),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[WRITE]));
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++) {
				DMEMIT("%s%llu", !i ? " " : ":", shared->tmp.histogram[i]);
			}
		}
		DMEMIT("\n");

		if (unlikely(sz + 1 >= maxlen))
			goto buffer_overflow;

		cond_resched();
	}

	if (clear)
		__dm_stat_clear(s, idx_start, idx_end, false);

buffer_overflow:
	mutex_unlock(&stats->mutex);

	return 1;
}

static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data)
{
	struct dm_stat *s;
	const char *new_aux_data;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	new_aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!new_aux_data) {
		mutex_unlock(&stats->mutex);
		return -ENOMEM;
	}

	kfree(s->aux_data);
	s->aux_data = new_aux_data;

	mutex_unlock(&stats->mutex);

	return 0;
}
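
/*
 * Parse a comma-separated, strictly increasing list of histogram
 * boundaries, e.g. "10,20,30" (three boundaries, hence four bins).
 * The units match the duration computed in dm_stat_for_entry():
 * milliseconds normally, nanoseconds with precise_timestamps.
 */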
940 | ||
dfcfac3e MP |
941 | static int parse_histogram(const char *h, unsigned *n_histogram_entries, |
942 | unsigned long long **histogram_boundaries) | |
943 | { | |
944 | const char *q; | |
945 | unsigned n; | |
946 | unsigned long long last; | |
947 | ||
948 | *n_histogram_entries = 1; | |
949 | for (q = h; *q; q++) | |
950 | if (*q == ',') | |
951 | (*n_histogram_entries)++; | |
952 | ||
6da2ec56 KC |
953 | *histogram_boundaries = kmalloc_array(*n_histogram_entries, |
954 | sizeof(unsigned long long), | |
955 | GFP_KERNEL); | |
dfcfac3e MP |
956 | if (!*histogram_boundaries) |
957 | return -ENOMEM; | |
958 | ||
959 | n = 0; | |
960 | last = 0; | |
961 | while (1) { | |
962 | unsigned long long hi; | |
963 | int s; | |
964 | char ch; | |
965 | s = sscanf(h, "%llu%c", &hi, &ch); | |
966 | if (!s || (s == 2 && ch != ',')) | |
967 | return -EINVAL; | |
968 | if (hi <= last) | |
969 | return -EINVAL; | |
970 | last = hi; | |
971 | (*histogram_boundaries)[n] = hi; | |
972 | if (s == 1) | |
973 | return 0; | |
974 | h = strchr(h, ',') + 1; | |
975 | n++; | |
976 | } | |
977 | } | |
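
/*
 * A minimal usage sketch (the device name is illustrative):
 *
 *   dmsetup message vol 0 @stats_create - /100
 *
 * creates a region spanning the whole device, split into 100 areas,
 * and prints the new region id.
 */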
978 | ||
fd2ed4d2 MP |
979 | static int message_stats_create(struct mapped_device *md, |
980 | unsigned argc, char **argv, | |
981 | char *result, unsigned maxlen) | |
982 | { | |
dfcfac3e | 983 | int r; |
fd2ed4d2 MP |
984 | int id; |
985 | char dummy; | |
986 | unsigned long long start, end, len, step; | |
987 | unsigned divisor; | |
988 | const char *program_id, *aux_data; | |
c96aec34 MP |
989 | unsigned stat_flags = 0; |
990 | ||
dfcfac3e MP |
991 | unsigned n_histogram_entries = 0; |
992 | unsigned long long *histogram_boundaries = NULL; | |
993 | ||
c96aec34 MP |
994 | struct dm_arg_set as, as_backup; |
995 | const char *a; | |
996 | unsigned feature_args; | |
fd2ed4d2 MP |
997 | |
998 | /* | |
999 | * Input format: | |
c96aec34 | 1000 | * <range> <step> [<extra_parameters> <parameters>] [<program_id> [<aux_data>]] |
fd2ed4d2 MP |
1001 | */ |
1002 | ||
c96aec34 | 1003 | if (argc < 3) |
dfcfac3e | 1004 | goto ret_einval; |
fd2ed4d2 | 1005 | |
c96aec34 MP |
1006 | as.argc = argc; |
1007 | as.argv = argv; | |
1008 | dm_consume_args(&as, 1); | |
1009 | ||
1010 | a = dm_shift_arg(&as); | |
1011 | if (!strcmp(a, "-")) { | |
fd2ed4d2 MP |
1012 | start = 0; |
1013 | len = dm_get_size(md); | |
1014 | if (!len) | |
1015 | len = 1; | |
c96aec34 | 1016 | } else if (sscanf(a, "%llu+%llu%c", &start, &len, &dummy) != 2 || |
fd2ed4d2 | 1017 | start != (sector_t)start || len != (sector_t)len) |
dfcfac3e | 1018 | goto ret_einval; |
fd2ed4d2 MP |
1019 | |
1020 | end = start + len; | |
1021 | if (start >= end) | |
dfcfac3e | 1022 | goto ret_einval; |
fd2ed4d2 | 1023 | |
c96aec34 MP |
1024 | a = dm_shift_arg(&as); |
1025 | if (sscanf(a, "/%u%c", &divisor, &dummy) == 1) { | |
dd4c1b7d MP |
1026 | if (!divisor) |
1027 | return -EINVAL; | |
fd2ed4d2 MP |
1028 | step = end - start; |
1029 | if (do_div(step, divisor)) | |
1030 | step++; | |
1031 | if (!step) | |
1032 | step = 1; | |
c96aec34 | 1033 | } else if (sscanf(a, "%llu%c", &step, &dummy) != 1 || |
fd2ed4d2 | 1034 | step != (sector_t)step || !step) |
dfcfac3e | 1035 | goto ret_einval; |
fd2ed4d2 | 1036 | |
c96aec34 MP |
1037 | as_backup = as; |
1038 | a = dm_shift_arg(&as); | |
1039 | if (a && sscanf(a, "%u%c", &feature_args, &dummy) == 1) { | |
1040 | while (feature_args--) { | |
1041 | a = dm_shift_arg(&as); | |
1042 | if (!a) | |
dfcfac3e | 1043 | goto ret_einval; |
c96aec34 MP |
1044 | if (!strcasecmp(a, "precise_timestamps")) |
1045 | stat_flags |= STAT_PRECISE_TIMESTAMPS; | |
dfcfac3e MP |
1046 | else if (!strncasecmp(a, "histogram:", 10)) { |
1047 | if (n_histogram_entries) | |
1048 | goto ret_einval; | |
1049 | if ((r = parse_histogram(a + 10, &n_histogram_entries, &histogram_boundaries))) | |
1050 | goto ret; | |
1051 | } else | |
1052 | goto ret_einval; | |
c96aec34 MP |
1053 | } |
1054 | } else { | |
1055 | as = as_backup; | |
1056 | } | |
1057 | ||
fd2ed4d2 MP |
1058 | program_id = "-"; |
1059 | aux_data = "-"; | |
1060 | ||
c96aec34 MP |
1061 | a = dm_shift_arg(&as); |
1062 | if (a) | |
1063 | program_id = a; | |
1064 | ||
1065 | a = dm_shift_arg(&as); | |
1066 | if (a) | |
1067 | aux_data = a; | |
fd2ed4d2 | 1068 | |
c96aec34 | 1069 | if (as.argc) |
dfcfac3e | 1070 | goto ret_einval; |
fd2ed4d2 MP |
1071 | |
1072 | /* | |
1073 | * If a buffer overflow happens after we created the region, | |
1074 | * it's too late (the userspace would retry with a larger | |
1075 | * buffer, but the region id that caused the overflow is already | |
1076 | * leaked). So we must detect buffer overflow in advance. | |
1077 | */ | |
1078 | snprintf(result, maxlen, "%d", INT_MAX); | |
dfcfac3e MP |
1079 | if (dm_message_test_buffer_overflow(result, maxlen)) { |
1080 | r = 1; | |
1081 | goto ret; | |
1082 | } | |
fd2ed4d2 | 1083 | |
dfcfac3e MP |
1084 | id = dm_stats_create(dm_get_stats(md), start, end, step, stat_flags, |
1085 | n_histogram_entries, histogram_boundaries, program_id, aux_data, | |
ffcc3936 | 1086 | dm_internal_suspend_fast, dm_internal_resume_fast, md); |
dfcfac3e MP |
1087 | if (id < 0) { |
1088 | r = id; | |
1089 | goto ret; | |
1090 | } | |
fd2ed4d2 MP |
1091 | |
1092 | snprintf(result, maxlen, "%d", id); | |
1093 | ||
dfcfac3e MP |
1094 | r = 1; |
1095 | goto ret; | |
1096 | ||
1097 | ret_einval: | |
1098 | r = -EINVAL; | |
1099 | ret: | |
1100 | kfree(histogram_boundaries); | |
1101 | return r; | |
fd2ed4d2 MP |
1102 | } |
1103 | ||
1104 | static int message_stats_delete(struct mapped_device *md, | |
1105 | unsigned argc, char **argv) | |
1106 | { | |
1107 | int id; | |
1108 | char dummy; | |
1109 | ||
1110 | if (argc != 2) | |
1111 | return -EINVAL; | |
1112 | ||
1113 | if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0) | |
1114 | return -EINVAL; | |
1115 | ||
1116 | return dm_stats_delete(dm_get_stats(md), id); | |
1117 | } | |
1118 | ||
1119 | static int message_stats_clear(struct mapped_device *md, | |
1120 | unsigned argc, char **argv) | |
1121 | { | |
1122 | int id; | |
1123 | char dummy; | |
1124 | ||
1125 | if (argc != 2) | |
1126 | return -EINVAL; | |
1127 | ||
1128 | if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0) | |
1129 | return -EINVAL; | |
1130 | ||
1131 | return dm_stats_clear(dm_get_stats(md), id); | |
1132 | } | |
1133 | ||
1134 | static int message_stats_list(struct mapped_device *md, | |
1135 | unsigned argc, char **argv, | |
1136 | char *result, unsigned maxlen) | |
1137 | { | |
1138 | int r; | |
1139 | const char *program = NULL; | |
1140 | ||
1141 | if (argc < 1 || argc > 2) | |
1142 | return -EINVAL; | |
1143 | ||
1144 | if (argc > 1) { | |
1145 | program = kstrdup(argv[1], GFP_KERNEL); | |
1146 | if (!program) | |
1147 | return -ENOMEM; | |
1148 | } | |
1149 | ||
1150 | r = dm_stats_list(dm_get_stats(md), program, result, maxlen); | |
1151 | ||
1152 | kfree(program); | |
1153 | ||
1154 | return r; | |
1155 | } | |
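
/*
 * @stats_print and @stats_print_clear take <region_id> and optionally
 * <idx_start> <idx_len>; "-" keeps the default for either of the two
 * range arguments.
 */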
1156 | ||
1157 | static int message_stats_print(struct mapped_device *md, | |
1158 | unsigned argc, char **argv, bool clear, | |
1159 | char *result, unsigned maxlen) | |
1160 | { | |
1161 | int id; | |
1162 | char dummy; | |
1163 | unsigned long idx_start = 0, idx_len = ULONG_MAX; | |
1164 | ||
1165 | if (argc != 2 && argc != 4) | |
1166 | return -EINVAL; | |
1167 | ||
1168 | if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0) | |
1169 | return -EINVAL; | |
1170 | ||
1171 | if (argc > 3) { | |
1172 | if (strcmp(argv[2], "-") && | |
1173 | sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1) | |
1174 | return -EINVAL; | |
1175 | if (strcmp(argv[3], "-") && | |
1176 | sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1) | |
1177 | return -EINVAL; | |
1178 | } | |
1179 | ||
1180 | return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear, | |
1181 | result, maxlen); | |
1182 | } | |
1183 | ||
1184 | static int message_stats_set_aux(struct mapped_device *md, | |
1185 | unsigned argc, char **argv) | |
1186 | { | |
1187 | int id; | |
1188 | char dummy; | |
1189 | ||
1190 | if (argc != 3) | |
1191 | return -EINVAL; | |
1192 | ||
1193 | if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0) | |
1194 | return -EINVAL; | |
1195 | ||
1196 | return dm_stats_set_aux(dm_get_stats(md), id, argv[2]); | |
1197 | } | |
1198 | ||
1199 | int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv, | |
1200 | char *result, unsigned maxlen) | |
1201 | { | |
1202 | int r; | |
1203 | ||
fd2ed4d2 MP |
1204 | /* All messages here must start with '@' */ |
1205 | if (!strcasecmp(argv[0], "@stats_create")) | |
1206 | r = message_stats_create(md, argc, argv, result, maxlen); | |
1207 | else if (!strcasecmp(argv[0], "@stats_delete")) | |
1208 | r = message_stats_delete(md, argc, argv); | |
1209 | else if (!strcasecmp(argv[0], "@stats_clear")) | |
1210 | r = message_stats_clear(md, argc, argv); | |
1211 | else if (!strcasecmp(argv[0], "@stats_list")) | |
1212 | r = message_stats_list(md, argc, argv, result, maxlen); | |
1213 | else if (!strcasecmp(argv[0], "@stats_print")) | |
1214 | r = message_stats_print(md, argc, argv, false, result, maxlen); | |
1215 | else if (!strcasecmp(argv[0], "@stats_print_clear")) | |
1216 | r = message_stats_print(md, argc, argv, true, result, maxlen); | |
1217 | else if (!strcasecmp(argv[0], "@stats_set_aux")) | |
1218 | r = message_stats_set_aux(md, argc, argv); | |
1219 | else | |
1220 | return 2; /* this wasn't a stats message */ | |
1221 | ||
1222 | if (r == -EINVAL) | |
1223 | DMWARN("Invalid parameters for message %s", argv[0]); | |
1224 | ||
1225 | return r; | |
1226 | } | |
1227 | ||
1228 | int __init dm_statistics_init(void) | |
1229 | { | |
76f5bee5 | 1230 | shared_memory_amount = 0; |
fd2ed4d2 MP |
1231 | dm_stat_need_rcu_barrier = 0; |
1232 | return 0; | |
1233 | } | |
1234 | ||
1235 | void dm_statistics_exit(void) | |
1236 | { | |
1237 | if (dm_stat_need_rcu_barrier) | |
1238 | rcu_barrier(); | |
1239 | if (WARN_ON(shared_memory_amount)) | |
1240 | DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount); | |
1241 | } | |
1242 | ||
1243 | module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO); | |
1244 | MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics"); |