// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/profile.c
 * Simple profiling. Manages a direct-mapped profile hit count buffer,
 * with configurable resolution, support for restricting the cpus on
 * which profiling is done, and switching between cpu time and
 * schedule() calls via kernel command line parameters passed at boot.
 *
 * Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 * Consolidation of architecture support code for profiling,
 *	Nadia Yvette Chambers, Oracle, July 2004
 * Amortized hit count accounting via per-cpu open-addressed hashtables
 *	to resolve timer interrupt livelocks, Nadia Yvette Chambers,
 *	Oracle, 2004
 */

#include <linux/export.h>
#include <linux/profile.h>
#include <linux/memblock.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched/stat.h>

#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>

struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT 3
#define PROFILE_GRPSZ (1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
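
/*
 * Sizing example, assuming a 4 KiB PAGE_SIZE: struct profile_hit is
 * 8 bytes, so each per-cpu hash page holds NR_PROFILE_HIT = 512
 * slots, split into NR_PROFILE_GRP = 64 groups of PROFILE_GRPSZ = 8
 * slots that are probed together in do_profile_hits().
 */
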
static atomic_t *prof_buffer;
static unsigned long prof_len;
static unsigned short int prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_var_t prof_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP && CONFIG_PROC_FS */

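/*
 * Each counter in prof_buffer covers 2^prof_shift bytes of kernel
 * text, so raising prof_shift trades resolution for a smaller
 * buffer.
 */
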
int profile_setup(char *str)
{
	static const char schedstr[] = "schedule";
	static const char sleepstr[] = "sleep";
	static const char kvmstr[] = "kvm";
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		force_schedstat_enabled();
		prof_on = SLEEP_PROFILING;
		if (str[strlen(sleepstr)] == ',')
			str += strlen(sleepstr) + 1;
		if (get_option(&str, &par))
			prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		pr_info("kernel sleep profiling enabled (shift: %u)\n",
			prof_shift);
#else
		pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		pr_info("kernel schedule profiling enabled (shift: %u)\n",
			prof_shift);
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		if (str[strlen(kvmstr)] == ',')
			str += strlen(kvmstr) + 1;
		if (get_option(&str, &par))
			prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		pr_info("kernel KVM profiling enabled (shift: %u)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		prof_on = CPU_PROFILING;
		pr_info("kernel profiling enabled (shift: %u)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);

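/*
 * Example boot parameters accepted by the parser above (the optional
 * trailing number sets prof_shift):
 *
 *	profile=2
 *	profile=schedule,4
 *	profile=sleep,1		(needs CONFIG_SCHEDSTATS)
 *	profile=kvm
 */
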
int __ref profile_init(void)
{
	int buffer_bytes;

	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;

	if (!prof_len) {
		pr_warn("profiling shift: %u too large\n", prof_shift);
		prof_on = 0;
		return -EINVAL;
	}

	buffer_bytes = prof_len*sizeof(atomic_t);

	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(prof_cpu_mask, cpu_possible_mask);

	/* Try kzalloc() first, then the page allocator, then vmalloc(): */
	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = alloc_pages_exact(buffer_bytes,
					GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = vzalloc(buffer_bytes);
	if (prof_buffer)
		return 0;

	free_cpumask_var(prof_cpu_mask);
	return -ENOMEM;
}
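
/*
 * Back-of-the-envelope sizing (assumed numbers): with 16 MiB of
 * kernel text and profile=2, prof_len is 4M counters, i.e. a 16 MiB
 * prof_buffer with 4-byte atomic_t, which is why the vzalloc()
 * fallback above can matter.
 */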

#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPI's all cpus to request them
 * to flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * use of having a second hashtable is for avoiding cacheline
 * contention that would otherwise happen during flushes of pending
 * profile hits required for the accuracy of reported profile hits
 * and so resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot
 * and hold the number of pending hits to that profile buffer slot on
 * a cpu in an entry. When the hashtable overflows, all pending hits
 * are accounted to their corresponding profile buffer slots with
 * atomic_add() and the hashtable emptied. As numerous pending hits
 * may be accounted to a profile buffer slot in a hashtable entry,
 * this amortizes a number of atomic profile buffer increments likely
 * to be far larger than the number of entries in the hashtable,
 * particularly given that the number of distinct profile buffer
 * positions to which hits are accounted during short intervals (e.g.
 * several seconds) is usually very small. Exclusion from buffer
 * flipping is provided by interrupt disablement (note that for
 * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
 * process context).
 *
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable full of finite
 * collision chains, not just pairs of them.
 *
 * -- nyc
 */
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	/*
	 * Snapshot the index of the currently active table, flip every
	 * cpu over to the other one, then drain the retired table:
	 */
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];

		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}

static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];

		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}

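/*
 * profile_flip_buffers() above drains pending hits into prof_buffer;
 * profile_discard_flip_buffers() simply forgets them, which is what
 * resetting via a write to /proc/profile wants.
 */
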
static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	/*
	 * We buffer the global profiler buffer into a per-CPU
	 * queue and thus reduce the number of global (and possibly
	 * NUMA-alien) accesses. The write-queue is self-coalescing:
	 */
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits += nr_hits;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = nr_hits;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);

	/*
	 * The hashtable is full. Add the current hit(s) and flush
	 * the whole write-queue out to the global buffer:
	 */
	atomic_add(nr_hits, &prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}
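
/*
 * Note on the probe sequence above: primary selects a group from the
 * low bits of pc, and secondary is a group-aligned, non-zero stride
 * derived from the complemented low bits of pc. Each pass scans one
 * group of PROFILE_GRPSZ slots, then advances by secondary modulo
 * NR_PROFILE_HIT; once the walk returns to primary, the table is
 * treated as full for this pc and flushed wholesale.
 */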

static int profile_dead_cpu(unsigned int cpu)
{
	struct page *page;
	int i;

	if (cpumask_available(prof_cpu_mask))
		cpumask_clear_cpu(cpu, prof_cpu_mask);

	for (i = 0; i < 2; i++) {
		if (per_cpu(cpu_profile_hits, cpu)[i]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);
			per_cpu(cpu_profile_hits, cpu)[i] = NULL;
			__free_page(page);
		}
	}
	return 0;
}

static int profile_prepare_cpu(unsigned int cpu)
{
	int i, node = cpu_to_mem(cpu);
	struct page *page;

	per_cpu(cpu_profile_flip, cpu) = 0;

	for (i = 0; i < 2; i++) {
		if (per_cpu(cpu_profile_hits, cpu)[i])
			continue;

		page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
		if (!page) {
			profile_dead_cpu(cpu);
			return -ENOMEM;
		}
		per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);
	}
	return 0;
}

static int profile_online_cpu(unsigned int cpu)
{
	if (cpumask_available(prof_cpu_mask))
		cpumask_set_cpu(cpu, prof_cpu_mask);

	return 0;
}

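/*
 * The three helpers above are CPU hotplug callbacks, registered in
 * create_proc_profile(): prepare allocates a cpu's pair of hash
 * pages, dead frees them, and online re-adds the cpu to
 * prof_cpu_mask.
 */
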
#else /* !(CONFIG_SMP && CONFIG_PROC_FS) */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;

	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !(CONFIG_SMP && CONFIG_PROC_FS) */

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	if (prof_on != type || !prof_buffer)
		return;
	do_profile_hits(type, __pc, nr_hits);
}
EXPORT_SYMBOL_GPL(profile_hits);

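/*
 * profile_hit() in <linux/profile.h> is the single-hit wrapper around
 * profile_hits(); profile_tick() below feeds it the interrupted
 * program counter on each timer tick for cpus in prof_cpu_mask.
 */
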
void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

	if (!user_mode(regs) && cpumask_available(prof_cpu_mask) &&
	    cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>

static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(prof_cpu_mask));
	return 0;
}

static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, prof_cpu_mask_proc_show, NULL);
}

static ssize_t prof_cpu_mask_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	cpumask_var_t new_value;
	int err;

	if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (!err) {
		cpumask_copy(prof_cpu_mask, new_value);
		err = count;
	}
	free_cpumask_var(new_value);
	return err;
}

static const struct proc_ops prof_cpu_mask_proc_ops = {
	.proc_open	= prof_cpu_mask_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= prof_cpu_mask_proc_write,
};

void create_prof_cpu_mask(void)
{
	/* create /proc/irq/prof_cpu_mask */
	proc_create("irq/prof_cpu_mask", 0600, NULL, &prof_cpu_mask_proc_ops);
}
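
/*
 * Example (hypothetical shell session): restrict profiling to cpus
 * 0-3 by writing a hex mask,
 *
 *	echo f > /proc/irq/prof_cpu_mask
 *
 * cpumask_parse_user() accepts the same hex bitmap format that the
 * read side prints with "%*pb".
 */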

/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned long sample_step = 1UL << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}
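
/*
 * Typical userspace consumer of /proc/profile (example invocation):
 *
 *	readprofile -m /boot/System.map | sort -nr | head
 *
 * readprofile(8) decodes the sample step stored in the first word of
 * the file and maps the raw counters back to kernel symbols.
 */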

/* default is to not implement this call */
int __weak setup_profiling_timer(unsigned mult)
{
	return -EINVAL;
}

/*
 * Writing to /proc/profile resets the counters
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}
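
/*
 * Example (hypothetical shell session): zero the histogram before a
 * measurement run,
 *
 *	echo > /proc/profile
 *
 * Any write clears the counters; a write of exactly sizeof(int) bytes
 * is additionally treated as a new profiling multiplier where the
 * architecture implements setup_profiling_timer().
 */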

static const struct proc_ops profile_proc_ops = {
	.proc_read	= read_profile,
	.proc_write	= write_profile,
	.proc_lseek	= default_llseek,
};

int __ref create_proc_profile(void)
{
	struct proc_dir_entry *entry;
#ifdef CONFIG_SMP
	enum cpuhp_state online_state;
#endif
	int err = 0;

	if (!prof_on)
		return 0;
#ifdef CONFIG_SMP
	err = cpuhp_setup_state(CPUHP_PROFILE_PREPARE, "PROFILE_PREPARE",
				profile_prepare_cpu, profile_dead_cpu);
	if (err)
		return err;

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PROFILE_ONLINE",
				profile_online_cpu, NULL);
	if (err < 0)
		goto err_state_prep;
	online_state = err;
	err = 0;
#endif
	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			    NULL, &profile_proc_ops);
	if (!entry)
		goto err_state_onl;
	proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));

	return err;
err_state_onl:
#ifdef CONFIG_SMP
	cpuhp_remove_state(online_state);
err_state_prep:
	cpuhp_remove_state(CPUHP_PROFILE_PREPARE);
#endif
	return err;
}
subsys_initcall(create_proc_profile);
#endif /* CONFIG_PROC_FS */