/*
 * This file implements the perfmon-2 subsystem which is used
 * to program the IA-64 Performance Monitoring Unit (PMU).
 *
 * The initial version of perfmon.c was written by
 * Ganesh Venkitachalam, IBM Corp.
 *
 * Then it was modified for perfmon-1.x by Stephane Eranian and
 * David Mosberger, Hewlett Packard Co.
 *
 * Version Perfmon-2.x is a rewrite of perfmon-1.x
 * by Stephane Eranian, Hewlett Packard Co.
 *
 * Copyright (C) 1999-2005 Hewlett Packard Co
 *               Stephane Eranian <eranian@hpl.hp.com>
 *               David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * More information about perfmon available at:
 * http://www.hpl.hp.com/research/linux/perfmon
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfs.h>
#include <linux/smp.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/rcupdate.h>
#include <linux/completion.h>

#include <asm/errno.h>
#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/perfmon.h>
#include <asm/processor.h>
#include <asm/signal.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#ifdef CONFIG_PERFMON
/*
 * perfmon context state
 */
#define PFM_CTX_UNLOADED	1	/* context is not loaded onto any task */
#define PFM_CTX_LOADED		2	/* context is loaded onto a task */
#define PFM_CTX_MASKED		3	/* context is loaded but monitoring is masked due to overflow */
#define PFM_CTX_ZOMBIE		4	/* owner of the context is closing it */

#define PFM_INVALID_ACTIVATION	(~0UL)

#define PFM_NUM_PMC_REGS	64	/* PMC save area for ctxsw */
#define PFM_NUM_PMD_REGS	64	/* PMD save area for ctxsw */

/*
 * depth of message queue
 */
#define PFM_MAX_MSGS		32
#define PFM_CTXQ_EMPTY(g)	((g)->ctx_msgq_head == (g)->ctx_msgq_tail)
/*
 * type of a PMU register (bitmask).
 * bitmask structure:
 *	bit0   : register implemented
 *	bit1   : end marker
 *	bit2-3 : reserved
 *	bit4   : pmc has pmc.pm
 *	bit5   : pmc controls a counter (has pmc.oi), pmd is used as counter
 *	bit6-7 : register type
 *	bit8-31: reserved
 */
#define PFM_REG_NOTIMPL		0x0 /* not implemented at all */
#define PFM_REG_IMPL		0x1 /* register implemented */
#define PFM_REG_END		0x2 /* end marker */
#define PFM_REG_MONITOR		(0x1<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm field only */
#define PFM_REG_COUNTING	(0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi + PMD used as a counter */
#define PFM_REG_CONTROL		(0x4<<4|PFM_REG_IMPL) /* PMU control register */
#define PFM_REG_CONFIG		(0x8<<4|PFM_REG_IMPL) /* configuration register */
#define PFM_REG_BUFFER		(0xc<<4|PFM_REG_IMPL) /* PMD used as buffer */
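/*
 * Worked example, derived from the masks above:
 *	PFM_REG_COUNTING = (0x2<<4) | PFM_REG_MONITOR
 *	                 = 0x20 | (0x10 | 0x1) = 0x31
 * i.e. bit0 (implemented) + bit4 (has pmc.pm) + bit5 (counter pair).
 * The PMC_IS_*()/PMD_IS_*() helpers below mask first and then compare
 * with '==', so a COUNTING register also satisfies the MONITOR test.
 */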
#define PMC_IS_LAST(i)	(pmu_conf->pmc_desc[i].type & PFM_REG_END)
#define PMD_IS_LAST(i)	(pmu_conf->pmd_desc[i].type & PFM_REG_END)

#define PMC_OVFL_NOTIFY(ctx, i)	((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)

/* i assumed unsigned */
#define PMC_IS_IMPL(i)	  (i < PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
#define PMD_IS_IMPL(i)	  (i < PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))

/* XXX: these assume that register i is implemented */
#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_MONITOR(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR)  == PFM_REG_MONITOR)
#define PMC_IS_CONTROL(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL)  == PFM_REG_CONTROL)

#define PMC_DFL_VAL(i)     pmu_conf->pmc_desc[i].default_value
#define PMC_RSVD_MASK(i)   pmu_conf->pmc_desc[i].reserved_mask
#define PMD_PMD_DEP(i)	   pmu_conf->pmd_desc[i].dep_pmd[0]
#define PMC_PMD_DEP(i)	   pmu_conf->pmc_desc[i].dep_pmd[0]

#define PFM_NUM_IBRS	   IA64_NUM_DBG_REGS
#define PFM_NUM_DBRS	   IA64_NUM_DBG_REGS

#define CTX_OVFL_NOBLOCK(c)	((c)->ctx_fl_block == 0)
#define CTX_HAS_SMPL(c)		((c)->ctx_fl_is_sampling)
#define PFM_CTX_TASK(h)		(h)->ctx_task

#define PMU_PMC_OI		5 /* position of pmc.oi bit */

/* XXX: does not support more than 64 PMDs */
#define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
#define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)

#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)

#define CTX_USED_IBR(ctx,n)	(ctx)->ctx_used_ibrs[(n)>>6] |= 1UL << ((n) % 64)
#define CTX_USED_DBR(ctx,n)	(ctx)->ctx_used_dbrs[(n)>>6] |= 1UL << ((n) % 64)
#define CTX_USES_DBREGS(ctx)	(((pfm_context_t *)(ctx))->ctx_fl_using_dbreg == 1)
#define PFM_CODE_RR	0	/* requesting code range restriction */
#define PFM_DATA_RR	1	/* requesting data range restriction */

#define PFM_CPUINFO_CLEAR(v)	pfm_get_cpu_var(pfm_syst_info) &= ~(v)
#define PFM_CPUINFO_SET(v)	pfm_get_cpu_var(pfm_syst_info) |= (v)
#define PFM_CPUINFO_GET()	pfm_get_cpu_var(pfm_syst_info)

#define RDEP(x)	(1UL<<(x))
/*
 * context protection macros
 * in SMP:
 *	- we need to protect against CPU concurrency (spin_lock)
 *	- we need to protect against PMU overflow interrupts (local_irq_disable)
 * in UP:
 *	- we need to protect against PMU overflow interrupts (local_irq_disable)
 *
 * spin_lock_irqsave()/spin_unlock_irqrestore():
 *	in SMP: local_irq_disable + spin_lock
 *	in UP : local_irq_disable
 *
 * spin_lock()/spin_unlock():
 *	in UP : removed automatically
 *	in SMP: protect against context accesses from other CPU. interrupts
 *	        are not masked. This is useful for the PMU interrupt handler
 *	        because we know we will not get PMU concurrency in that code.
 */
#define PROTECT_CTX(c, f) \
	do {  \
		DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, current->pid)); \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
		DPRINT(("spinlocked ctx %p by [%d]\n", c, current->pid)); \
	} while(0)

#define UNPROTECT_CTX(c, f) \
	do { \
		DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, current->pid)); \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOPRINT(c, f) \
	do {  \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
	} while(0)


#define UNPROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)


#define PROTECT_CTX_NOIRQ(c) \
	do {  \
		spin_lock(&(c)->ctx_lock); \
	} while(0)

#define UNPROTECT_CTX_NOIRQ(c) \
	do { \
		spin_unlock(&(c)->ctx_lock); \
	} while(0)

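/*
 * Typical usage pattern (see e.g. pfm_read() below): the caller declares
 * an 'unsigned long flags' and brackets every access to the context:
 *
 *	unsigned long flags;
 *	PROTECT_CTX(ctx, flags);
 *	... manipulate ctx with PMU overflow interrupts masked ...
 *	UNPROTECT_CTX(ctx, flags);
 */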
#ifdef CONFIG_SMP

#define GET_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)
#define INC_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)++
#define SET_ACTIVATION(c)	(c)->ctx_last_activation = GET_ACTIVATION()

#else /* !CONFIG_SMP */
#define SET_ACTIVATION(t)	do {} while(0)
#define GET_ACTIVATION(t)	do {} while(0)
#define INC_ACTIVATION(t)	do {} while(0)
#endif /* CONFIG_SMP */

#define SET_PMU_OWNER(t, c)	do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
#define GET_PMU_OWNER()		pfm_get_cpu_var(pmu_owner)
#define GET_PMU_CTX()		pfm_get_cpu_var(pmu_ctx)

#define LOCK_PFS(g)		spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
#define UNLOCK_PFS(g)		spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)

#define PFM_REG_RETFLAG_SET(flags, val)	do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
/*
 * cmp0 must be the value of pmc0
 */
#define PMC0_HAS_OVFL(cmp0)  (cmp0 & ~0x1UL)
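/*
 * Note: on the IA-64 PMU, bit 0 of PMC0 is the freeze bit and the
 * per-counter overflow status bits sit above it, so any bit set
 * besides bit 0 means at least one counter has overflowed.
 */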
#define PFMFS_MAGIC 0xa0b4d889

/*
 * debugging
 */
#define PFM_DEBUGGING 1
#ifdef PFM_DEBUGGING
#define DPRINT(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
	} while (0)

#define DPRINT_ovfl(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl > 0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
	} while (0)
#endif
/*
 * 64-bit software counter structure
 *
 * the next_reset_type is applied to the next call to pfm_reset_regs()
 */
typedef struct {
	unsigned long	val;		/* virtual 64bit counter value */
	unsigned long	lval;		/* last reset value */
	unsigned long	long_reset;	/* reset value on sampling overflow */
	unsigned long	short_reset;	/* reset value on overflow */
	unsigned long	reset_pmds[4];	/* which other pmds to reset when this counter overflows */
	unsigned long	smpl_pmds[4];	/* which pmds are accessed when counter overflows */
	unsigned long	seed;		/* seed for random-number generator */
	unsigned long	mask;		/* mask for random-number generator */
	unsigned int	flags;		/* notify/do not notify */
	unsigned long	eventid;	/* overflow event identifier */
} pfm_counter_t;
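/*
 * The "virtual 64bit counter" works as follows: the hardware PMD only
 * implements ovfl_val+1 usable bits, so the upper bits of the 64-bit
 * value accumulate in 'val' while the low bits live in the hardware
 * register. See pfm_read_soft_counter()/pfm_write_soft_counter() below
 * for the recombination and the split.
 */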
/*
 * context flags
 */
typedef struct {
	unsigned int block:1;		/* when 1, task will block on user notifications */
	unsigned int system:1;		/* do system wide monitoring */
	unsigned int using_dbreg:1;	/* using range restrictions (debug registers) */
	unsigned int is_sampling:1;	/* true if using a custom format */
	unsigned int excl_idle:1;	/* exclude idle task in system wide session */
	unsigned int going_zombie:1;	/* context is zombie (MASKED+blocking) */
	unsigned int trap_reason:2;	/* reason for going into pfm_handle_work() */
	unsigned int no_msg:1;		/* no message sent on overflow */
	unsigned int can_restart:1;	/* allowed to issue a PFM_RESTART */
	unsigned int reserved:22;
} pfm_context_flags_t;

#define PFM_TRAP_REASON_NONE	0x0	/* default value */
#define PFM_TRAP_REASON_BLOCK	0x1	/* we need to block on overflow */
#define PFM_TRAP_REASON_RESET	0x2	/* we need to reset PMDs */

/*
 * perfmon context: encapsulates all the state of a monitoring session
 */

typedef struct pfm_context {
	spinlock_t		ctx_lock;		/* context protection */

	pfm_context_flags_t	ctx_flags;		/* bitmask of flags (block reason incl.) */
	unsigned int		ctx_state;		/* state: active/inactive (no bitfield) */

	struct task_struct	*ctx_task;		/* task to which context is attached */

	unsigned long		ctx_ovfl_regs[4];	/* which registers overflowed (notification) */

	struct completion	ctx_restart_done;	/* use for blocking notification mode */

	unsigned long		ctx_used_pmds[4];	/* bitmask of PMD used */
	unsigned long		ctx_all_pmds[4];	/* bitmask of all accessible PMDs */
	unsigned long		ctx_reload_pmds[4];	/* bitmask of force reload PMD on ctxsw in */

	unsigned long		ctx_all_pmcs[4];	/* bitmask of all accessible PMCs */
	unsigned long		ctx_reload_pmcs[4];	/* bitmask of force reload PMC on ctxsw in */
	unsigned long		ctx_used_monitors[4];	/* bitmask of monitor PMC being used */

	unsigned long		ctx_pmcs[PFM_NUM_PMC_REGS];	/* saved copies of PMC values */

	unsigned int		ctx_used_ibrs[1];	/* bitmask of used IBR (speedup ctxsw in) */
	unsigned int		ctx_used_dbrs[1];	/* bitmask of used DBR (speedup ctxsw in) */
	unsigned long		ctx_dbrs[IA64_NUM_DBG_REGS];	/* DBR values (cache) when not loaded */
	unsigned long		ctx_ibrs[IA64_NUM_DBG_REGS];	/* IBR values (cache) when not loaded */

	pfm_counter_t		ctx_pmds[PFM_NUM_PMD_REGS];	/* software state for PMDS */

	unsigned long		th_pmcs[PFM_NUM_PMC_REGS];	/* PMC thread save state */
	unsigned long		th_pmds[PFM_NUM_PMD_REGS];	/* PMD thread save state */

	u64			ctx_saved_psr_up;	/* only contains psr.up value */

	unsigned long		ctx_last_activation;	/* context last activation number for last_cpu */
	unsigned int		ctx_last_cpu;		/* CPU id of current or last CPU used (SMP only) */
	unsigned int		ctx_cpu;		/* cpu to which perfmon is applied (system wide) */

	int			ctx_fd;			/* file descriptor used by this context */
	pfm_ovfl_arg_t		ctx_ovfl_arg;		/* argument to custom buffer format handler */

	pfm_buffer_fmt_t	*ctx_buf_fmt;		/* buffer format callbacks */
	void			*ctx_smpl_hdr;		/* points to sampling buffer header kernel vaddr */
	unsigned long		ctx_smpl_size;		/* size of sampling buffer */
	void			*ctx_smpl_vaddr;	/* user level virtual address of smpl buffer */

	wait_queue_head_t	ctx_msgq_wait;
	pfm_msg_t		ctx_msgq[PFM_MAX_MSGS];
	int			ctx_msgq_head;
	int			ctx_msgq_tail;
	struct fasync_struct	*ctx_async_queue;

	wait_queue_head_t	ctx_zombieq;		/* termination cleanup wait queue */
} pfm_context_t;

/*
 * magic number used to verify that structure is really
 * a perfmon context
 */
#define PFM_IS_FILE(f)		((f)->f_op == &pfm_file_ops)

#define PFM_GET_CTX(t)		((pfm_context_t *)(t)->thread.pfm_context)

#ifdef CONFIG_SMP
#define SET_LAST_CPU(ctx, v)	(ctx)->ctx_last_cpu = (v)
#define GET_LAST_CPU(ctx)	(ctx)->ctx_last_cpu
#else
#define SET_LAST_CPU(ctx, v)	do {} while(0)
#define GET_LAST_CPU(ctx)	do {} while(0)
#endif


#define ctx_fl_block		ctx_flags.block
#define ctx_fl_system		ctx_flags.system
#define ctx_fl_using_dbreg	ctx_flags.using_dbreg
#define ctx_fl_is_sampling	ctx_flags.is_sampling
#define ctx_fl_excl_idle	ctx_flags.excl_idle
#define ctx_fl_going_zombie	ctx_flags.going_zombie
#define ctx_fl_trap_reason	ctx_flags.trap_reason
#define ctx_fl_no_msg		ctx_flags.no_msg
#define ctx_fl_can_restart	ctx_flags.can_restart

#define PFM_SET_WORK_PENDING(t, v)	do { (t)->thread.pfm_needs_checking = v; } while(0)
#define PFM_GET_WORK_PENDING(t)		(t)->thread.pfm_needs_checking

/*
 * global information about all sessions
 * mostly used to synchronize between system wide and per-process
 */
typedef struct {
	spinlock_t		pfs_lock;		   /* lock the structure */

	unsigned int		pfs_task_sessions;	   /* number of per task sessions */
	unsigned int		pfs_sys_sessions;	   /* number of per system wide sessions */
	unsigned int		pfs_sys_use_dbregs;	   /* incremented when a system wide session uses debug regs */
	unsigned int		pfs_ptrace_use_dbregs;	   /* incremented when a process uses debug regs */
	struct task_struct	*pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */
} pfm_session_t;

/*
 * information about a PMC or PMD.
 * dep_pmd[]: a bitmask of dependent PMD registers
 * dep_pmc[]: a bitmask of dependent PMC registers
 */
typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
typedef struct {
	unsigned int		type;
	int			pm_pos;
	unsigned long		default_value;	/* power-on default value */
	unsigned long		reserved_mask;	/* bitmask of reserved bits */
	pfm_reg_check_t		read_check;
	pfm_reg_check_t		write_check;
	unsigned long		dep_pmd[4];
	unsigned long		dep_pmc[4];
} pfm_reg_desc_t;

/* assume cnum is a valid monitor */
#define PMC_PM(cnum, val)	(((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)

/*
 * This structure is initialized at boot time and contains
 * a description of the PMU main characteristics.
 *
 * If the probe function is defined, detection is based
 * on its return value:
 *	- 0 means recognized PMU
 *	- anything else means not supported
 * When the probe function is not defined, then the pmu_family field
 * is used and it must match the host CPU family such that:
 *	- (cpu->family & config->pmu_family) != 0
 */
typedef struct {
	unsigned long	ovfl_val;	/* overflow value for counters */

	pfm_reg_desc_t	*pmc_desc;	/* detailed PMC register dependencies descriptions */
	pfm_reg_desc_t	*pmd_desc;	/* detailed PMD register dependencies descriptions */

	unsigned int	num_pmcs;	/* number of PMCS: computed at init time */
	unsigned int	num_pmds;	/* number of PMDS: computed at init time */
	unsigned long	impl_pmcs[4];	/* bitmask of implemented PMCS */
	unsigned long	impl_pmds[4];	/* bitmask of implemented PMDS */

	char		*pmu_name;	/* PMU family name */
	unsigned int	pmu_family;	/* cpuid family pattern used to identify pmu */
	unsigned int	flags;		/* pmu specific flags */
	unsigned int	num_ibrs;	/* number of IBRS: computed at init time */
	unsigned int	num_dbrs;	/* number of DBRS: computed at init time */
	unsigned int	num_counters;	/* PMC/PMD counting pairs: computed at init time */
	int		(*probe)(void);	/* customized probe routine */
	unsigned int	use_rr_dbregs:1; /* set if debug registers used for range restriction */
} pmu_config_t;
/*
 * PMU specific flags
 */
#define PFM_PMU_IRQ_RESEND	1	/* PMU needs explicit IRQ resend */

/*
 * debug register related type definitions
 */
typedef struct {
	unsigned long ibr_mask:56;
	unsigned long ibr_plm:4;
	unsigned long ibr_ig:3;
	unsigned long ibr_x:1;
} ibr_mask_reg_t;

typedef struct {
	unsigned long dbr_mask:56;
	unsigned long dbr_plm:4;
	unsigned long dbr_ig:2;
	unsigned long dbr_w:1;
	unsigned long dbr_r:1;
} dbr_mask_reg_t;

typedef union {
	unsigned long	val;
	ibr_mask_reg_t	ibr;
	dbr_mask_reg_t	dbr;
} dbreg_t;

/*
 * perfmon command descriptions
 */
typedef struct {
	int		(*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
	char		*cmd_name;
	int		cmd_flags;
	unsigned int	cmd_narg;
	size_t		cmd_argsize;
	int		(*cmd_getsize)(void *arg, size_t *sz);
} pfm_cmd_desc_t;

#define PFM_CMD_FD		0x01	/* command requires a file descriptor */
#define PFM_CMD_ARG_READ	0x02	/* command must read argument(s) */
#define PFM_CMD_ARG_RW		0x04	/* command must read/write argument(s) */
#define PFM_CMD_STOP		0x08	/* command does not work on zombie context */


#define PFM_CMD_NAME(cmd)	pfm_cmd_tab[(cmd)].cmd_name
#define PFM_CMD_READ_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
#define PFM_CMD_RW_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
#define PFM_CMD_USE_FD(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
#define PFM_CMD_STOPPED(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)

#define PFM_CMD_ARG_MANY	-1 /* cannot be zero */

typedef struct {
	unsigned long pfm_spurious_ovfl_intr_count;	/* keep track of spurious ovfl interrupts */
	unsigned long pfm_replay_ovfl_intr_count;	/* keep track of replayed ovfl interrupts */
	unsigned long pfm_ovfl_intr_count;		/* keep track of ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles;		/* cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_min;		/* min cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_max;		/* max cycles spent processing ovfl interrupts */
	unsigned long pfm_smpl_handler_calls;
	unsigned long pfm_smpl_handler_cycles;
	char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
} pfm_stats_t;

/*
 * perfmon internal variables
 */
static pfm_stats_t		pfm_stats[NR_CPUS];
static pfm_session_t		pfm_sessions;	/* global sessions information */

static DEFINE_SPINLOCK(pfm_alt_install_check);
static pfm_intr_handler_desc_t	*pfm_alt_intr_handler;

static struct proc_dir_entry	*perfmon_dir;
static pfm_uuid_t		pfm_null_uuid = {0,};

static spinlock_t		pfm_buffer_fmt_lock;
static LIST_HEAD(pfm_buffer_fmt_list);

static pmu_config_t		*pmu_conf;

/* sysctl() controls */
pfm_sysctl_t pfm_sysctl;
EXPORT_SYMBOL(pfm_sysctl);

static ctl_table pfm_ctl_table[]={
	{1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
	{2, "debug_ovfl", &pfm_sysctl.debug_ovfl, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
	{3, "fastctxsw", &pfm_sysctl.fastctxsw, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
	{4, "expert_mode", &pfm_sysctl.expert_mode, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
	{0,},
};
static ctl_table pfm_sysctl_dir[] = {
	{1, "perfmon", NULL, 0, 0755, pfm_ctl_table, },
	{0,},
};
static ctl_table pfm_sysctl_root[] = {
	{1, "kernel", NULL, 0, 0755, pfm_sysctl_dir, },
	{0,},
};
static struct ctl_table_header *pfm_sysctl_header;

static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);

#define pfm_get_cpu_var(v)	__ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a,b)	per_cpu(a, b)

static inline void
pfm_put_task(struct task_struct *task)
{
	if (task != current) put_task_struct(task);
}

static inline void
pfm_set_task_notify(struct task_struct *task)
{
	struct thread_info *info;

	info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE);
	set_bit(TIF_NOTIFY_RESUME, &info->flags);
}

static inline void
pfm_clear_task_notify(void)
{
	clear_thread_flag(TIF_NOTIFY_RESUME);
}

static inline void
pfm_reserve_page(unsigned long a)
{
	SetPageReserved(vmalloc_to_page((void *)a));
}
static inline void
pfm_unreserve_page(unsigned long a)
{
	ClearPageReserved(vmalloc_to_page((void *)a));
}

static inline unsigned long
pfm_protect_ctx_ctxsw(pfm_context_t *x)
{
	spin_lock(&(x)->ctx_lock);
	return 0UL;
}

static inline void
pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
{
	spin_unlock(&(x)->ctx_lock);
}

static inline unsigned int
pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct)
{
	return do_munmap(mm, addr, len);
}

static inline unsigned long
pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
{
	return get_unmapped_area(file, addr, len, pgoff, flags);
}

static int
pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data,
	     struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC, mnt);
}

static struct file_system_type pfm_fs_type = {
	.name     = "pfmfs",
	.get_sb   = pfmfs_get_sb,
	.kill_sb  = kill_anon_super,
};

DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
DEFINE_PER_CPU(unsigned long, pmu_activation_number);
EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);

/* forward declaration */
static const struct file_operations pfm_file_ops;

/*
 * forward declarations
 */
#ifndef CONFIG_SMP
static void pfm_lazy_save_regs (struct task_struct *ta);
#endif

void dump_pmu_state(const char *);
static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);

#include "perfmon_itanium.h"
#include "perfmon_mckinley.h"
#include "perfmon_montecito.h"
#include "perfmon_generic.h"

static pmu_config_t *pmu_confs[]={
	&pmu_conf_mont,
	&pmu_conf_mck,
	&pmu_conf_ita,
	&pmu_conf_gen,	/* must be last */
	NULL
};

static int pfm_end_notify_user(pfm_context_t *ctx);

static inline void
pfm_clear_psr_pp(void)
{
	ia64_rsm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_pp(void)
{
	ia64_ssm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_clear_psr_up(void)
{
	ia64_rsm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_up(void)
{
	ia64_ssm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline unsigned long
pfm_get_psr(void)
{
	unsigned long tmp;
	tmp = ia64_getreg(_IA64_REG_PSR);
	ia64_srlz_i();
	return tmp;
}

static inline void
pfm_set_psr_l(unsigned long val)
{
	ia64_setreg(_IA64_REG_PSR_L, val);
	ia64_srlz_i();
}

static inline void
pfm_freeze_pmu(void)
{
	ia64_set_pmc(0,1UL);
	ia64_srlz_d();
}

static inline void
pfm_unfreeze_pmu(void)
{
	ia64_set_pmc(0,0UL);
	ia64_srlz_d();
}

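/*
 * Note on the srlz calls above: ia64_srlz_i()/ia64_srlz_d() issue an
 * instruction/data serialization so the PSR and PMC updates are
 * observed before subsequent instructions or data accesses. Freezing
 * the PMU is simply writing 1 to PMC0 (the freeze bit); unfreezing
 * writes 0.
 */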
static inline void
pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
{
	int i;

	for (i=0; i < nibrs; i++) {
		ia64_set_ibr(i, ibrs[i]);
		ia64_dv_serialize_instruction();
	}
	ia64_srlz_i();
}

static inline void
pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
{
	int i;

	for (i=0; i < ndbrs; i++) {
		ia64_set_dbr(i, dbrs[i]);
		ia64_dv_serialize_data();
	}
	ia64_srlz_d();
}

/*
 * PMD[i] must be a counter. no check is made
 */
static inline unsigned long
pfm_read_soft_counter(pfm_context_t *ctx, int i)
{
	return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
}

/*
 * PMD[i] must be a counter. no check is made
 */
static inline void
pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
{
	unsigned long ovfl_val = pmu_conf->ovfl_val;

	ctx->ctx_pmds[i].val = val & ~ovfl_val;
	/*
	 * writes to the unimplemented part are ignored, so we do not
	 * need to mask off the top part
	 */
	ia64_set_pmd(i, val & ovfl_val);
}

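/*
 * Worked example (47 bits is illustrative; the real width is
 * PMU-dependent and comes from pmu_conf->ovfl_val): with a 47-bit
 * hardware counter, ovfl_val == 0x7fffffffffff. Writing a 64-bit
 * value V stores the upper 17 bits (V & ~ovfl_val) in the software
 * state and the lower 47 bits in the hardware PMD;
 * pfm_read_soft_counter() simply adds the two halves back together.
 */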
static pfm_msg_t *
pfm_get_new_msg(pfm_context_t *ctx)
{
	int idx, next;

	next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
	if (next == ctx->ctx_msgq_head) return NULL;

	idx = ctx->ctx_msgq_tail;
	ctx->ctx_msgq_tail = next;

	DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));

	return ctx->ctx_msgq+idx;
}

static pfm_msg_t *
pfm_get_next_msg(pfm_context_t *ctx)
{
	pfm_msg_t *msg;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

	if (PFM_CTXQ_EMPTY(ctx)) return NULL;

	/*
	 * get oldest message
	 */
	msg = ctx->ctx_msgq+ctx->ctx_msgq_head;

	/*
	 * and move forward
	 */
	ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));

	return msg;
}

static void
pfm_reset_msgq(pfm_context_t *ctx)
{
	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
	DPRINT(("ctx=%p msgq reset\n", ctx));
}

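/*
 * The message queue above is a classic ring buffer that keeps one
 * slot unused to distinguish full from empty: it is full when
 * (tail+1) % PFM_MAX_MSGS == head, so at most PFM_MAX_MSGS-1
 * messages can be queued at any time.
 */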
static void *
pfm_rvmalloc(unsigned long size)
{
	void *mem;
	unsigned long addr;

	size = PAGE_ALIGN(size);
	mem  = vmalloc(size);
	if (mem) {
		//printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
		memset(mem, 0, size);
		addr = (unsigned long)mem;
		while (size > 0) {
			pfm_reserve_page(addr);
			addr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
	return mem;
}

static void
pfm_rvfree(void *mem, unsigned long size)
{
	unsigned long addr;

	if (mem) {
		DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
		addr = (unsigned long) mem;
		while ((long) size > 0) {
			pfm_unreserve_page(addr);
			addr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
		vfree(mem);
	}
	return;
}

static pfm_context_t *
pfm_context_alloc(void)
{
	pfm_context_t *ctx;

	/*
	 * allocate context descriptor
	 * must be able to free with interrupts disabled
	 */
	ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
	if (ctx) {
		DPRINT(("alloc ctx @%p\n", ctx));
	}
	return ctx;
}

static void
pfm_context_free(pfm_context_t *ctx)
{
	if (ctx) {
		DPRINT(("free ctx @%p\n", ctx));
		kfree(ctx);
	}
}

static void
pfm_mask_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	unsigned long mask, val, ovfl_mask;
	int i;

	DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid));

	ovfl_mask = pmu_conf->ovfl_val;
	/*
	 * monitoring can only be masked as a result of a valid
	 * counter overflow. In UP, it means that the PMU still
	 * has an owner. Note that the owner can be different
	 * from the current task. However the PMU state belongs
	 * to the owner.
	 * In SMP, a valid overflow only happens when task is
	 * current. Therefore if we come here, we know that
	 * the PMU state belongs to the current task, therefore
	 * we can access the live registers.
	 *
	 * So in both cases, the live register contains the owner's
	 * state. We can ONLY touch the PMU registers and NOT the PSR.
	 *
	 * As a consequence of this call, the ctx->th_pmds[] array
	 * contains stale information which must be ignored
	 * when context is reloaded AND monitoring is active (see
	 * pfm_restart).
	 */
	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask>>=1) {
		/* skip non used pmds */
		if ((mask & 0x1) == 0) continue;
		val = ia64_get_pmd(i);

		if (PMD_IS_COUNTING(i)) {
			/*
			 * we rebuild the full 64 bit value of the counter
			 */
			ctx->ctx_pmds[i].val += (val & ovfl_mask);
		} else {
			ctx->ctx_pmds[i].val = val;
		}
		DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val & ovfl_mask));
	}
	/*
	 * mask monitoring by setting the privilege level to 0
	 * we cannot use psr.pp/psr.up for this, it is controlled by
	 * the user
	 *
	 * if task is current, modify actual registers, otherwise modify
	 * thread save state, i.e., what will be restored in pfm_load_regs()
	 */
	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0UL) continue;
		ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
		ctx->th_pmcs[i] &= ~0xfUL;
		DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
	}
	/*
	 * make all of this visible
	 */
	ia64_srlz_d();
}

/*
 * must always be done with task == current
 *
 * context must be in MASKED state when calling
 */
static void
pfm_restore_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	unsigned long mask, ovfl_mask;
	unsigned long psr, val;
	int i, is_system;

	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;

	if (task != current) {
		printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task->pid, current->pid);
		return;
	}
	if (ctx->ctx_state != PFM_CTX_MASKED) {
		printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
			task->pid, current->pid, ctx->ctx_state);
		return;
	}
	psr = pfm_get_psr();
	/*
	 * monitoring is masked via the PMC.
	 * As we restore their value, we do not want each counter to
	 * restart right away. We stop monitoring using the PSR,
	 * restore the PMC (and PMD) and then re-establish the psr
	 * as it was. Note that there can be no pending overflow at
	 * this point, because monitoring was MASKED.
	 *
	 * system-wide sessions are pinned and self-monitoring
	 */
	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* disable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		pfm_clear_psr_pp();
	} else {
		pfm_clear_psr_up();
	}
	/*
	 * first, we restore the PMD
	 */
	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask>>=1) {
		/* skip non used pmds */
		if ((mask & 0x1) == 0) continue;

		if (PMD_IS_COUNTING(i)) {
			/*
			 * we split the 64bit value according to
			 * counter width
			 */
			val = ctx->ctx_pmds[i].val & ovfl_mask;
			ctx->ctx_pmds[i].val &= ~ovfl_mask;
		} else {
			val = ctx->ctx_pmds[i].val;
		}
		ia64_set_pmd(i, val);

		DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val));
	}
	/*
	 * restore the PMCs
	 */
	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0UL) continue;
		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
		ia64_set_pmc(i, ctx->th_pmcs[i]);
		DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, ctx->th_pmcs[i]));
	}
	ia64_srlz_d();

	/*
	 * must restore DBR/IBR because could be modified while masked
	 * XXX: need to optimize
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/*
	 * now restore PSR
	 */
	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* enable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
		ia64_srlz_i();
	}
	pfm_set_psr_l(psr);
}

static inline void
pfm_save_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;

	ia64_srlz_d();

	for (i=0; mask; i++, mask>>=1) {
		if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
	}
}

/*
 * reload from thread state (used for ctxsw only)
 */
static inline void
pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;
	unsigned long val, ovfl_val = pmu_conf->ovfl_val;

	for (i=0; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0) continue;
		val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
		ia64_set_pmd(i, val);
	}
	ia64_srlz_d();
}

/*
 * propagate PMD from context to thread-state
 */
static inline void
pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
{
	unsigned long ovfl_val = pmu_conf->ovfl_val;
	unsigned long mask = ctx->ctx_all_pmds[0];
	unsigned long val;
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i=0; mask; i++, mask>>=1) {

		val = ctx->ctx_pmds[i].val;

		/*
		 * We break up the 64 bit value into 2 pieces
		 * the lower bits go to the machine state in the
		 * thread (will be reloaded on ctxsw in).
		 * The upper part stays in the soft-counter.
		 */
		if (PMD_IS_COUNTING(i)) {
			ctx->ctx_pmds[i].val = val & ~ovfl_val;
			val &= ovfl_val;
		}
		ctx->th_pmds[i] = val;

		DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
			i,
			ctx->th_pmds[i],
			ctx->ctx_pmds[i].val));
	}
}

/*
 * propagate PMC from context to thread-state
 */
static inline void
pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
{
	unsigned long mask = ctx->ctx_all_pmcs[0];
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i=0; mask; i++, mask>>=1) {
		/* masking 0 with ovfl_val yields 0 */
		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
		DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
	}
}

static inline void
pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
{
	int i;

	for (i=0; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0) continue;
		ia64_set_pmc(i, pmcs[i]);
	}
	ia64_srlz_d();
}

static inline int
pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
{
	return memcmp(a, b, sizeof(pfm_uuid_t));
}

static inline int
pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
	return ret;
}

static inline int
pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
{
	int ret = 0;
	if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
	return ret;
}


static inline int
pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
		     int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
	return ret;
}

static inline int
pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
		 int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
	return ret;
}

static inline int
pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
	return ret;
}

static inline int
pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
	return ret;
}

static pfm_buffer_fmt_t *
__pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	struct list_head * pos;
	pfm_buffer_fmt_t * entry;

	list_for_each(pos, &pfm_buffer_fmt_list) {
		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
		if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
			return entry;
	}
	return NULL;
}

/*
 * find a buffer format based on its uuid
 */
static pfm_buffer_fmt_t *
pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t * fmt;
	spin_lock(&pfm_buffer_fmt_lock);
	fmt = __pfm_find_buffer_fmt(uuid);
	spin_unlock(&pfm_buffer_fmt_lock);
	return fmt;
}

int
pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
{
	int ret = 0;

	/* some sanity checks */
	if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;

	/* we need at least a handler */
	if (fmt->fmt_handler == NULL) return -EINVAL;

	/*
	 * XXX: need check validity of fmt_arg_size
	 */

	spin_lock(&pfm_buffer_fmt_lock);

	if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
		printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
		ret = -EBUSY;
		goto out;
	}
	list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
	printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;
}
EXPORT_SYMBOL(pfm_register_buffer_fmt);

int
pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t *fmt;
	int ret = 0;

	spin_lock(&pfm_buffer_fmt_lock);

	fmt = __pfm_find_buffer_fmt(uuid);
	if (!fmt) {
		printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
		ret = -EINVAL;
		goto out;
	}
	list_del_init(&fmt->fmt_list);
	printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;

}
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);

extern void update_pal_halt_status(int);

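/*
 * Session accounting invariants enforced below: system-wide and
 * per-task sessions are mutually exclusive, and at most one
 * system-wide session may own a given CPU (pfs_sys_session[cpu]).
 */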
static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
	unsigned long flags;
	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		/*
		 * cannot mix system wide and per-task sessions
		 */
		if (pfm_sessions.pfs_task_sessions > 0UL) {
			DPRINT(("system wide not possible, %u conflicting task_sessions\n",
				pfm_sessions.pfs_task_sessions));
			goto abort;
		}

		if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;

		DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));

		pfm_sessions.pfs_sys_session[cpu] = task;

		pfm_sessions.pfs_sys_sessions++;

	} else {
		if (pfm_sessions.pfs_sys_sessions) goto abort;
		pfm_sessions.pfs_task_sessions++;
	}

	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	/*
	 * disable default_idle() to go to PAL_HALT
	 */
	update_pal_halt_status(0);

	UNLOCK_PFS(flags);

	return 0;

error_conflict:
	DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
		pfm_sessions.pfs_sys_session[cpu]->pid,
		cpu));
abort:
	UNLOCK_PFS(flags);

	return -EBUSY;

}

static int
pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
{
	unsigned long flags;
	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));


	if (is_syswide) {
		pfm_sessions.pfs_sys_session[cpu] = NULL;
		/*
		 * would not work with perfmon+more than one bit in cpu_mask
		 */
		if (ctx && ctx->ctx_fl_using_dbreg) {
			if (pfm_sessions.pfs_sys_use_dbregs == 0) {
				printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
			} else {
				pfm_sessions.pfs_sys_use_dbregs--;
			}
		}
		pfm_sessions.pfs_sys_sessions--;
	} else {
		pfm_sessions.pfs_task_sessions--;
	}
	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	/*
	 * if possible, enable default_idle() to go into PAL_HALT
	 */
	if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
		update_pal_halt_status(1);

	UNLOCK_PFS(flags);

	return 0;
}

/*
 * removes virtual mapping of the sampling buffer.
 * IMPORTANT: cannot be called with interrupts disabled, e.g. inside
 * a PROTECT_CTX() section.
 */
static int
pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size)
{
	int r;

	/* sanity checks */
	if (task->mm == NULL || size == 0UL || vaddr == NULL) {
		printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task->pid, task->mm);
		return -EINVAL;
	}

	DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));

	/*
	 * does the actual unmapping
	 */
	down_write(&task->mm->mmap_sem);

	DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size));

	r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0);

	up_write(&task->mm->mmap_sem);
	if (r != 0) {
		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task->pid, vaddr, size);
	}

	DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));

	return 0;
}

/*
 * free actual physical storage used by sampling buffer
 */
#if 0
static int
pfm_free_smpl_buffer(pfm_context_t *ctx)
{
	pfm_buffer_fmt_t *fmt;

	if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;

	/*
	 * we won't use the buffer format anymore
	 */
	fmt = ctx->ctx_buf_fmt;

	DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
		ctx->ctx_smpl_hdr,
		ctx->ctx_smpl_size,
		ctx->ctx_smpl_vaddr));

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);

	/*
	 * free the buffer
	 */
	pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);

	ctx->ctx_smpl_hdr  = NULL;
	ctx->ctx_smpl_size = 0UL;

	return 0;

invalid_free:
	printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", current->pid);
	return -EINVAL;
}
#endif

static inline void
pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
{
	if (fmt == NULL) return;

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);

}

/*
 * pfmfs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pfm: will go nicely and kill the special-casing in procfs.
 */
static struct vfsmount *pfmfs_mnt;

static int __init
init_pfm_fs(void)
{
	int err = register_filesystem(&pfm_fs_type);
	if (!err) {
		pfmfs_mnt = kern_mount(&pfm_fs_type);
		err = PTR_ERR(pfmfs_mnt);
		if (IS_ERR(pfmfs_mnt))
			unregister_filesystem(&pfm_fs_type);
		else
			err = 0;
	}
	return err;
}

static void __exit
exit_pfm_fs(void)
{
	unregister_filesystem(&pfm_fs_type);
	mntput(pfmfs_mnt);
}

1511 | static ssize_t | |
1512 | pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos) | |
1513 | { | |
1514 | pfm_context_t *ctx; | |
1515 | pfm_msg_t *msg; | |
1516 | ssize_t ret; | |
1517 | unsigned long flags; | |
1518 | DECLARE_WAITQUEUE(wait, current); | |
1519 | if (PFM_IS_FILE(filp) == 0) { | |
1520 | printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid); | |
1521 | return -EINVAL; | |
1522 | } | |
1523 | ||
1524 | ctx = (pfm_context_t *)filp->private_data; | |
1525 | if (ctx == NULL) { | |
1526 | printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", current->pid); | |
1527 | return -EINVAL; | |
1528 | } | |
1529 | ||
1530 | /* | |
1531 | * check even when there is no message | |
1532 | */ | |
1533 | if (size < sizeof(pfm_msg_t)) { | |
1534 | DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t))); | |
1535 | return -EINVAL; | |
1536 | } | |
1537 | ||
1538 | PROTECT_CTX(ctx, flags); | |
1539 | ||
1540 | /* | |
1541 | * put ourselves on the wait queue | |
1542 | */ | |
1543 | add_wait_queue(&ctx->ctx_msgq_wait, &wait); | |
1544 | ||
1545 | ||
1546 | for(;;) { | |
1547 | /* | |
1548 | * check wait queue | |
1549 | */ | |
1550 | ||
1551 | set_current_state(TASK_INTERRUPTIBLE); | |
1552 | ||
1553 | DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail)); | |
1554 | ||
1555 | ret = 0; | |
1556 | if(PFM_CTXQ_EMPTY(ctx) == 0) break; | |
1557 | ||
1558 | UNPROTECT_CTX(ctx, flags); | |
1559 | ||
1560 | /* | |
1561 | * check non-blocking read | |
1562 | */ | |
1563 | ret = -EAGAIN; | |
1564 | if(filp->f_flags & O_NONBLOCK) break; | |
1565 | ||
1566 | /* | |
1567 | * check pending signals | |
1568 | */ | |
1569 | if(signal_pending(current)) { | |
1570 | ret = -EINTR; | |
1571 | break; | |
1572 | } | |
1573 | /* | |
1574 | * no message, so wait | |
1575 | */ | |
1576 | schedule(); | |
1577 | ||
1578 | PROTECT_CTX(ctx, flags); | |
1579 | } | |
1580 | DPRINT(("[%d] back to running ret=%ld\n", current->pid, ret)); | |
1581 | set_current_state(TASK_RUNNING); | |
1582 | remove_wait_queue(&ctx->ctx_msgq_wait, &wait); | |
1583 | ||
1584 | if (ret < 0) goto abort; | |
1585 | ||
1586 | ret = -EINVAL; | |
1587 | msg = pfm_get_next_msg(ctx); | |
1588 | if (msg == NULL) { | |
1589 | printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, current->pid); | |
1590 | goto abort_locked; | |
1591 | } | |
1592 | ||
4944930a | 1593 | DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type)); |
1da177e4 LT |
1594 | |
1595 | ret = -EFAULT; | |
1596 | if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t); | |
1597 | ||
1598 | abort_locked: | |
1599 | UNPROTECT_CTX(ctx, flags); | |
1600 | abort: | |
1601 | return ret; | |
1602 | } | |
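/*
 * Illustrative sketch (not part of perfmon.c): how a user-level
 * monitoring tool is expected to consume the messages delivered by
 * pfm_read() above. A read() of exactly sizeof(pfm_msg_t) blocks
 * until a message is queued, or fails with EAGAIN when the
 * descriptor is in O_NONBLOCK mode. The message-type constants are
 * assumed from the perfmon user headers.
 */
#if 0	/* example only, never compiled */
static int
consume_one_message(int ctx_fd)
{
	pfm_msg_t msg;

	if (read(ctx_fd, &msg, sizeof(msg)) != sizeof(msg))
		return -1;	/* EAGAIN, EINTR, or short read */

	switch (msg.pfm_gen_msg.msg_type) {
	case PFM_MSG_OVFL:	/* counter overflow notification */
		/* drain the sampling buffer, then issue PFM_RESTART */
		break;
	case PFM_MSG_END:	/* monitored task has exited */
		break;
	}
	return 0;
}
#endif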
1603 | ||
1604 | static ssize_t | |
1605 | pfm_write(struct file *file, const char __user *ubuf, | |
1606 | size_t size, loff_t *ppos) | |
1607 | { | |
1608 | DPRINT(("pfm_write called\n")); | |
1609 | return -EINVAL; | |
1610 | } | |
1611 | ||
1612 | static unsigned int | |
1613 | pfm_poll(struct file *filp, poll_table * wait) | |
1614 | { | |
1615 | pfm_context_t *ctx; | |
1616 | unsigned long flags; | |
1617 | unsigned int mask = 0; | |
1618 | ||
1619 | if (PFM_IS_FILE(filp) == 0) { | |
1620 | printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid); | |
1621 | return 0; | |
1622 | } | |
1623 | ||
1624 | ctx = (pfm_context_t *)filp->private_data; | |
1625 | if (ctx == NULL) { | |
1626 | printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", current->pid); | |
1627 | return 0; | |
1628 | } | |
1629 | ||
1630 | ||
1631 | DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd)); | |
1632 | ||
1633 | poll_wait(filp, &ctx->ctx_msgq_wait, wait); | |
1634 | ||
1635 | PROTECT_CTX(ctx, flags); | |
1636 | ||
1637 | if (PFM_CTXQ_EMPTY(ctx) == 0) | |
1638 | mask = POLLIN | POLLRDNORM; | |
1639 | ||
1640 | UNPROTECT_CTX(ctx, flags); | |
1641 | ||
1642 | DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask)); | |
1643 | ||
1644 | return mask; | |
1645 | } | |
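/*
 * Illustrative sketch (an assumption, not from the original source):
 * because pfm_poll() reports POLLIN|POLLRDNORM when the message queue
 * is non-empty, a context fd can be multiplexed with ordinary
 * descriptors via poll(2).
 */
#if 0	/* example only, never compiled */
	struct pollfd pfd = { .fd = ctx_fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		/* a message is queued: read() will not block now */
	}
#endif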
1646 | ||
1647 | static int | |
1648 | pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) | |
1649 | { | |
1650 | DPRINT(("pfm_ioctl called\n")); | |
1651 | return -EINVAL; | |
1652 | } | |
1653 | ||
1654 | /* | |
1655 | * interrupts must not be masked when coming here | |
1656 | */ | |
1657 | static inline int | |
1658 | pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on) | |
1659 | { | |
1660 | int ret; | |
1661 | ||
1662 | ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue); | |
1663 | ||
1664 | DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n", | |
1665 | current->pid, | |
1666 | fd, | |
1667 | on, | |
1668 | ctx->ctx_async_queue, ret)); | |
1669 | ||
1670 | return ret; | |
1671 | } | |
1672 | ||
1673 | static int | |
1674 | pfm_fasync(int fd, struct file *filp, int on) | |
1675 | { | |
1676 | pfm_context_t *ctx; | |
1677 | int ret; | |
1678 | ||
1679 | if (PFM_IS_FILE(filp) == 0) { | |
1680 | printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", current->pid); | |
1681 | return -EBADF; | |
1682 | } | |
1683 | ||
1684 | ctx = (pfm_context_t *)filp->private_data; | |
1685 | if (ctx == NULL) { | |
1686 | printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", current->pid); | |
1687 | return -EBADF; | |
1688 | } | |
1689 | /* | |
1690 | * we cannot mask interrupts during this call because it may | |
1691 | * go to sleep if memory is not readily available. | |
1692 | * | |
1693 | * We are protected from the context disappearing by the get_fd()/put_fd() | |
1694 | * done in caller. Serialization of this function is ensured by caller. | |
1695 | */ | |
1696 | ret = pfm_do_fasync(fd, filp, ctx, on); | |
1697 | ||
1698 | ||
1699 | DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n", | |
1700 | fd, | |
1701 | on, | |
1702 | ctx->ctx_async_queue, ret)); | |
1703 | ||
1704 | return ret; | |
1705 | } | |
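/*
 * Illustrative sketch: pfm_fasync() plugs into the generic
 * fcntl(F_SETFL, O_ASYNC) machinery, so a tool preferring
 * signal-driven notification would arm it roughly as below
 * (standard fcntl(2) calls; shown as an assumption for
 * illustration, not mandated usage).
 */
#if 0	/* example only, never compiled */
	fcntl(ctx_fd, F_SETOWN, getpid());
	fcntl(ctx_fd, F_SETFL, fcntl(ctx_fd, F_GETFL) | O_ASYNC);
	/* SIGIO is now raised whenever a message is queued on ctx_fd */
#endif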
1706 | ||
1707 | #ifdef CONFIG_SMP | |
1708 | /* | |
1709 | * this function is exclusively called from pfm_close(). | |
1710 | * The context is not protected at that time, nor are interrupts | |
1711 | * on the remote CPU. That's necessary to avoid deadlocks. | |
1712 | */ | |
1713 | static void | |
1714 | pfm_syswide_force_stop(void *info) | |
1715 | { | |
1716 | pfm_context_t *ctx = (pfm_context_t *)info; | |
6450578f | 1717 | struct pt_regs *regs = task_pt_regs(current); |
1da177e4 LT |
1718 | struct task_struct *owner; |
1719 | unsigned long flags; | |
1720 | int ret; | |
1721 | ||
1722 | if (ctx->ctx_cpu != smp_processor_id()) { | |
1723 | printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n", | |
1724 | ctx->ctx_cpu, | |
1725 | smp_processor_id()); | |
1726 | return; | |
1727 | } | |
1728 | owner = GET_PMU_OWNER(); | |
1729 | if (owner != ctx->ctx_task) { | |
1730 | printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n", | |
1731 | smp_processor_id(), | |
1732 | owner->pid, ctx->ctx_task->pid); | |
1733 | return; | |
1734 | } | |
1735 | if (GET_PMU_CTX() != ctx) { | |
1736 | printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n", | |
1737 | smp_processor_id(), | |
1738 | GET_PMU_CTX(), ctx); | |
1739 | return; | |
1740 | } | |
1741 | ||
1742 | DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), ctx->ctx_task->pid)); | |
1743 | /* | |
1744 | * the context is already protected in pfm_close(), we simply | |
1745 | * need to mask interrupts to avoid a PMU interrupt race on | |
1746 | * this CPU | |
1747 | */ | |
1748 | local_irq_save(flags); | |
1749 | ||
1750 | ret = pfm_context_unload(ctx, NULL, 0, regs); | |
1751 | if (ret) { | |
1752 | DPRINT(("context_unload returned %d\n", ret)); | |
1753 | } | |
1754 | ||
1755 | /* | |
1756 | * unmask interrupts, PMU interrupts are now spurious here | |
1757 | */ | |
1758 | local_irq_restore(flags); | |
1759 | } | |
1760 | ||
1761 | static void | |
1762 | pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx) | |
1763 | { | |
1764 | int ret; | |
1765 | ||
1766 | DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu)); | |
1767 | ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1); | |
1768 | DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret)); | |
1769 | } | |
1770 | #endif /* CONFIG_SMP */ | |
1771 | ||
1772 | /* | |
1773 | * called for each close(). Partially free resources. | |
1774 | * When caller is self-monitoring, the context is unloaded. | |
1775 | */ | |
1776 | static int | |
75e1fcc0 | 1777 | pfm_flush(struct file *filp, fl_owner_t id) |
1da177e4 LT |
1778 | { |
1779 | pfm_context_t *ctx; | |
1780 | struct task_struct *task; | |
1781 | struct pt_regs *regs; | |
1782 | unsigned long flags; | |
1783 | unsigned long smpl_buf_size = 0UL; | |
1784 | void *smpl_buf_vaddr = NULL; | |
1785 | int state, is_system; | |
1786 | ||
1787 | if (PFM_IS_FILE(filp) == 0) { | |
1788 | DPRINT(("bad magic for\n")); | |
1789 | return -EBADF; | |
1790 | } | |
1791 | ||
1792 | ctx = (pfm_context_t *)filp->private_data; | |
1793 | if (ctx == NULL) { | |
1794 | printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", current->pid); | |
1795 | return -EBADF; | |
1796 | } | |
1797 | ||
1798 | /* | |
1799 | * remove our file from the async queue, if we use this mode. | |
1800 | * This can be done without the context being protected. We come | |
1801 | * here when the context has become unreachable by other tasks. | |
1802 | * | |
1803 | * We may still have active monitoring at this point and we may | |
1804 | * end up in pfm_overflow_handler(). However, fasync_helper() | |
1805 | * operates with interrupts disabled and it cleans up the | |
1806 | * queue. If the PMU handler is called prior to entering | |
1807 | * fasync_helper() then it will send a signal. If it is | |
1808 | * invoked after, it will find an empty queue and no | |
1809 | * signal will be sent. In either case, we are safe. | |
1810 | */ | |
1811 | if (filp->f_flags & FASYNC) { | |
1812 | DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue)); | |
1813 | pfm_do_fasync (-1, filp, ctx, 0); | |
1814 | } | |
1815 | ||
1816 | PROTECT_CTX(ctx, flags); | |
1817 | ||
1818 | state = ctx->ctx_state; | |
1819 | is_system = ctx->ctx_fl_system; | |
1820 | ||
1821 | task = PFM_CTX_TASK(ctx); | |
6450578f | 1822 | regs = task_pt_regs(task); |
1da177e4 LT |
1823 | |
1824 | DPRINT(("ctx_state=%d is_current=%d\n", | |
1825 | state, | |
1826 | task == current ? 1 : 0)); | |
1827 | ||
1828 | /* | |
1829 | * if state == UNLOADED, then task is NULL | |
1830 | */ | |
1831 | ||
1832 | /* | |
1833 | * we must stop and unload because we are losing access to the context. | |
1834 | */ | |
1835 | if (task == current) { | |
1836 | #ifdef CONFIG_SMP | |
1837 | /* | |
1838 | * the task IS the owner but it migrated to another CPU: that's bad | |
1839 | * but we must handle this cleanly. Unfortunately, the kernel does | |
1840 | * not provide a mechanism to block migration (while the context is loaded). | |
1841 | * | |
1842 | * We need to release the resource on the ORIGINAL cpu. | |
1843 | */ | |
1844 | if (is_system && ctx->ctx_cpu != smp_processor_id()) { | |
1845 | ||
1846 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | |
1847 | /* | |
1848 | * keep context protected but unmask interrupt for IPI | |
1849 | */ | |
1850 | local_irq_restore(flags); | |
1851 | ||
1852 | pfm_syswide_cleanup_other_cpu(ctx); | |
1853 | ||
1854 | /* | |
1855 | * restore interrupt masking | |
1856 | */ | |
1857 | local_irq_save(flags); | |
1858 | ||
1859 | /* | |
1860 | * context is unloaded at this point | |
1861 | */ | |
1862 | } else | |
1863 | #endif /* CONFIG_SMP */ | |
1864 | { | |
1865 | ||
1866 | DPRINT(("forcing unload\n")); | |
1867 | /* | |
1868 | * stop and unload, returning with state UNLOADED | |
1869 | * and session unreserved. | |
1870 | */ | |
1871 | pfm_context_unload(ctx, NULL, 0, regs); | |
1872 | ||
1873 | DPRINT(("ctx_state=%d\n", ctx->ctx_state)); | |
1874 | } | |
1875 | } | |
1876 | ||
1877 | /* | |
1878 | * remove virtual mapping, if any, for the calling task. | |
1879 | * cannot reset ctx field until last user is calling close(). | |
1880 | * | |
1881 | * ctx_smpl_vaddr must never be cleared because it is needed | |
1882 | * by every task with access to the context | |
1883 | * | |
1884 | * When called from do_exit(), the mm context is gone already, therefore | |
1885 | * mm is NULL, i.e., the VMA is already gone and we do not have to | |
1886 | * do anything here | |
1887 | */ | |
1888 | if (ctx->ctx_smpl_vaddr && current->mm) { | |
1889 | smpl_buf_vaddr = ctx->ctx_smpl_vaddr; | |
1890 | smpl_buf_size = ctx->ctx_smpl_size; | |
1891 | } | |
1892 | ||
1893 | UNPROTECT_CTX(ctx, flags); | |
1894 | ||
1895 | /* | |
1896 | * if there was a mapping, then we systematically remove it | |
1897 | * at this point. Cannot be done inside critical section | |
1898 | * because some VM function reenables interrupts. | |
1899 | * | |
1900 | */ | |
1901 | if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size); | |
1902 | ||
1903 | return 0; | |
1904 | } | |
1905 | /* | |
1906 | * called either on explicit close() or from exit_files(). | |
1907 | * Only the LAST user of the file gets to this point, i.e., it is | |
1908 | * called only ONCE. | |
1909 | * | |
1910 | * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero | |
1911 | * (fput()), i.e., the last task to access the file. Nobody else can access the | |
1912 | * file at this point. | |
1913 | * | |
1914 | * When called from exit_files(), the VMA has been freed because exit_mm() | |
1915 | * is executed before exit_files(). | |
1916 | * | |
1917 | * When called from exit_files(), the current task is not yet ZOMBIE but we | |
1918 | * flush the PMU state to the context. | |
1919 | */ | |
1920 | static int | |
1921 | pfm_close(struct inode *inode, struct file *filp) | |
1922 | { | |
1923 | pfm_context_t *ctx; | |
1924 | struct task_struct *task; | |
1925 | struct pt_regs *regs; | |
1926 | DECLARE_WAITQUEUE(wait, current); | |
1927 | unsigned long flags; | |
1928 | unsigned long smpl_buf_size = 0UL; | |
1929 | void *smpl_buf_addr = NULL; | |
1930 | int free_possible = 1; | |
1931 | int state, is_system; | |
1932 | ||
1933 | DPRINT(("pfm_close called private=%p\n", filp->private_data)); | |
1934 | ||
1935 | if (PFM_IS_FILE(filp) == 0) { | |
1936 | DPRINT(("bad magic\n")); | |
1937 | return -EBADF; | |
1938 | } | |
1939 | ||
1940 | ctx = (pfm_context_t *)filp->private_data; | |
1941 | if (ctx == NULL) { | |
1942 | printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid); | |
1943 | return -EBADF; | |
1944 | } | |
1945 | ||
1946 | PROTECT_CTX(ctx, flags); | |
1947 | ||
1948 | state = ctx->ctx_state; | |
1949 | is_system = ctx->ctx_fl_system; | |
1950 | ||
1951 | task = PFM_CTX_TASK(ctx); | |
6450578f | 1952 | regs = task_pt_regs(task); |
1da177e4 LT |
1953 | |
1954 | DPRINT(("ctx_state=%d is_current=%d\n", | |
1955 | state, | |
1956 | task == current ? 1 : 0)); | |
1957 | ||
1958 | /* | |
1959 | * if task == current, then pfm_flush() unloaded the context | |
1960 | */ | |
1961 | if (state == PFM_CTX_UNLOADED) goto doit; | |
1962 | ||
1963 | /* | |
1964 | * context is loaded/masked and task != current, we need to | |
1965 | * either force an unload or go zombie | |
1966 | */ | |
1967 | ||
1968 | /* | |
1969 | * The task is currently blocked or will block after an overflow. | |
1970 | * we must force it to wakeup to get out of the | |
1971 | * MASKED state and transition to the unloaded state by itself. | |
1972 | * | |
1973 | * This situation is only possible for per-task mode | |
1974 | */ | |
1975 | if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) { | |
1976 | ||
1977 | /* | |
1978 | * set a "partial" zombie state to be checked | |
1979 | * upon return from the completion wait in pfm_handle_work(). | |
1980 | * | |
1981 | * We cannot use the ZOMBIE state, because it is checked | |
1982 | * by pfm_load_regs() which is called upon wakeup from that wait. | |
1983 | * In such case, it would free the context and then we would | |
1984 | * return to pfm_handle_work() which would access the | |
1985 | * stale context. Instead, we set a flag invisible to pfm_load_regs() | |
1986 | * but visible to pfm_handle_work(). | |
1987 | * | |
1988 | * For some window of time, we have a zombie context with | |
1989 | * ctx_state = MASKED and not ZOMBIE | |
1990 | */ | |
1991 | ctx->ctx_fl_going_zombie = 1; | |
1992 | ||
1993 | /* | |
1994 | * force task to wake up from MASKED state | |
1995 | */ | |
60f1c444 | 1996 | complete(&ctx->ctx_restart_done); |
1da177e4 LT |
1997 | |
1998 | DPRINT(("waking up ctx_state=%d\n", state)); | |
1999 | ||
2000 | /* | |
2001 | * put ourself to sleep waiting for the other | |
2002 | * task to report completion | |
2003 | * | |
2004 | * the context is protected by its lock, therefore there | |
2005 | * is no risk of being notified of completion before | |
2006 | * being actually on the waitq. | |
2007 | */ | |
2008 | set_current_state(TASK_INTERRUPTIBLE); | |
2009 | add_wait_queue(&ctx->ctx_zombieq, &wait); | |
2010 | ||
2011 | UNPROTECT_CTX(ctx, flags); | |
2012 | ||
2013 | /* | |
2014 | * XXX: check for signals : | |
2015 | * - ok for explicit close | |
2016 | * - not ok when coming from exit_files() | |
2017 | */ | |
2018 | schedule(); | |
2019 | ||
2020 | ||
2021 | PROTECT_CTX(ctx, flags); | |
2022 | ||
2023 | ||
2024 | remove_wait_queue(&ctx->ctx_zombieq, &wait); | |
2025 | set_current_state(TASK_RUNNING); | |
2026 | ||
2027 | /* | |
2028 | * context is unloaded at this point | |
2029 | */ | |
2030 | DPRINT(("after zombie wakeup ctx_state=%d for\n", state)); | |
2031 | } | |
2032 | else if (task != current) { | |
2033 | #ifdef CONFIG_SMP | |
2034 | /* | |
2035 | * switch context to zombie state | |
2036 | */ | |
2037 | ctx->ctx_state = PFM_CTX_ZOMBIE; | |
2038 | ||
2039 | DPRINT(("zombie ctx for [%d]\n", task->pid)); | |
2040 | /* | |
2041 | * cannot free the context on the spot. deferred until | |
2042 | * the task notices the ZOMBIE state | |
2043 | */ | |
2044 | free_possible = 0; | |
2045 | #else | |
2046 | pfm_context_unload(ctx, NULL, 0, regs); | |
2047 | #endif | |
2048 | } | |
2049 | ||
2050 | doit: | |
2051 | /* reload state, may have changed during opening of critical section */ | |
2052 | state = ctx->ctx_state; | |
2053 | ||
2054 | /* | |
2055 | * the context is still attached to a task (possibly current) | |
2056 | * we cannot destroy it right now | |
2057 | */ | |
2058 | ||
2059 | /* | |
2060 | * we must free the sampling buffer right here because | |
2061 | * we cannot rely on it being cleaned up later by the | |
2062 | * monitored task. It is not possible to free vmalloc'ed | |
2063 | * memory in pfm_load_regs(). Instead, we remove the buffer | |
2064 | * now. should there be subsequent PMU overflow originally | |
2065 | * meant for sampling, the will be converted to spurious | |
2066 | * and that's fine because the monitoring tools is gone anyway. | |
2067 | */ | |
2068 | if (ctx->ctx_smpl_hdr) { | |
2069 | smpl_buf_addr = ctx->ctx_smpl_hdr; | |
2070 | smpl_buf_size = ctx->ctx_smpl_size; | |
2071 | /* no more sampling */ | |
2072 | ctx->ctx_smpl_hdr = NULL; | |
2073 | ctx->ctx_fl_is_sampling = 0; | |
2074 | } | |
2075 | ||
2076 | DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n", | |
2077 | state, | |
2078 | free_possible, | |
2079 | smpl_buf_addr, | |
2080 | smpl_buf_size)); | |
2081 | ||
2082 | if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt); | |
2083 | ||
2084 | /* | |
2085 | * UNLOADED means the session has already been unreserved. | |
2086 | */ | |
2087 | if (state == PFM_CTX_ZOMBIE) { | |
2088 | pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu); | |
2089 | } | |
2090 | ||
2091 | /* | |
2092 | * disconnect file descriptor from context must be done | |
2093 | * before we unlock. | |
2094 | */ | |
2095 | filp->private_data = NULL; | |
2096 | ||
2097 | /* | |
2098 | * if we free on the spot, the context is now completely unreachable | |
2099 | * from the caller's side. The monitored task side is also cut, so we | |
2100 | * can free it safely. | |
2101 | * | |
2102 | * If we have a deferred free, only the caller side is disconnected. | |
2103 | */ | |
2104 | UNPROTECT_CTX(ctx, flags); | |
2105 | ||
2106 | /* | |
2107 | * All memory free operations (especially for vmalloc'ed memory) | |
2108 | * MUST be done with interrupts ENABLED. | |
2109 | */ | |
2110 | if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size); | |
2111 | ||
2112 | /* | |
2113 | * return the memory used by the context | |
2114 | */ | |
2115 | if (free_possible) pfm_context_free(ctx); | |
2116 | ||
2117 | return 0; | |
2118 | } | |
2119 | ||
2120 | static int | |
2121 | pfm_no_open(struct inode *irrelevant, struct file *dontcare) | |
2122 | { | |
2123 | DPRINT(("pfm_no_open called\n")); | |
2124 | return -ENXIO; | |
2125 | } | |
2126 | ||
2127 | ||
2128 | ||
5dfe4c96 | 2129 | static const struct file_operations pfm_file_ops = { |
1da177e4 LT |
2130 | .llseek = no_llseek, |
2131 | .read = pfm_read, | |
2132 | .write = pfm_write, | |
2133 | .poll = pfm_poll, | |
2134 | .ioctl = pfm_ioctl, | |
2135 | .open = pfm_no_open, /* special open code to disallow open via /proc */ | |
2136 | .fasync = pfm_fasync, | |
2137 | .release = pfm_close, | |
2138 | .flush = pfm_flush | |
2139 | }; | |
2140 | ||
2141 | static int | |
2142 | pfmfs_delete_dentry(struct dentry *dentry) | |
2143 | { | |
2144 | return 1; | |
2145 | } | |
2146 | ||
2147 | static struct dentry_operations pfmfs_dentry_operations = { | |
2148 | .d_delete = pfmfs_delete_dentry, | |
2149 | }; | |
2150 | ||
2151 | ||
2152 | static int | |
2153 | pfm_alloc_fd(struct file **cfile) | |
2154 | { | |
2155 | int fd, ret = 0; | |
2156 | struct file *file = NULL; | |
2157 | struct inode * inode; | |
2158 | char name[32]; | |
2159 | struct qstr this; | |
2160 | ||
2161 | fd = get_unused_fd(); | |
2162 | if (fd < 0) return -ENFILE; | |
2163 | ||
2164 | ret = -ENFILE; | |
2165 | ||
2166 | file = get_empty_filp(); | |
2167 | if (!file) goto out; | |
2168 | ||
2169 | /* | |
2170 | * allocate a new inode | |
2171 | */ | |
2172 | inode = new_inode(pfmfs_mnt->mnt_sb); | |
2173 | if (!inode) goto out; | |
2174 | ||
2175 | DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode)); | |
2176 | ||
2177 | inode->i_mode = S_IFCHR|S_IRUGO; | |
2178 | inode->i_uid = current->fsuid; | |
2179 | inode->i_gid = current->fsgid; | |
2180 | ||
2181 | sprintf(name, "[%lu]", inode->i_ino); | |
2182 | this.name = name; | |
2183 | this.len = strlen(name); | |
2184 | this.hash = inode->i_ino; | |
2185 | ||
2186 | ret = -ENOMEM; | |
2187 | ||
2188 | /* | |
2189 | * allocate a new dcache entry | |
2190 | */ | |
b66ffad9 JS |
2191 | file->f_path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this); |
2192 | if (!file->f_path.dentry) goto out; | |
1da177e4 | 2193 | |
b66ffad9 | 2194 | file->f_path.dentry->d_op = &pfmfs_dentry_operations; |
1da177e4 | 2195 | |
b66ffad9 JS |
2196 | d_add(file->f_path.dentry, inode); |
2197 | file->f_path.mnt = mntget(pfmfs_mnt); | |
1da177e4 LT |
2198 | file->f_mapping = inode->i_mapping; |
2199 | ||
2200 | file->f_op = &pfm_file_ops; | |
2201 | file->f_mode = FMODE_READ; | |
2202 | file->f_flags = O_RDONLY; | |
2203 | file->f_pos = 0; | |
2204 | ||
2205 | /* | |
2206 | * may have to delay until context is attached? | |
2207 | */ | |
2208 | fd_install(fd, file); | |
2209 | ||
2210 | /* | |
2211 | * the file structure we will use | |
2212 | */ | |
2213 | *cfile = file; | |
2214 | ||
2215 | return fd; | |
2216 | out: | |
2217 | if (file) put_filp(file); | |
2218 | put_unused_fd(fd); | |
2219 | return ret; | |
2220 | } | |
2221 | ||
2222 | static void | |
2223 | pfm_free_fd(int fd, struct file *file) | |
2224 | { | |
2225 | struct files_struct *files = current->files; | |
4fb3a538 | 2226 | struct fdtable *fdt; |
1da177e4 LT |
2227 | |
2228 | /* | |
2229 | * there is no fd_uninstall(), so we do it here | |
2230 | */ | |
2231 | spin_lock(&files->file_lock); | |
4fb3a538 | 2232 | fdt = files_fdtable(files); |
badf1662 | 2233 | rcu_assign_pointer(fdt->fd[fd], NULL); |
1da177e4 LT |
2234 | spin_unlock(&files->file_lock); |
2235 | ||
badf1662 DS |
2236 | if (file) |
2237 | put_filp(file); | |
1da177e4 LT |
2238 | put_unused_fd(fd); |
2239 | } | |
2240 | ||
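/*
 * Note on the helper below: pfm_rvmalloc() returns vmalloc()ed
 * memory, which is only virtually contiguous. The buffer therefore
 * cannot be handed to remap_pfn_range() in one go; each PAGE_SIZE
 * chunk is translated to its own physical frame with ia64_tpa() and
 * mapped individually.
 */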
2241 | static int | |
2242 | pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size) | |
2243 | { | |
2244 | DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size)); | |
2245 | ||
2246 | while (size > 0) { | |
2247 | unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT; | |
2248 | ||
2249 | ||
2250 | if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY)) | |
2251 | return -ENOMEM; | |
2252 | ||
2253 | addr += PAGE_SIZE; | |
2254 | buf += PAGE_SIZE; | |
2255 | size -= PAGE_SIZE; | |
2256 | } | |
2257 | return 0; | |
2258 | } | |
2259 | ||
2260 | /* | |
2261 | * allocate a sampling buffer and remaps it into the user address space of the task | |
2262 | */ | |
2263 | static int | |
2264 | pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr) | |
2265 | { | |
2266 | struct mm_struct *mm = task->mm; | |
2267 | struct vm_area_struct *vma = NULL; | |
2268 | unsigned long size; | |
2269 | void *smpl_buf; | |
2270 | ||
2271 | ||
2272 | /* | |
2273 | * the fixed header + requested size and align to page boundary | |
2274 | */ | |
2275 | size = PAGE_ALIGN(rsize); | |
2276 | ||
2277 | DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size)); | |
2278 | ||
2279 | /* | |
2280 | * check requested size to avoid Denial-of-service attacks | |
2281 | * XXX: may have to refine this test | |
2282 | * Check against address space limit. | |
2283 | * | |
2284 | * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur) | |
2285 | * return -ENOMEM; | |
2286 | */ | |
2287 | if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur) | |
2288 | return -ENOMEM; | |
2289 | ||
2290 | /* | |
2291 | * We do the easy to undo allocations first. | |
2292 | * | |
2293 | * pfm_rvmalloc(), clears the buffer, so there is no leak | |
2294 | */ | |
2295 | smpl_buf = pfm_rvmalloc(size); | |
2296 | if (smpl_buf == NULL) { | |
2297 | DPRINT(("Can't allocate sampling buffer\n")); | |
2298 | return -ENOMEM; | |
2299 | } | |
2300 | ||
2301 | DPRINT(("smpl_buf @%p\n", smpl_buf)); | |
2302 | ||
2303 | /* allocate vma */ | |
c3762229 | 2304 | vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); |
1da177e4 LT |
2305 | if (!vma) { |
2306 | DPRINT(("Cannot allocate vma\n")); | |
2307 | goto error_kmem; | |
2308 | } | |
1da177e4 LT |
2309 | |
2310 | /* | |
2311 | * partially initialize the vma for the sampling buffer | |
2312 | */ | |
2313 | vma->vm_mm = mm; | |
2314 | vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED; | |
2315 | vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */ | |
2316 | ||
2317 | /* | |
2318 | * Now we have everything we need and we can initialize | |
2319 | * and connect all the data structures | |
2320 | */ | |
2321 | ||
2322 | ctx->ctx_smpl_hdr = smpl_buf; | |
2323 | ctx->ctx_smpl_size = size; /* aligned size */ | |
2324 | ||
2325 | /* | |
2326 | * Let's do the difficult operations next. | |
2327 | * | |
2328 | * now we atomically find some area in the address space and | |
2329 | * remap the buffer in it. | |
2330 | */ | |
2331 | down_write(&task->mm->mmap_sem); | |
2332 | ||
2333 | /* find some free area in address space, must have mmap sem held */ | |
2334 | vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0); | |
2335 | if (vma->vm_start == 0UL) { | |
2336 | DPRINT(("Cannot find unmapped area for size %ld\n", size)); | |
2337 | up_write(&task->mm->mmap_sem); | |
2338 | goto error; | |
2339 | } | |
2340 | vma->vm_end = vma->vm_start + size; | |
2341 | vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; | |
2342 | ||
2343 | DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start)); | |
2344 | ||
2345 | /* can only be applied to current task, need to have the mm semaphore held when called */ | |
2346 | if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) { | |
2347 | DPRINT(("Can't remap buffer\n")); | |
2348 | up_write(&task->mm->mmap_sem); | |
2349 | goto error; | |
2350 | } | |
2351 | ||
2352 | /* | |
2353 | * now insert the vma in the vm list for the process, must be | |
2354 | * done with mmap lock held | |
2355 | */ | |
2356 | insert_vm_struct(mm, vma); | |
2357 | ||
2358 | mm->total_vm += size >> PAGE_SHIFT; | |
ab50b8ed HD |
2359 | vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, |
2360 | vma_pages(vma)); | |
1da177e4 LT |
2361 | up_write(&task->mm->mmap_sem); |
2362 | ||
2363 | /* | |
2364 | * keep track of user level virtual address | |
2365 | */ | |
2366 | ctx->ctx_smpl_vaddr = (void *)vma->vm_start; | |
2367 | *(unsigned long *)user_vaddr = vma->vm_start; | |
2368 | ||
2369 | return 0; | |
2370 | ||
2371 | error: | |
2372 | kmem_cache_free(vm_area_cachep, vma); | |
2373 | error_kmem: | |
2374 | pfm_rvfree(smpl_buf, size); | |
2375 | ||
2376 | return -ENOMEM; | |
2377 | } | |
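/*
 * Illustrative sketch: the vm_start chosen above is handed back to
 * the caller through pfarg_context_t.ctx_smpl_vaddr (see
 * pfm_setup_buffer_fmt() below), so a sampling tool simply casts
 * that address to the header type of the buffer format it requested.
 * The header name below is assumed from the default sampling format.
 */
#if 0	/* example only, never compiled */
	pfm_default_smpl_hdr_t *hdr;

	hdr = (pfm_default_smpl_hdr_t *)ctx_arg.ctx_smpl_vaddr;
	/* hdr->hdr_count entries follow the header in the buffer */
#endif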
2378 | ||
2379 | /* | |
2380 | * XXX: do something better here | |
2381 | */ | |
2382 | static int | |
2383 | pfm_bad_permissions(struct task_struct *task) | |
2384 | { | |
2385 | /* inspired by ptrace_attach() */ | |
2386 | DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n", | |
2387 | current->uid, | |
2388 | current->gid, | |
2389 | task->euid, | |
2390 | task->suid, | |
2391 | task->uid, | |
2392 | task->egid, | |
2393 | task->sgid)); | |
2394 | ||
2395 | return ((current->uid != task->euid) | |
2396 | || (current->uid != task->suid) | |
2397 | || (current->uid != task->uid) | |
2398 | || (current->gid != task->egid) | |
2399 | || (current->gid != task->sgid) | |
2400 | || (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE); | |
2401 | } | |
2402 | ||
2403 | static int | |
2404 | pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx) | |
2405 | { | |
2406 | int ctx_flags; | |
2407 | ||
2408 | /* valid signal */ | |
2409 | ||
2410 | ctx_flags = pfx->ctx_flags; | |
2411 | ||
2412 | if (ctx_flags & PFM_FL_SYSTEM_WIDE) { | |
2413 | ||
2414 | /* | |
2415 | * cannot block in this mode | |
2416 | */ | |
2417 | if (ctx_flags & PFM_FL_NOTIFY_BLOCK) { | |
2418 | DPRINT(("cannot use blocking mode when in system wide monitoring\n")); | |
2419 | return -EINVAL; | |
2420 | } | |
2421 | } else { | |
2422 | } | |
2423 | /* probably more to add here */ | |
2424 | ||
2425 | return 0; | |
2426 | } | |
2427 | ||
2428 | static int | |
2429 | pfm_setup_buffer_fmt(struct task_struct *task, pfm_context_t *ctx, unsigned int ctx_flags, | |
2430 | unsigned int cpu, pfarg_context_t *arg) | |
2431 | { | |
2432 | pfm_buffer_fmt_t *fmt = NULL; | |
2433 | unsigned long size = 0UL; | |
2434 | void *uaddr = NULL; | |
2435 | void *fmt_arg = NULL; | |
2436 | int ret = 0; | |
2437 | #define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1) | |
2438 | ||
2439 | /* invoke and lock buffer format, if found */ | |
2440 | fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id); | |
2441 | if (fmt == NULL) { | |
2442 | DPRINT(("[%d] cannot find buffer format\n", task->pid)); | |
2443 | return -EINVAL; | |
2444 | } | |
2445 | ||
2446 | /* | |
2447 | * buffer argument MUST be contiguous to pfarg_context_t | |
2448 | */ | |
2449 | if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg); | |
2450 | ||
2451 | ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg); | |
2452 | ||
2453 | DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task->pid, ctx_flags, cpu, fmt_arg, ret)); | |
2454 | ||
2455 | if (ret) goto error; | |
2456 | ||
2457 | /* link buffer format and context */ | |
2458 | ctx->ctx_buf_fmt = fmt; | |
2459 | ||
2460 | /* | |
2461 | * check if buffer format wants to use perfmon buffer allocation/mapping service | |
2462 | */ | |
2463 | ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size); | |
2464 | if (ret) goto error; | |
2465 | ||
2466 | if (size) { | |
2467 | /* | |
2468 | * buffer is always remapped into the caller's address space | |
2469 | */ | |
2470 | ret = pfm_smpl_buffer_alloc(current, ctx, size, &uaddr); | |
2471 | if (ret) goto error; | |
2472 | ||
2473 | /* keep track of user address of buffer */ | |
2474 | arg->ctx_smpl_vaddr = uaddr; | |
2475 | } | |
2476 | ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg); | |
2477 | ||
2478 | error: | |
2479 | return ret; | |
2480 | } | |
2481 | ||
2482 | static void | |
2483 | pfm_reset_pmu_state(pfm_context_t *ctx) | |
2484 | { | |
2485 | int i; | |
2486 | ||
2487 | /* | |
2488 | * install reset values for PMC. | |
2489 | */ | |
2490 | for (i=1; PMC_IS_LAST(i) == 0; i++) { | |
2491 | if (PMC_IS_IMPL(i) == 0) continue; | |
2492 | ctx->ctx_pmcs[i] = PMC_DFL_VAL(i); | |
2493 | DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i])); | |
2494 | } | |
2495 | /* | |
2496 | * PMD registers are set to 0UL when the context is memset() | |
2497 | */ | |
2498 | ||
2499 | /* | |
2500 | * On context-switch restore, we must restore ALL pmc and ALL pmd even | |
2501 | * when they are not actively used by the task. In UP, the incoming process | |
2502 | * may otherwise pick up left over PMC, PMD state from the previous process. | |
2503 | * As opposed to PMD, stale PMC can cause harm to the incoming | |
2504 | * process because they may change what is being measured. | |
2505 | * Therefore, we must systematically reinstall the entire | |
2506 | * PMC state. In SMP, the same thing is possible on the | |
2507 | * same CPU but also between 2 CPUs. | |
2508 | * | |
2509 | * The problem with PMD is information leaking especially | |
2510 | * to user level when psr.sp=0 | |
2511 | * | |
2512 | * There is unfortunately no easy way to avoid this problem | |
2513 | * on either UP or SMP. This definitely slows down the | |
2514 | * pfm_load_regs() function. | |
2515 | */ | |
2516 | ||
2517 | /* | |
2518 | * bitmask of all PMCs accessible to this context | |
2519 | * | |
2520 | * PMC0 is treated differently. | |
2521 | */ | |
2522 | ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1; | |
2523 | ||
2524 | /* | |
2525 | * bitmask of all PMDs that are accessible to this context | |
2526 | */ | |
2527 | ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0]; | |
2528 | ||
2529 | DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0])); | |
2530 | ||
2531 | /* | |
2532 | * useful in case of re-enable after disable | |
2533 | */ | |
2534 | ctx->ctx_used_ibrs[0] = 0UL; | |
2535 | ctx->ctx_used_dbrs[0] = 0UL; | |
2536 | } | |
2537 | ||
2538 | static int | |
2539 | pfm_ctx_getsize(void *arg, size_t *sz) | |
2540 | { | |
2541 | pfarg_context_t *req = (pfarg_context_t *)arg; | |
2542 | pfm_buffer_fmt_t *fmt; | |
2543 | ||
2544 | *sz = 0; | |
2545 | ||
2546 | if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0; | |
2547 | ||
2548 | fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id); | |
2549 | if (fmt == NULL) { | |
2550 | DPRINT(("cannot find buffer format\n")); | |
2551 | return -EINVAL; | |
2552 | } | |
2553 | /* get just enough to copy in user parameters */ | |
2554 | *sz = fmt->fmt_arg_size; | |
2555 | DPRINT(("arg_size=%lu\n", *sz)); | |
2556 | ||
2557 | return 0; | |
2558 | } | |
2559 | ||
2560 | ||
2561 | ||
2562 | /* | |
2563 | * cannot attach if : | |
2564 | * - kernel task | |
2565 | * - task not owned by caller | |
2566 | * - task incompatible with context mode | |
2567 | */ | |
2568 | static int | |
2569 | pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task) | |
2570 | { | |
2571 | /* | |
2572 | * no kernel task or task not owned by caller | |
2573 | */ | |
2574 | if (task->mm == NULL) { | |
2575 | DPRINT(("task [%d] has not memory context (kernel thread)\n", task->pid)); | |
2576 | return -EPERM; | |
2577 | } | |
2578 | if (pfm_bad_permissions(task)) { | |
2579 | DPRINT(("no permission to attach to [%d]\n", task->pid)); | |
2580 | return -EPERM; | |
2581 | } | |
2582 | /* | |
2583 | * cannot block in self-monitoring mode | |
2584 | */ | |
2585 | if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) { | |
2586 | DPRINT(("cannot load a blocking context on self for [%d]\n", task->pid)); | |
2587 | return -EINVAL; | |
2588 | } | |
2589 | ||
2590 | if (task->exit_state == EXIT_ZOMBIE) { | |
2591 | DPRINT(("cannot attach to zombie task [%d]\n", task->pid)); | |
2592 | return -EBUSY; | |
2593 | } | |
2594 | ||
2595 | /* | |
2596 | * always ok for self | |
2597 | */ | |
2598 | if (task == current) return 0; | |
2599 | ||
2600 | if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) { | |
2601 | DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state)); | |
2602 | return -EBUSY; | |
2603 | } | |
2604 | /* | |
2605 | * make sure the task is off any CPU | |
2606 | */ | |
2607 | wait_task_inactive(task); | |
2608 | ||
2609 | /* more to come... */ | |
2610 | ||
2611 | return 0; | |
2612 | } | |
2613 | ||
2614 | static int | |
2615 | pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task) | |
2616 | { | |
2617 | struct task_struct *p = current; | |
2618 | int ret; | |
2619 | ||
2620 | /* XXX: need to add more checks here */ | |
2621 | if (pid < 2) return -EPERM; | |
2622 | ||
2623 | if (pid != current->pid) { | |
2624 | ||
2625 | read_lock(&tasklist_lock); | |
2626 | ||
2627 | p = find_task_by_pid(pid); | |
2628 | ||
2629 | /* make sure task cannot go away while we operate on it */ | |
2630 | if (p) get_task_struct(p); | |
2631 | ||
2632 | read_unlock(&tasklist_lock); | |
2633 | ||
2634 | if (p == NULL) return -ESRCH; | |
2635 | } | |
2636 | ||
2637 | ret = pfm_task_incompatible(ctx, p); | |
2638 | if (ret == 0) { | |
2639 | *task = p; | |
2640 | } else if (p != current) { | |
2641 | pfm_put_task(p); | |
2642 | } | |
2643 | return ret; | |
2644 | } | |
2645 | ||
2646 | ||
2647 | ||
2648 | static int | |
2649 | pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
2650 | { | |
2651 | pfarg_context_t *req = (pfarg_context_t *)arg; | |
2652 | struct file *filp; | |
2653 | int ctx_flags; | |
2654 | int ret; | |
2655 | ||
2656 | /* let's check the arguments first */ | |
2657 | ret = pfarg_is_sane(current, req); | |
2658 | if (ret < 0) return ret; | |
2659 | ||
2660 | ctx_flags = req->ctx_flags; | |
2661 | ||
2662 | ret = -ENOMEM; | |
2663 | ||
2664 | ctx = pfm_context_alloc(); | |
2665 | if (!ctx) goto error; | |
2666 | ||
2667 | ret = pfm_alloc_fd(&filp); | |
2668 | if (ret < 0) goto error_file; | |
2669 | ||
2670 | req->ctx_fd = ctx->ctx_fd = ret; | |
2671 | ||
2672 | /* | |
2673 | * attach context to file | |
2674 | */ | |
2675 | filp->private_data = ctx; | |
2676 | ||
2677 | /* | |
2678 | * does the user want to sample? | |
2679 | */ | |
2680 | if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) { | |
2681 | ret = pfm_setup_buffer_fmt(current, ctx, ctx_flags, 0, req); | |
2682 | if (ret) goto buffer_error; | |
2683 | } | |
2684 | ||
2685 | /* | |
2686 | * init context protection lock | |
2687 | */ | |
2688 | spin_lock_init(&ctx->ctx_lock); | |
2689 | ||
2690 | /* | |
2691 | * context is unloaded | |
2692 | */ | |
2693 | ctx->ctx_state = PFM_CTX_UNLOADED; | |
2694 | ||
2695 | /* | |
2696 | * initialization of context's flags | |
2697 | */ | |
2698 | ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0; | |
2699 | ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0; | |
2700 | ctx->ctx_fl_is_sampling = ctx->ctx_buf_fmt ? 1 : 0; /* assume record() is defined */ | |
2701 | ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0; | |
2702 | /* | |
2703 | * will move to set properties | |
2704 | * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0; | |
2705 | */ | |
2706 | ||
2707 | /* | |
2708 | * init restart completion (starts out "not done", i.e. locked) | |
2709 | */ | |
60f1c444 | 2710 | init_completion(&ctx->ctx_restart_done); |
1da177e4 LT |
2711 | |
2712 | /* | |
2713 | * activation is used in SMP only | |
2714 | */ | |
2715 | ctx->ctx_last_activation = PFM_INVALID_ACTIVATION; | |
2716 | SET_LAST_CPU(ctx, -1); | |
2717 | ||
2718 | /* | |
2719 | * initialize notification message queue | |
2720 | */ | |
2721 | ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0; | |
2722 | init_waitqueue_head(&ctx->ctx_msgq_wait); | |
2723 | init_waitqueue_head(&ctx->ctx_zombieq); | |
2724 | ||
2725 | DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d \n", | |
2726 | ctx, | |
2727 | ctx_flags, | |
2728 | ctx->ctx_fl_system, | |
2729 | ctx->ctx_fl_block, | |
2730 | ctx->ctx_fl_excl_idle, | |
2731 | ctx->ctx_fl_no_msg, | |
2732 | ctx->ctx_fd)); | |
2733 | ||
2734 | /* | |
2735 | * initialize soft PMU state | |
2736 | */ | |
2737 | pfm_reset_pmu_state(ctx); | |
2738 | ||
2739 | return 0; | |
2740 | ||
2741 | buffer_error: | |
2742 | pfm_free_fd(ctx->ctx_fd, filp); | |
2743 | ||
2744 | if (ctx->ctx_buf_fmt) { | |
2745 | pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs); | |
2746 | } | |
2747 | error_file: | |
2748 | pfm_context_free(ctx); | |
2749 | ||
2750 | error: | |
2751 | return ret; | |
2752 | } | |
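/*
 * Illustrative sketch of the matching user-level call: on ia64,
 * pfm_context_create() above services the PFM_CREATE_CONTEXT command
 * of the perfmonctl() system call. Note that the new file descriptor
 * is returned inside the argument block, not as the syscall result.
 * Error handling here is an assumption for illustration.
 */
#if 0	/* example only, never compiled */
	pfarg_context_t ctx_arg;
	int ctx_fd;

	memset(&ctx_arg, 0, sizeof(ctx_arg));
	if (perfmonctl(0, PFM_CREATE_CONTEXT, &ctx_arg, 1) == -1)
		err(1, "PFM_CREATE_CONTEXT");
	ctx_fd = ctx_arg.ctx_fd;	/* handle for all later commands */
#endif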
2753 | ||
2754 | static inline unsigned long | |
2755 | pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset) | |
2756 | { | |
2757 | unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset; | |
2758 | unsigned long new_seed, old_seed = reg->seed, mask = reg->mask; | |
2759 | extern unsigned long carta_random32 (unsigned long seed); | |
2760 | ||
2761 | if (reg->flags & PFM_REGFL_RANDOM) { | |
2762 | new_seed = carta_random32(old_seed); | |
2763 | val -= (old_seed & mask); /* counter values are negative numbers! */ | |
2764 | if ((mask >> 32) != 0) | |
2765 | /* construct a full 64-bit random value: */ | |
2766 | new_seed |= carta_random32(old_seed >> 32) << 32; | |
2767 | reg->seed = new_seed; | |
2768 | } | |
2769 | reg->lval = val; | |
2770 | return val; | |
2771 | } | |
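/*
 * Worked example for the arithmetic above: IA-64 PMDs count upward
 * and raise an overflow on the transition through zero, so a
 * sampling period of N events is armed by seeding the counter with
 * the two's-complement value -N. With PFM_REGFL_RANDOM, the code
 * also subtracts (seed & mask), lengthening the period by a bounded
 * pseudo-random amount.
 */
#if 0	/* example only, never compiled: a 100000-event period */
	unsigned long period = 100000;
	unsigned long lval = -period;	/* 0xfffffffffffe7960 */
#endif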
2772 | ||
2773 | static void | |
2774 | pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset) | |
2775 | { | |
2776 | unsigned long mask = ovfl_regs[0]; | |
2777 | unsigned long reset_others = 0UL; | |
2778 | unsigned long val; | |
2779 | int i; | |
2780 | ||
2781 | /* | |
2782 | * now restore reset value on sampling overflowed counters | |
2783 | */ | |
2784 | mask >>= PMU_FIRST_COUNTER; | |
2785 | for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) { | |
2786 | ||
2787 | if ((mask & 0x1UL) == 0UL) continue; | |
2788 | ||
2789 | ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset); | |
2790 | reset_others |= ctx->ctx_pmds[i].reset_pmds[0]; | |
2791 | ||
2792 | DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val)); | |
2793 | } | |
2794 | ||
2795 | /* | |
2796 | * Now take care of resetting the other registers | |
2797 | */ | |
2798 | for(i = 0; reset_others; i++, reset_others >>= 1) { | |
2799 | ||
2800 | if ((reset_others & 0x1) == 0) continue; | |
2801 | ||
2802 | ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset); | |
2803 | ||
2804 | DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n", | |
2805 | is_long_reset ? "long" : "short", i, val)); | |
2806 | } | |
2807 | } | |
2808 | ||
2809 | static void | |
2810 | pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset) | |
2811 | { | |
2812 | unsigned long mask = ovfl_regs[0]; | |
2813 | unsigned long reset_others = 0UL; | |
2814 | unsigned long val; | |
2815 | int i; | |
2816 | ||
2817 | DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset)); | |
2818 | ||
2819 | if (ctx->ctx_state == PFM_CTX_MASKED) { | |
2820 | pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset); | |
2821 | return; | |
2822 | } | |
2823 | ||
2824 | /* | |
2825 | * now restore reset value on sampling overflowed counters | |
2826 | */ | |
2827 | mask >>= PMU_FIRST_COUNTER; | |
2828 | for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) { | |
2829 | ||
2830 | if ((mask & 0x1UL) == 0UL) continue; | |
2831 | ||
2832 | val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset); | |
2833 | reset_others |= ctx->ctx_pmds[i].reset_pmds[0]; | |
2834 | ||
2835 | DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val)); | |
2836 | ||
2837 | pfm_write_soft_counter(ctx, i, val); | |
2838 | } | |
2839 | ||
2840 | /* | |
2841 | * Now take care of resetting the other registers | |
2842 | */ | |
2843 | for(i = 0; reset_others; i++, reset_others >>= 1) { | |
2844 | ||
2845 | if ((reset_others & 0x1) == 0) continue; | |
2846 | ||
2847 | val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset); | |
2848 | ||
2849 | if (PMD_IS_COUNTING(i)) { | |
2850 | pfm_write_soft_counter(ctx, i, val); | |
2851 | } else { | |
2852 | ia64_set_pmd(i, val); | |
2853 | } | |
2854 | DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n", | |
2855 | is_long_reset ? "long" : "short", i, val)); | |
2856 | } | |
2857 | ia64_srlz_d(); | |
2858 | } | |
2859 | ||
2860 | static int | |
2861 | pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
2862 | { | |
1da177e4 LT |
2863 | struct task_struct *task; |
2864 | pfarg_reg_t *req = (pfarg_reg_t *)arg; | |
2865 | unsigned long value, pmc_pm; | |
2866 | unsigned long smpl_pmds, reset_pmds, impl_pmds; | |
2867 | unsigned int cnum, reg_flags, flags, pmc_type; | |
2868 | int i, can_access_pmu = 0, is_loaded, is_system, expert_mode; | |
2869 | int is_monitor, is_counting, state; | |
2870 | int ret = -EINVAL; | |
2871 | pfm_reg_check_t wr_func; | |
2872 | #define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z)) | |
2873 | ||
2874 | state = ctx->ctx_state; | |
2875 | is_loaded = state == PFM_CTX_LOADED ? 1 : 0; | |
2876 | is_system = ctx->ctx_fl_system; | |
2877 | task = ctx->ctx_task; | |
2878 | impl_pmds = pmu_conf->impl_pmds[0]; | |
2879 | ||
2880 | if (state == PFM_CTX_ZOMBIE) return -EINVAL; | |
2881 | ||
2882 | if (is_loaded) { | |
1da177e4 LT |
2883 | /* |
2884 | * In system wide and when the context is loaded, access can only happen | |
2885 | * when the caller is running on the CPU being monitored by the session. | |
2886 | * It does not have to be the owner (ctx_task) of the context per se. | |
2887 | */ | |
2888 | if (is_system && ctx->ctx_cpu != smp_processor_id()) { | |
2889 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | |
2890 | return -EBUSY; | |
2891 | } | |
2892 | can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; | |
2893 | } | |
2894 | expert_mode = pfm_sysctl.expert_mode; | |
2895 | ||
2896 | for (i = 0; i < count; i++, req++) { | |
2897 | ||
2898 | cnum = req->reg_num; | |
2899 | reg_flags = req->reg_flags; | |
2900 | value = req->reg_value; | |
2901 | smpl_pmds = req->reg_smpl_pmds[0]; | |
2902 | reset_pmds = req->reg_reset_pmds[0]; | |
2903 | flags = 0; | |
2904 | ||
2905 | ||
2906 | if (cnum >= PMU_MAX_PMCS) { | |
2907 | DPRINT(("pmc%u is invalid\n", cnum)); | |
2908 | goto error; | |
2909 | } | |
2910 | ||
2911 | pmc_type = pmu_conf->pmc_desc[cnum].type; | |
2912 | pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1; | |
2913 | is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0; | |
2914 | is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0; | |
2915 | ||
2916 | /* | |
2917 | * we reject all non implemented PMC as well | |
2918 | * as attempts to modify PMC[0-3] which are used | |
2919 | * as status registers by the PMU | |
2920 | */ | |
2921 | if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) { | |
2922 | DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type)); | |
2923 | goto error; | |
2924 | } | |
2925 | wr_func = pmu_conf->pmc_desc[cnum].write_check; | |
2926 | /* | |
2927 | * If the PMC is a monitor, then if the value is not the default: | |
2928 | * - system-wide session: PMCx.pm=1 (privileged monitor) | |
2929 | * - per-task : PMCx.pm=0 (user monitor) | |
2930 | */ | |
2931 | if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) { | |
2932 | DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n", | |
2933 | cnum, | |
2934 | pmc_pm, | |
2935 | is_system)); | |
2936 | goto error; | |
2937 | } | |
2938 | ||
2939 | if (is_counting) { | |
2940 | /* | |
2941 | * enforce generation of overflow interrupt. Necessary on all | |
2942 | * CPUs. | |
2943 | */ | |
2944 | value |= 1 << PMU_PMC_OI; | |
2945 | ||
2946 | if (reg_flags & PFM_REGFL_OVFL_NOTIFY) { | |
2947 | flags |= PFM_REGFL_OVFL_NOTIFY; | |
2948 | } | |
2949 | ||
2950 | if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM; | |
2951 | ||
2952 | /* verify validity of smpl_pmds */ | |
2953 | if ((smpl_pmds & impl_pmds) != smpl_pmds) { | |
2954 | DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum)); | |
2955 | goto error; | |
2956 | } | |
2957 | ||
2958 | /* verify validity of reset_pmds */ | |
2959 | if ((reset_pmds & impl_pmds) != reset_pmds) { | |
2960 | DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum)); | |
2961 | goto error; | |
2962 | } | |
2963 | } else { | |
2964 | if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) { | |
2965 | DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum)); | |
2966 | goto error; | |
2967 | } | |
2968 | /* eventid on non-counting monitors are ignored */ | |
2969 | } | |
2970 | ||
2971 | /* | |
2972 | * execute write checker, if any | |
2973 | */ | |
2974 | if (likely(expert_mode == 0 && wr_func)) { | |
2975 | ret = (*wr_func)(task, ctx, cnum, &value, regs); | |
2976 | if (ret) goto error; | |
2977 | ret = -EINVAL; | |
2978 | } | |
2979 | ||
2980 | /* | |
2981 | * no error on this register | |
2982 | */ | |
2983 | PFM_REG_RETFLAG_SET(req->reg_flags, 0); | |
2984 | ||
2985 | /* | |
2986 | * Now we commit the changes to the software state | |
2987 | */ | |
2988 | ||
2989 | /* | |
2990 | * update overflow information | |
2991 | */ | |
2992 | if (is_counting) { | |
2993 | /* | |
2994 | * full flag update each time a register is programmed | |
2995 | */ | |
2996 | ctx->ctx_pmds[cnum].flags = flags; | |
2997 | ||
2998 | ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds; | |
2999 | ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds; | |
3000 | ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid; | |
3001 | ||
3002 | /* | |
3003 | * Mark all PMDS to be accessed as used. | |
3004 | * | |
3005 | * We do not keep track of PMC because we have to | |
3006 | * systematically restore ALL of them. | |
3007 | * | |
3008 | * We do not update the used_monitors mask, because | |
3009 | * if we have not programmed them, they will be in | |
3010 | * a quiescent state, therefore we will not need to | |
3011 | * mask/restore them when the context is MASKED. | |
3012 | */ | |
3013 | CTX_USED_PMD(ctx, reset_pmds); | |
3014 | CTX_USED_PMD(ctx, smpl_pmds); | |
3015 | /* | |
3016 | * make sure we do not try to reset on | |
3017 | * restart because we have established new values | |
3018 | */ | |
3019 | if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~1UL << cnum; | |
3020 | } | |
3021 | /* | |
3022 | * Needed in case the user does not initialize the equivalent | |
3023 | * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no | |
3024 | * possible leak here. | |
3025 | */ | |
3026 | CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]); | |
3027 | ||
3028 | /* | |
3029 | * keep track of the monitor PMC that we are using. | |
3030 | * we save the value of the pmc in ctx_pmcs[] and if | |
3031 | * the monitoring is not stopped for the context we also | |
3032 | * place it in the saved state area so that it will be | |
3033 | * picked up later by the context switch code. | |
3034 | * | |
3035 | * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs(). | |
3036 | * | |
35589a8f | 3037 | * The value in th_pmcs[] may be modified on overflow, i.e., when |
1da177e4 LT |
3038 | * monitoring needs to be stopped. |
3039 | */ | |
3040 | if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum); | |
3041 | ||
3042 | /* | |
3043 | * update context state | |
3044 | */ | |
3045 | ctx->ctx_pmcs[cnum] = value; | |
3046 | ||
3047 | if (is_loaded) { | |
3048 | /* | |
3049 | * write thread state | |
3050 | */ | |
35589a8f | 3051 | if (is_system == 0) ctx->th_pmcs[cnum] = value; |
1da177e4 LT |
3052 | |
3053 | /* | |
3054 | * write hardware register if we can | |
3055 | */ | |
3056 | if (can_access_pmu) { | |
3057 | ia64_set_pmc(cnum, value); | |
3058 | } | |
3059 | #ifdef CONFIG_SMP | |
3060 | else { | |
3061 | /* | |
3062 | * per-task SMP only here | |
3063 | * | |
3064 | * we are guaranteed that the task is not running on the other CPU, | |
3065 | * we indicate that this PMC will need to be reloaded if the task | |
3066 | * is rescheduled on the CPU it ran last on. | |
3067 | */ | |
3068 | ctx->ctx_reload_pmcs[0] |= 1UL << cnum; | |
3069 | } | |
3070 | #endif | |
3071 | } | |
3072 | ||
3073 | DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n", | |
3074 | cnum, | |
3075 | value, | |
3076 | is_loaded, | |
3077 | can_access_pmu, | |
3078 | flags, | |
3079 | ctx->ctx_all_pmcs[0], | |
3080 | ctx->ctx_used_pmds[0], | |
3081 | ctx->ctx_pmds[cnum].eventid, | |
3082 | smpl_pmds, | |
3083 | reset_pmds, | |
3084 | ctx->ctx_reload_pmcs[0], | |
3085 | ctx->ctx_used_monitors[0], | |
3086 | ctx->ctx_ovfl_regs[0])); | |
3087 | } | |
3088 | ||
3089 | /* | |
3090 | * make sure the changes are visible | |
3091 | */ | |
3092 | if (can_access_pmu) ia64_srlz_d(); | |
3093 | ||
3094 | return 0; | |
3095 | error: | |
3096 | PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL); | |
3097 | return ret; | |
3098 | } | |
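/*
 * Illustrative sketch of the caller's side: PFM_WRITE_PMCS passes an
 * array of pfarg_reg_t, one entry per PMC, through perfmonctl() on
 * the context fd. Field names match the structure used above; the
 * register number and event encoding are placeholders, not real
 * values for any particular PMU model.
 */
#if 0	/* example only, never compiled */
	pfarg_reg_t pc;

	memset(&pc, 0, sizeof(pc));
	pc.reg_num   = 4;	/* a counting monitor, e.g. PMC4 */
	pc.reg_value = 0x12;	/* placeholder event selection */
	if (perfmonctl(ctx_fd, PFM_WRITE_PMCS, &pc, 1) == -1)
		err(1, "PFM_WRITE_PMCS");
#endif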
3099 | ||
3100 | static int | |
3101 | pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
3102 | { | |
1da177e4 LT |
3103 | struct task_struct *task; |
3104 | pfarg_reg_t *req = (pfarg_reg_t *)arg; | |
3105 | unsigned long value, hw_value, ovfl_mask; | |
3106 | unsigned int cnum; | |
3107 | int i, can_access_pmu = 0, state; | |
3108 | int is_counting, is_loaded, is_system, expert_mode; | |
3109 | int ret = -EINVAL; | |
3110 | pfm_reg_check_t wr_func; | |
3111 | ||
3112 | ||
3113 | state = ctx->ctx_state; | |
3114 | is_loaded = state == PFM_CTX_LOADED ? 1 : 0; | |
3115 | is_system = ctx->ctx_fl_system; | |
3116 | ovfl_mask = pmu_conf->ovfl_val; | |
3117 | task = ctx->ctx_task; | |
3118 | ||
3119 | if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL; | |
3120 | ||
3121 | /* | |
3122 | * on both UP and SMP, we can only write to the PMD when the task is | |
3123 | * the owner of the local PMU. | |
3124 | */ | |
3125 | if (likely(is_loaded)) { | |
1da177e4 LT |
3126 | /* |
3127 | * In system wide and when the context is loaded, access can only happen | |
3128 | * when the caller is running on the CPU being monitored by the session. | |
3129 | * It does not have to be the owner (ctx_task) of the context per se. | |
3130 | */ | |
3131 | if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) { | |
3132 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | |
3133 | return -EBUSY; | |
3134 | } | |
3135 | can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; | |
3136 | } | |
3137 | expert_mode = pfm_sysctl.expert_mode; | |
3138 | ||
3139 | for (i = 0; i < count; i++, req++) { | |
3140 | ||
3141 | cnum = req->reg_num; | |
3142 | value = req->reg_value; | |
3143 | ||
3144 | if (!PMD_IS_IMPL(cnum)) { | |
3145 | DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum)); | |
3146 | goto abort_mission; | |
3147 | } | |
3148 | is_counting = PMD_IS_COUNTING(cnum); | |
3149 | wr_func = pmu_conf->pmd_desc[cnum].write_check; | |
3150 | ||
3151 | /* | |
3152 | * execute write checker, if any | |
3153 | */ | |
3154 | if (unlikely(expert_mode == 0 && wr_func)) { | |
3155 | unsigned long v = value; | |
3156 | ||
3157 | ret = (*wr_func)(task, ctx, cnum, &v, regs); | |
3158 | if (ret) goto abort_mission; | |
3159 | ||
3160 | value = v; | |
3161 | ret = -EINVAL; | |
3162 | } | |
3163 | ||
3164 | /* | |
3165 | * no error on this register | |
3166 | */ | |
3167 | PFM_REG_RETFLAG_SET(req->reg_flags, 0); | |
3168 | ||
3169 | /* | |
3170 | * now commit changes to software state | |
3171 | */ | |
3172 | hw_value = value; | |
3173 | ||
3174 | /* | |
3175 | * update virtualized (64bits) counter | |
3176 | */ | |
3177 | if (is_counting) { | |
3178 | /* | |
3179 | * write context state | |
3180 | */ | |
3181 | ctx->ctx_pmds[cnum].lval = value; | |
3182 | ||
3183 | /* | |
3184 | * when the context is loaded we use the split value | |
3185 | */ | |
3186 | if (is_loaded) { | |
3187 | hw_value = value & ovfl_mask; | |
3188 | value = value & ~ovfl_mask; | |
3189 | } | |
3190 | } | |
3191 | /* | |
3192 | * update reset values (not just for counters) | |
3193 | */ | |
3194 | ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset; | |
3195 | ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset; | |
3196 | ||
3197 | /* | |
3198 | * update randomization parameters (not just for counters) | |
3199 | */ | |
3200 | ctx->ctx_pmds[cnum].seed = req->reg_random_seed; | |
3201 | ctx->ctx_pmds[cnum].mask = req->reg_random_mask; | |
3202 | ||
3203 | /* | |
3204 | * update context value | |
3205 | */ | |
3206 | ctx->ctx_pmds[cnum].val = value; | |
3207 | ||
3208 | /* | |
3209 | * Keep track of what we use | |
3210 | * | |
3211 | * We do not keep track of PMC because we have to | |
3212 | * systematically restore ALL of them. | |
3213 | */ | |
3214 | CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum)); | |
3215 | ||
3216 | /* | |
3217 | * mark this PMD register used as well | |
3218 | */ | |
3219 | CTX_USED_PMD(ctx, RDEP(cnum)); | |
3220 | ||
3221 | /* | |
3222 | * make sure we do not try to reset on | |
3223 | * restart because we have established new values | |
3224 | */ | |
3225 | if (is_counting && state == PFM_CTX_MASKED) { | |
3226 | ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum); | |
3227 | } | |
3228 | ||
3229 | if (is_loaded) { | |
3230 | /* | |
3231 | * write thread state | |
3232 | */ | |
35589a8f | 3233 | if (is_system == 0) ctx->th_pmds[cnum] = hw_value; |
1da177e4 LT |
3234 | |
3235 | /* | |
3236 | * write hardware register if we can | |
3237 | */ | |
3238 | if (can_access_pmu) { | |
3239 | ia64_set_pmd(cnum, hw_value); | |
3240 | } else { | |
3241 | #ifdef CONFIG_SMP | |
3242 | /* | |
3243 | * we are guaranteed that the task is not running on the other CPU, | |
3244 | * we indicate that this PMD will need to be reloaded if the task | |
3245 | * is rescheduled on the CPU it ran last on. | |
3246 | */ | |
3247 | ctx->ctx_reload_pmds[0] |= 1UL << cnum; | |
3248 | #endif | |
3249 | } | |
3250 | } | |
3251 | ||
3252 | DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx " | |
3253 | "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n", | |
3254 | cnum, | |
3255 | value, | |
3256 | is_loaded, | |
3257 | can_access_pmu, | |
3258 | hw_value, | |
3259 | ctx->ctx_pmds[cnum].val, | |
3260 | ctx->ctx_pmds[cnum].short_reset, | |
3261 | ctx->ctx_pmds[cnum].long_reset, | |
3262 | PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N', | |
3263 | ctx->ctx_pmds[cnum].seed, | |
3264 | ctx->ctx_pmds[cnum].mask, | |
3265 | ctx->ctx_used_pmds[0], | |
3266 | ctx->ctx_pmds[cnum].reset_pmds[0], | |
3267 | ctx->ctx_reload_pmds[0], | |
3268 | ctx->ctx_all_pmds[0], | |
3269 | ctx->ctx_ovfl_regs[0])); | |
3270 | } | |
3271 | ||
3272 | /* | |
3273 | * make changes visible | |
3274 | */ | |
3275 | if (can_access_pmu) ia64_srlz_d(); | |
3276 | ||
3277 | return 0; | |
3278 | ||
3279 | abort_mission: | |
3280 | /* | |
3281 | * for now, we have only one possibility for error | |
3282 | */ | |
3283 | PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL); | |
3284 | return ret; | |
3285 | } | |
3286 | ||
3287 | /* | |
3288 | * By way of PROTECT_CTX(), interrupts are masked while we are in this function. | |
3289 | * Therefore we know we do not have to worry about the PMU overflow interrupt. If an | |
3290 | * interrupt is delivered during the call, it will be kept pending until we leave, making | |
3291 | * it appear as if it had been generated at the UNPROTECT_CTX(). At least we are | |
3292 | * guaranteed to return consistent data to the user, though it may simply be stale. It is not | |
3293 | * trivial to handle the overflow while inside the call because we may end up in | |
3294 | * some module's sampling buffer code, causing deadlocks. | |
3295 | */ | |
3296 | static int | |
3297 | pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
3298 | { | |
1da177e4 LT |
3299 | struct task_struct *task; |
3300 | unsigned long val = 0UL, lval, ovfl_mask, sval; | |
3301 | pfarg_reg_t *req = (pfarg_reg_t *)arg; | |
3302 | unsigned int cnum, reg_flags = 0; | |
3303 | int i, can_access_pmu = 0, state; | |
3304 | int is_loaded, is_system, is_counting, expert_mode; | |
3305 | int ret = -EINVAL; | |
3306 | pfm_reg_check_t rd_func; | |
3307 | ||
3308 | /* | |
3309 | * access is possible when loaded only for | |
3310 | * self-monitoring tasks or in UP mode | |
3311 | */ | |
3312 | ||
3313 | state = ctx->ctx_state; | |
3314 | is_loaded = state == PFM_CTX_LOADED ? 1 : 0; | |
3315 | is_system = ctx->ctx_fl_system; | |
3316 | ovfl_mask = pmu_conf->ovfl_val; | |
3317 | task = ctx->ctx_task; | |
3318 | ||
3319 | if (state == PFM_CTX_ZOMBIE) return -EINVAL; | |
3320 | ||
3321 | if (likely(is_loaded)) { | |
1da177e4 LT |
3322 | /* |
3323 | * In system wide and when the context is loaded, access can only happen | |
3324 | * when the caller is running on the CPU being monitored by the session. | |
3325 | * It does not have to be the owner (ctx_task) of the context per se. | |
3326 | */ | |
3327 | if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) { | |
3328 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | |
3329 | return -EBUSY; | |
3330 | } | |
3331 | /* | |
3332 | * this can be true even when not self-monitoring, but only in UP | |
3333 | */ | |
3334 | can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; | |
3335 | ||
3336 | if (can_access_pmu) ia64_srlz_d(); | |
3337 | } | |
3338 | expert_mode = pfm_sysctl.expert_mode; | |
3339 | ||
3340 | DPRINT(("ld=%d apmu=%d ctx_state=%d\n", | |
3341 | is_loaded, | |
3342 | can_access_pmu, | |
3343 | state)); | |
3344 | ||
3345 | /* | |
3346 | * on both UP and SMP, we can only read the PMD from the hardware register when | |
3347 | * the task is the owner of the local PMU. | |
3348 | */ | |
3349 | ||
3350 | for (i = 0; i < count; i++, req++) { | |
3351 | ||
3352 | cnum = req->reg_num; | |
3353 | reg_flags = req->reg_flags; | |
3354 | ||
3355 | if (unlikely(!PMD_IS_IMPL(cnum))) goto error; | |
3356 | /* | |
3357 | * we can only read the register that we use. That includes | |
3358 | * the ones we explicitly initialize AND the ones we want included | |
3359 | * in the sampling buffer (smpl_regs). | |
3360 | * | |
3361 | * Having this restriction allows optimization in the ctxsw routine | |
3362 | * without compromising security (leaks) | |
3363 | */ | |
3364 | if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error; | |
3365 | ||
3366 | sval = ctx->ctx_pmds[cnum].val; | |
3367 | lval = ctx->ctx_pmds[cnum].lval; | |
3368 | is_counting = PMD_IS_COUNTING(cnum); | |
3369 | ||
3370 | /* | |
3371 | * If the task is not the current one, then we check if the | |
3372 | * PMU state is still in the local live registers due to lazy ctxsw. | |
3373 | * If true, then we read directly from the registers. | |
3374 | */ | |
3375 | if (can_access_pmu) { | |
3376 | val = ia64_get_pmd(cnum); | |
3377 | } else { | |
3378 | /* | |
3379 | * context has been saved | |
3380 | * if context is zombie, then task does not exist anymore. | |
3381 | * In this case, we use the full value saved in the context (pfm_flush_regs()). | |
3382 | */ | |
35589a8f | 3383 | val = is_loaded ? ctx->th_pmds[cnum] : 0UL; |
1da177e4 LT |
3384 | } |
3385 | rd_func = pmu_conf->pmd_desc[cnum].read_check; | |
3386 | ||
3387 | if (is_counting) { | |
3388 | /* | |
3389 | * XXX: need to check for overflow when loaded | |
3390 | */ | |
3391 | val &= ovfl_mask; | |
3392 | val += sval; | |
3393 | } | |
3394 | ||
3395 | /* | |
3396 | * execute read checker, if any | |
3397 | */ | |
3398 | if (unlikely(expert_mode == 0 && rd_func)) { | |
3399 | unsigned long v = val; | |
3400 | ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs); | |
3401 | if (ret) goto error; | |
3402 | val = v; | |
3403 | ret = -EINVAL; | |
3404 | } | |
3405 | ||
3406 | PFM_REG_RETFLAG_SET(reg_flags, 0); | |
3407 | ||
3408 | DPRINT(("pmd[%u]=0x%lx\n", cnum, val)); | |
3409 | ||
3410 | /* | |
3411 | * update register return value, abort all if problem during copy. | |
3412 | * we only modify the reg_flags field. Skipping the access check here is fine because | |
3413 | * access has been verified upfront in sys_perfmonctl(). | |
3414 | */ | |
3415 | req->reg_value = val; | |
3416 | req->reg_flags = reg_flags; | |
3417 | req->reg_last_reset_val = lval; | |
3418 | } | |
3419 | ||
3420 | return 0; | |
3421 | ||
3422 | error: | |
3423 | PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL); | |
3424 | return ret; | |
3425 | } | |
3426 | ||
3427 | int | |
3428 | pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) | |
3429 | { | |
3430 | pfm_context_t *ctx; | |
3431 | ||
3432 | if (req == NULL) return -EINVAL; | |
3433 | ||
3434 | ctx = GET_PMU_CTX(); | |
3435 | ||
3436 | if (ctx == NULL) return -EINVAL; | |
3437 | ||
3438 | /* | |
3439 | * for now limit to current task, which is enough when calling | |
3440 | * from overflow handler | |
3441 | */ | |
3442 | if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; | |
3443 | ||
3444 | return pfm_write_pmcs(ctx, req, nreq, regs); | |
3445 | } | |
3446 | EXPORT_SYMBOL(pfm_mod_write_pmcs); | |
3447 | ||
3448 | int | |
3449 | pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) | |
3450 | { | |
3451 | pfm_context_t *ctx; | |
3452 | ||
3453 | if (req == NULL) return -EINVAL; | |
3454 | ||
3455 | ctx = GET_PMU_CTX(); | |
3456 | ||
3457 | if (ctx == NULL) return -EINVAL; | |
3458 | ||
3459 | /* | |
3460 | * for now limit to current task, which is enough when calling | |
3461 | * from overflow handler | |
3462 | */ | |
3463 | if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; | |
3464 | ||
3465 | return pfm_read_pmds(ctx, req, nreq, regs); | |
3466 | } | |
3467 | EXPORT_SYMBOL(pfm_mod_read_pmds); | |
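| /* | |
| * Illustrative sketch, not in the original source: how a hypothetical | |
| * sampling module's overflow handler might use the two exported entry | |
| * points above (pfarg_reg_t comes from <asm/perfmon.h>; the register | |
| * numbers are made up): | |
| * | |
| *	pfarg_reg_t rd = { .reg_num = 5 };	read virtual 64-bit PMD5 | |
| *	pfarg_reg_t wr = { .reg_num = 4, .reg_value = 0UL }; | |
| * | |
| *	if (pfm_mod_read_pmds(current, &rd, 1, regs) == 0) | |
| *		printk(KERN_INFO "pmd5=0x%lx\n", rd.reg_value); | |
| *	pfm_mod_write_pmcs(current, &wr, 1, regs);	reprogram PMC4 | |
| * | |
| * Both calls return -EBUSY unless task == current or the context is | |
| * system-wide, as enforced above. | |
| */ | |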
3468 | ||
3469 | /* | |
3470 | * Only call this function when a process is trying to | |
3471 | * write the debug registers (reading is always allowed) | |
3472 | */ | |
3473 | int | |
3474 | pfm_use_debug_registers(struct task_struct *task) | |
3475 | { | |
3476 | pfm_context_t *ctx = task->thread.pfm_context; | |
3477 | unsigned long flags; | |
3478 | int ret = 0; | |
3479 | ||
3480 | if (pmu_conf->use_rr_dbregs == 0) return 0; | |
3481 | ||
3482 | DPRINT(("called for [%d]\n", task->pid)); | |
3483 | ||
3484 | /* | |
3485 | * do it only once | |
3486 | */ | |
3487 | if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0; | |
3488 | ||
3489 | /* | |
3490 | * Even on SMP, we do not need to use an atomic here because | |
3491 | * the only way in is via ptrace() and this is possible only when the | |
3492 | * process is stopped. Even in the case where the ctxsw out is not totally | |
3493 | * completed by the time we come here, there is no way the 'stopped' process | |
3494 | * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine. | |
3495 | * So this is always safe. | |
3496 | */ | |
3497 | if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1; | |
3498 | ||
3499 | LOCK_PFS(flags); | |
3500 | ||
3501 | /* | |
3502 | * We cannot allow setting breakpoints when system wide monitoring | |
3503 | * sessions are using the debug registers. | |
3504 | */ | |
3505 | if (pfm_sessions.pfs_sys_use_dbregs > 0) | |
3506 | ret = -1; | |
3507 | else | |
3508 | pfm_sessions.pfs_ptrace_use_dbregs++; | |
3509 | ||
3510 | DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n", | |
3511 | pfm_sessions.pfs_ptrace_use_dbregs, | |
3512 | pfm_sessions.pfs_sys_use_dbregs, | |
3513 | task->pid, ret)); | |
3514 | ||
3515 | UNLOCK_PFS(flags); | |
3516 | ||
3517 | return ret; | |
3518 | } | |
3519 | ||
3520 | /* | |
3521 | * This function is called for every task that exits with the | |
3522 | * IA64_THREAD_DBG_VALID set. This indicates a task which was | |
3523 | * able to use the debug registers for debugging purposes via | |
3524 | * ptrace(). Therefore we know it was not using them for | |
3525 | * performance monitoring, so we only decrement the number | |
3526 | * of "ptraced" debug register users to keep the count up to date. | |
3527 | */ | |
3528 | int | |
3529 | pfm_release_debug_registers(struct task_struct *task) | |
3530 | { | |
3531 | unsigned long flags; | |
3532 | int ret; | |
3533 | ||
3534 | if (pmu_conf->use_rr_dbregs == 0) return 0; | |
3535 | ||
3536 | LOCK_PFS(flags); | |
3537 | if (pfm_sessions.pfs_ptrace_use_dbregs == 0) { | |
3538 | printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task->pid); | |
3539 | ret = -1; | |
3540 | } else { | |
3541 | pfm_sessions.pfs_ptrace_use_dbregs--; | |
3542 | ret = 0; | |
3543 | } | |
3544 | UNLOCK_PFS(flags); | |
3545 | ||
3546 | return ret; | |
3547 | } | |
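| /* | |
| * Editor's note, inferred from the code and not in the original source: | |
| * pfs_ptrace_use_dbregs and pfs_sys_use_dbregs behave as two mutually | |
| * exclusive reference counts. pfm_use_debug_registers() refuses a ptrace() | |
| * user while a system-wide session holds the debug registers, and | |
| * pfm_write_ibr_dbr()/pfm_context_load() refuse a system-wide claim while | |
| * pfs_ptrace_use_dbregs != 0, so at most one counter is non-zero at a time. | |
| */ | |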
3548 | ||
3549 | static int | |
3550 | pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
3551 | { | |
3552 | struct task_struct *task; | |
3553 | pfm_buffer_fmt_t *fmt; | |
3554 | pfm_ovfl_ctrl_t rst_ctrl; | |
3555 | int state, is_system; | |
3556 | int ret = 0; | |
3557 | ||
3558 | state = ctx->ctx_state; | |
3559 | fmt = ctx->ctx_buf_fmt; | |
3560 | is_system = ctx->ctx_fl_system; | |
3561 | task = PFM_CTX_TASK(ctx); | |
3562 | ||
3563 | switch(state) { | |
3564 | case PFM_CTX_MASKED: | |
3565 | break; | |
3566 | case PFM_CTX_LOADED: | |
3567 | if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break; | |
3568 | /* fall through */ | |
3569 | case PFM_CTX_UNLOADED: | |
3570 | case PFM_CTX_ZOMBIE: | |
3571 | DPRINT(("invalid state=%d\n", state)); | |
3572 | return -EBUSY; | |
3573 | default: | |
3574 | DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state)); | |
3575 | return -EINVAL; | |
3576 | } | |
3577 | ||
3578 | /* | |
3579 | * In system wide and when the context is loaded, access can only happen | |
3580 | * when the caller is running on the CPU being monitored by the session. | |
3581 | * It does not have to be the owner (ctx_task) of the context per se. | |
3582 | */ | |
3583 | if (is_system && ctx->ctx_cpu != smp_processor_id()) { | |
3584 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | |
3585 | return -EBUSY; | |
3586 | } | |
3587 | ||
3588 | /* sanity check */ | |
3589 | if (unlikely(task == NULL)) { | |
3590 | printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", current->pid); | |
3591 | return -EINVAL; | |
3592 | } | |
3593 | ||
3594 | if (task == current || is_system) { | |
3595 | ||
3596 | fmt = ctx->ctx_buf_fmt; | |
3597 | ||
3598 | DPRINT(("restarting self %d ovfl=0x%lx\n", | |
3599 | task->pid, | |
3600 | ctx->ctx_ovfl_regs[0])); | |
3601 | ||
3602 | if (CTX_HAS_SMPL(ctx)) { | |
3603 | ||
3604 | prefetch(ctx->ctx_smpl_hdr); | |
3605 | ||
3606 | rst_ctrl.bits.mask_monitoring = 0; | |
3607 | rst_ctrl.bits.reset_ovfl_pmds = 0; | |
3608 | ||
3609 | if (state == PFM_CTX_LOADED) | |
3610 | ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs); | |
3611 | else | |
3612 | ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs); | |
3613 | } else { | |
3614 | rst_ctrl.bits.mask_monitoring = 0; | |
3615 | rst_ctrl.bits.reset_ovfl_pmds = 1; | |
3616 | } | |
3617 | ||
3618 | if (ret == 0) { | |
3619 | if (rst_ctrl.bits.reset_ovfl_pmds) | |
3620 | pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET); | |
3621 | ||
3622 | if (rst_ctrl.bits.mask_monitoring == 0) { | |
3623 | DPRINT(("resuming monitoring for [%d]\n", task->pid)); | |
3624 | ||
3625 | if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task); | |
3626 | } else { | |
3627 | DPRINT(("keeping monitoring stopped for [%d]\n", task->pid)); | |
3628 | ||
3629 | // cannot use pfm_stop_monitoring(task, regs); | |
3630 | } | |
3631 | } | |
3632 | /* | |
3633 | * clear overflowed PMD mask to remove any stale information | |
3634 | */ | |
3635 | ctx->ctx_ovfl_regs[0] = 0UL; | |
3636 | ||
3637 | /* | |
3638 | * back to LOADED state | |
3639 | */ | |
3640 | ctx->ctx_state = PFM_CTX_LOADED; | |
3641 | ||
3642 | /* | |
3643 | * XXX: not really useful for self monitoring | |
3644 | */ | |
3645 | ctx->ctx_fl_can_restart = 0; | |
3646 | ||
3647 | return 0; | |
3648 | } | |
3649 | ||
3650 | /* | |
3651 | * restart another task | |
3652 | */ | |
3653 | ||
3654 | /* | |
3655 | * When PFM_CTX_MASKED, we cannot issue a restart before the previous | |
3656 | * one is seen by the task. | |
3657 | */ | |
3658 | if (state == PFM_CTX_MASKED) { | |
3659 | if (ctx->ctx_fl_can_restart == 0) return -EINVAL; | |
3660 | /* | |
3661 | * will prevent subsequent restart before this one is | |
3662 | * seen by the other task | |
3663 | */ | |
3664 | ctx->ctx_fl_can_restart = 0; | |
3665 | } | |
3666 | ||
3667 | /* | |
3668 | * if blocking, then post the completion if PFM_CTX_MASKED, i.e. | |
3669 | * the task is blocked or on its way to block. That's the normal | |
3670 | * restart path. If the monitoring is not masked, then the task | |
3671 | * can be actively monitoring and we cannot directly intervene. | |
3672 | * Therefore we use the trap mechanism to catch the task and | |
3673 | * force it to reset the buffer/reset PMDs. | |
3674 | * | |
3675 | * if non-blocking, then we ensure that the task will go into | |
3676 | * pfm_handle_work() before returning to user mode. | |
3677 | * | |
3678 | * We cannot explicitly reset another task; it MUST always | |
3679 | * be done by the task itself. This works for system wide because | |
3680 | * the tool that is controlling the session is logically doing | |
3681 | * "self-monitoring". | |
3682 | */ | |
3683 | if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) { | |
3684 | DPRINT(("unblocking [%d] \n", task->pid)); | |
60f1c444 | 3685 | complete(&ctx->ctx_restart_done); |
1da177e4 LT |
3686 | } else { |
3687 | DPRINT(("[%d] armed exit trap\n", task->pid)); | |
3688 | ||
3689 | ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET; | |
3690 | ||
3691 | PFM_SET_WORK_PENDING(task, 1); | |
3692 | ||
3693 | pfm_set_task_notify(task); | |
3694 | ||
3695 | /* | |
3696 | * XXX: send reschedule if task runs on another CPU | |
3697 | */ | |
3698 | } | |
3699 | return 0; | |
3700 | } | |
3701 | ||
3702 | static int | |
3703 | pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
3704 | { | |
3705 | unsigned int m = *(unsigned int *)arg; | |
3706 | ||
3707 | pfm_sysctl.debug = m == 0 ? 0 : 1; | |
3708 | ||
1da177e4 LT |
3709 | printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off"); |
3710 | ||
3711 | if (m == 0) { | |
3712 | memset(pfm_stats, 0, sizeof(pfm_stats)); | |
3713 | for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL; | |
3714 | } | |
3715 | return 0; | |
3716 | } | |
3717 | ||
3718 | /* | |
3719 | * arg can be NULL and count can be zero for this function | |
3720 | */ | |
3721 | static int | |
3722 | pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
3723 | { | |
3724 | struct thread_struct *thread = NULL; | |
3725 | struct task_struct *task; | |
3726 | pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg; | |
3727 | unsigned long flags; | |
3728 | dbreg_t dbreg; | |
3729 | unsigned int rnum; | |
3730 | int first_time; | |
3731 | int ret = 0, state; | |
3732 | int i, can_access_pmu = 0; | |
3733 | int is_system, is_loaded; | |
3734 | ||
3735 | if (pmu_conf->use_rr_dbregs == 0) return -EINVAL; | |
3736 | ||
3737 | state = ctx->ctx_state; | |
3738 | is_loaded = state == PFM_CTX_LOADED ? 1 : 0; | |
3739 | is_system = ctx->ctx_fl_system; | |
3740 | task = ctx->ctx_task; | |
3741 | ||
3742 | if (state == PFM_CTX_ZOMBIE) return -EINVAL; | |
3743 | ||
3744 | /* | |
3745 | * on both UP and SMP, we can only write to the debug registers when the task is | |
3746 | * the owner of the local PMU. | |
3747 | */ | |
3748 | if (is_loaded) { | |
3749 | thread = &task->thread; | |
3750 | /* | |
3751 | * In system wide and when the context is loaded, access can only happen | |
3752 | * when the caller is running on the CPU being monitored by the session. | |
3753 | * It does not have to be the owner (ctx_task) of the context per se. | |
3754 | */ | |
3755 | if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) { | |
3756 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | |
3757 | return -EBUSY; | |
3758 | } | |
3759 | can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; | |
3760 | } | |
3761 | ||
3762 | /* | |
3763 | * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w | |
3764 | * ensuring that no real breakpoint can be installed via this call. | |
3765 | * | |
3766 | * IMPORTANT: regs can be NULL in this function | |
3767 | */ | |
3768 | ||
3769 | first_time = ctx->ctx_fl_using_dbreg == 0; | |
3770 | ||
3771 | /* | |
3772 | * don't bother if we are loaded and task is being debugged | |
3773 | */ | |
3774 | if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) { | |
3775 | DPRINT(("debug registers already in use for [%d]\n", task->pid)); | |
3776 | return -EBUSY; | |
3777 | } | |
3778 | ||
3779 | /* | |
3780 | * check for debug registers in system wide mode | |
3781 | * | |
3782 | * Even though a check is done in pfm_context_load(), | |
3783 | * we must repeat it here, in case the registers are | |
3784 | * written after the context is loaded | |
3785 | */ | |
3786 | if (is_loaded) { | |
3787 | LOCK_PFS(flags); | |
3788 | ||
3789 | if (first_time && is_system) { | |
3790 | if (pfm_sessions.pfs_ptrace_use_dbregs) | |
3791 | ret = -EBUSY; | |
3792 | else | |
3793 | pfm_sessions.pfs_sys_use_dbregs++; | |
3794 | } | |
3795 | UNLOCK_PFS(flags); | |
3796 | } | |
3797 | ||
3798 | if (ret != 0) return ret; | |
3799 | ||
3800 | /* | |
3801 | * mark ourself as user of the debug registers for | |
3802 | * perfmon purposes. | |
3803 | */ | |
3804 | ctx->ctx_fl_using_dbreg = 1; | |
3805 | ||
3806 | /* | |
3807 | * clear hardware registers to make sure we don't | |
3808 | * pick up stale state. | |
3809 | * | |
3810 | * for a system wide session, we do not use | |
3811 | * thread.dbr, thread.ibr because this process | |
3812 | * never leaves the current CPU and the state | |
3813 | * is shared by all processes running on it | |
3814 | */ | |
3815 | if (first_time && can_access_pmu) { | |
3816 | DPRINT(("[%d] clearing ibrs, dbrs\n", task->pid)); | |
3817 | for (i=0; i < pmu_conf->num_ibrs; i++) { | |
3818 | ia64_set_ibr(i, 0UL); | |
3819 | ia64_dv_serialize_instruction(); | |
3820 | } | |
3821 | ia64_srlz_i(); | |
3822 | for (i=0; i < pmu_conf->num_dbrs; i++) { | |
3823 | ia64_set_dbr(i, 0UL); | |
3824 | ia64_dv_serialize_data(); | |
3825 | } | |
3826 | ia64_srlz_d(); | |
3827 | } | |
3828 | ||
3829 | /* | |
3830 | * Now install the values into the registers | |
3831 | */ | |
3832 | for (i = 0; i < count; i++, req++) { | |
3833 | ||
3834 | rnum = req->dbreg_num; | |
3835 | dbreg.val = req->dbreg_value; | |
3836 | ||
3837 | ret = -EINVAL; | |
3838 | ||
3839 | if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) { | |
3840 | DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n", | |
3841 | rnum, dbreg.val, mode, i, count)); | |
3842 | ||
3843 | goto abort_mission; | |
3844 | } | |
3845 | ||
3846 | /* | |
3847 | * make sure we do not install an enabled breakpoint | |
3848 | */ | |
3849 | if (rnum & 0x1) { | |
3850 | if (mode == PFM_CODE_RR) | |
3851 | dbreg.ibr.ibr_x = 0; | |
3852 | else | |
3853 | dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0; | |
3854 | } | |
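| /* | |
| * Editor's note, not in the original source: ia64 debug registers come in | |
| * pairs -- even-numbered registers hold the breakpoint address, odd ones | |
| * hold the mask and enable bits (ibr.x for instruction breakpoints, | |
| * dbr.r/dbr.w for data breakpoints). Clearing those bits on every odd | |
| * register is what guarantees the "no real breakpoint" property above. | |
| */ | |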
3855 | ||
3856 | PFM_REG_RETFLAG_SET(req->dbreg_flags, 0); | |
3857 | ||
3858 | /* | |
3859 | * Debug registers, just like PMC, can only be modified | |
3860 | * by a kernel call. Moreover, perfmon() accesses to those | |
3861 | * registers are centralized in this routine. The hardware | |
3862 | * does not modify the value of these registers, therefore, | |
3863 | * if we save them as they are written, we can avoid having | |
3864 | * to save them on context switch out. This is made possible | |
3865 | * by the fact that when perfmon uses debug registers, ptrace() | |
3866 | * won't be able to modify them concurrently. | |
3867 | */ | |
3868 | if (mode == PFM_CODE_RR) { | |
3869 | CTX_USED_IBR(ctx, rnum); | |
3870 | ||
3871 | if (can_access_pmu) { | |
3872 | ia64_set_ibr(rnum, dbreg.val); | |
3873 | ia64_dv_serialize_instruction(); | |
3874 | } | |
3875 | ||
3876 | ctx->ctx_ibrs[rnum] = dbreg.val; | |
3877 | ||
3878 | DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n", | |
3879 | rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu)); | |
3880 | } else { | |
3881 | CTX_USED_DBR(ctx, rnum); | |
3882 | ||
3883 | if (can_access_pmu) { | |
3884 | ia64_set_dbr(rnum, dbreg.val); | |
3885 | ia64_dv_serialize_data(); | |
3886 | } | |
3887 | ctx->ctx_dbrs[rnum] = dbreg.val; | |
3888 | ||
3889 | DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n", | |
3890 | rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu)); | |
3891 | } | |
3892 | } | |
3893 | ||
3894 | return 0; | |
3895 | ||
3896 | abort_mission: | |
3897 | /* | |
3898 | * in case it was our first attempt, we undo the global modifications | |
3899 | */ | |
3900 | if (first_time) { | |
3901 | LOCK_PFS(flags); | |
3902 | if (ctx->ctx_fl_system) { | |
3903 | pfm_sessions.pfs_sys_use_dbregs--; | |
3904 | } | |
3905 | UNLOCK_PFS(flags); | |
3906 | ctx->ctx_fl_using_dbreg = 0; | |
3907 | } | |
3908 | /* | |
3909 | * install error return flag | |
3910 | */ | |
3911 | PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL); | |
3912 | ||
3913 | return ret; | |
3914 | } | |
3915 | ||
3916 | static int | |
3917 | pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
3918 | { | |
3919 | return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs); | |
3920 | } | |
3921 | ||
3922 | static int | |
3923 | pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
3924 | { | |
3925 | return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs); | |
3926 | } | |
3927 | ||
3928 | int | |
3929 | pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) | |
3930 | { | |
3931 | pfm_context_t *ctx; | |
3932 | ||
3933 | if (req == NULL) return -EINVAL; | |
3934 | ||
3935 | ctx = GET_PMU_CTX(); | |
3936 | ||
3937 | if (ctx == NULL) return -EINVAL; | |
3938 | ||
3939 | /* | |
3940 | * for now limit to current task, which is enough when calling | |
3941 | * from overflow handler | |
3942 | */ | |
3943 | if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; | |
3944 | ||
3945 | return pfm_write_ibrs(ctx, req, nreq, regs); | |
3946 | } | |
3947 | EXPORT_SYMBOL(pfm_mod_write_ibrs); | |
3948 | ||
3949 | int | |
3950 | pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) | |
3951 | { | |
3952 | pfm_context_t *ctx; | |
3953 | ||
3954 | if (req == NULL) return -EINVAL; | |
3955 | ||
3956 | ctx = GET_PMU_CTX(); | |
3957 | ||
3958 | if (ctx == NULL) return -EINVAL; | |
3959 | ||
3960 | /* | |
3961 | * for now limit to current task, which is enough when calling | |
3962 | * from overflow handler | |
3963 | */ | |
3964 | if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; | |
3965 | ||
3966 | return pfm_write_dbrs(ctx, req, nreq, regs); | |
3967 | } | |
3968 | EXPORT_SYMBOL(pfm_mod_write_dbrs); | |
3969 | ||
3970 | ||
3971 | static int | |
3972 | pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
3973 | { | |
3974 | pfarg_features_t *req = (pfarg_features_t *)arg; | |
3975 | ||
3976 | req->ft_version = PFM_VERSION; | |
3977 | return 0; | |
3978 | } | |
3979 | ||
3980 | static int | |
3981 | pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
3982 | { | |
3983 | struct pt_regs *tregs; | |
3984 | struct task_struct *task = PFM_CTX_TASK(ctx); | |
3985 | int state, is_system; | |
3986 | ||
3987 | state = ctx->ctx_state; | |
3988 | is_system = ctx->ctx_fl_system; | |
3989 | ||
3990 | /* | |
3991 | * context must be attached to issue the stop command (includes LOADED, MASKED, ZOMBIE) | |
3992 | */ | |
3993 | if (state == PFM_CTX_UNLOADED) return -EINVAL; | |
3994 | ||
3995 | /* | |
3996 | * In system wide and when the context is loaded, access can only happen | |
3997 | * when the caller is running on the CPU being monitored by the session. | |
3998 | * It does not have to be the owner (ctx_task) of the context per se. | |
3999 | */ | |
4000 | if (is_system && ctx->ctx_cpu != smp_processor_id()) { | |
4001 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | |
4002 | return -EBUSY; | |
4003 | } | |
4004 | DPRINT(("task [%d] ctx_state=%d is_system=%d\n", | |
4005 | PFM_CTX_TASK(ctx)->pid, | |
4006 | state, | |
4007 | is_system)); | |
4008 | /* | |
4009 | * in system mode, we need to update the PMU directly | |
4010 | * and the user level state of the caller, which may not | |
4011 | * necessarily be the creator of the context. | |
4012 | */ | |
4013 | if (is_system) { | |
4014 | /* | |
4015 | * Update local PMU first | |
4016 | * | |
4017 | * disable dcr pp | |
4018 | */ | |
4019 | ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP); | |
4020 | ia64_srlz_i(); | |
4021 | ||
4022 | /* | |
4023 | * update local cpuinfo | |
4024 | */ | |
4025 | PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP); | |
4026 | ||
4027 | /* | |
4028 | * stop monitoring, does srlz.i | |
4029 | */ | |
4030 | pfm_clear_psr_pp(); | |
4031 | ||
4032 | /* | |
4033 | * stop monitoring in the caller | |
4034 | */ | |
4035 | ia64_psr(regs)->pp = 0; | |
4036 | ||
4037 | return 0; | |
4038 | } | |
4039 | /* | |
4040 | * per-task mode | |
4041 | */ | |
4042 | ||
4043 | if (task == current) { | |
4044 | /* stop monitoring at kernel level */ | |
4045 | pfm_clear_psr_up(); | |
4046 | ||
4047 | /* | |
4048 | * stop monitoring at the user level | |
4049 | */ | |
4050 | ia64_psr(regs)->up = 0; | |
4051 | } else { | |
6450578f | 4052 | tregs = task_pt_regs(task); |
1da177e4 LT |
4053 | |
4054 | /* | |
4055 | * stop monitoring at the user level | |
4056 | */ | |
4057 | ia64_psr(tregs)->up = 0; | |
4058 | ||
4059 | /* | |
4060 | * monitoring disabled in kernel at next reschedule | |
4061 | */ | |
4062 | ctx->ctx_saved_psr_up = 0; | |
4063 | DPRINT(("task=[%d]\n", task->pid)); | |
4064 | } | |
4065 | return 0; | |
4066 | } | |
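| /* | |
| * Editor's note, not in the original source: pfm_stop()/pfm_start() drive | |
| * two distinct enable bits. System-wide sessions toggle psr.pp together | |
| * with the pp bit of the DCR (privileged monitoring of the whole CPU); | |
| * per-task sessions toggle psr.up, cached in ctx_saved_psr_up when the | |
| * monitored task is not current so it takes effect at the next reschedule. | |
| */ | |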
4067 | ||
4068 | ||
4069 | static int | |
4070 | pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
4071 | { | |
4072 | struct pt_regs *tregs; | |
4073 | int state, is_system; | |
4074 | ||
4075 | state = ctx->ctx_state; | |
4076 | is_system = ctx->ctx_fl_system; | |
4077 | ||
4078 | if (state != PFM_CTX_LOADED) return -EINVAL; | |
4079 | ||
4080 | /* | |
4081 | * In system wide and when the context is loaded, access can only happen | |
4082 | * when the caller is running on the CPU being monitored by the session. | |
4083 | * It does not have to be the owner (ctx_task) of the context per se. | |
4084 | */ | |
4085 | if (is_system && ctx->ctx_cpu != smp_processor_id()) { | |
4086 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | |
4087 | return -EBUSY; | |
4088 | } | |
4089 | ||
4090 | /* | |
4091 | * in system mode, we need to update the PMU directly | |
4092 | * and the user level state of the caller, which may not | |
4093 | * necessarily be the creator of the context. | |
4094 | */ | |
4095 | if (is_system) { | |
4096 | ||
4097 | /* | |
4098 | * set user level psr.pp for the caller | |
4099 | */ | |
4100 | ia64_psr(regs)->pp = 1; | |
4101 | ||
4102 | /* | |
4103 | * now update the local PMU and cpuinfo | |
4104 | */ | |
4105 | PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP); | |
4106 | ||
4107 | /* | |
4108 | * start monitoring at kernel level | |
4109 | */ | |
4110 | pfm_set_psr_pp(); | |
4111 | ||
4112 | /* enable dcr pp */ | |
4113 | ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP); | |
4114 | ia64_srlz_i(); | |
4115 | ||
4116 | return 0; | |
4117 | } | |
4118 | ||
4119 | /* | |
4120 | * per-process mode | |
4121 | */ | |
4122 | ||
4123 | if (ctx->ctx_task == current) { | |
4124 | ||
4125 | /* start monitoring at kernel level */ | |
4126 | pfm_set_psr_up(); | |
4127 | ||
4128 | /* | |
4129 | * activate monitoring at user level | |
4130 | */ | |
4131 | ia64_psr(regs)->up = 1; | |
4132 | ||
4133 | } else { | |
6450578f | 4134 | tregs = task_pt_regs(ctx->ctx_task); |
1da177e4 LT |
4135 | |
4136 | /* | |
4137 | * start monitoring at the kernel level the next | |
4138 | * time the task is scheduled | |
4139 | */ | |
4140 | ctx->ctx_saved_psr_up = IA64_PSR_UP; | |
4141 | ||
4142 | /* | |
4143 | * activate monitoring at user level | |
4144 | */ | |
4145 | ia64_psr(tregs)->up = 1; | |
4146 | } | |
4147 | return 0; | |
4148 | } | |
4149 | ||
4150 | static int | |
4151 | pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
4152 | { | |
4153 | pfarg_reg_t *req = (pfarg_reg_t *)arg; | |
4154 | unsigned int cnum; | |
4155 | int i; | |
4156 | int ret = -EINVAL; | |
4157 | ||
4158 | for (i = 0; i < count; i++, req++) { | |
4159 | ||
4160 | cnum = req->reg_num; | |
4161 | ||
4162 | if (!PMC_IS_IMPL(cnum)) goto abort_mission; | |
4163 | ||
4164 | req->reg_value = PMC_DFL_VAL(cnum); | |
4165 | ||
4166 | PFM_REG_RETFLAG_SET(req->reg_flags, 0); | |
4167 | ||
4168 | DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value)); | |
4169 | } | |
4170 | return 0; | |
4171 | ||
4172 | abort_mission: | |
4173 | PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL); | |
4174 | return ret; | |
4175 | } | |
4176 | ||
4177 | static int | |
4178 | pfm_check_task_exist(pfm_context_t *ctx) | |
4179 | { | |
4180 | struct task_struct *g, *t; | |
4181 | int ret = -ESRCH; | |
4182 | ||
4183 | read_lock(&tasklist_lock); | |
4184 | ||
4185 | do_each_thread (g, t) { | |
4186 | if (t->thread.pfm_context == ctx) { | |
4187 | ret = 0; | |
4188 | break; | |
4189 | } | |
4190 | } while_each_thread (g, t); | |
4191 | ||
4192 | read_unlock(&tasklist_lock); | |
4193 | ||
4194 | DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx)); | |
4195 | ||
4196 | return ret; | |
4197 | } | |
4198 | ||
4199 | static int | |
4200 | pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
4201 | { | |
4202 | struct task_struct *task; | |
4203 | struct thread_struct *thread; | |
4204 | pfm_context_t *old; | |
4205 | unsigned long flags; | |
4206 | #ifndef CONFIG_SMP | |
4207 | struct task_struct *owner_task = NULL; | |
4208 | #endif | |
4209 | pfarg_load_t *req = (pfarg_load_t *)arg; | |
4210 | unsigned long *pmcs_source, *pmds_source; | |
4211 | int the_cpu; | |
4212 | int ret = 0; | |
4213 | int state, is_system, set_dbregs = 0; | |
4214 | ||
4215 | state = ctx->ctx_state; | |
4216 | is_system = ctx->ctx_fl_system; | |
4217 | /* | |
4218 | * can only load from unloaded or terminated state | |
4219 | */ | |
4220 | if (state != PFM_CTX_UNLOADED) { | |
4221 | DPRINT(("cannot load to [%d], invalid ctx_state=%d\n", | |
4222 | req->load_pid, | |
4223 | ctx->ctx_state)); | |
a5a70b75 | 4224 | return -EBUSY; |
1da177e4 LT |
4225 | } |
4226 | ||
4227 | DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg)); | |
4228 | ||
4229 | if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) { | |
4230 | DPRINT(("cannot use blocking mode on self\n")); | |
4231 | return -EINVAL; | |
4232 | } | |
4233 | ||
4234 | ret = pfm_get_task(ctx, req->load_pid, &task); | |
4235 | if (ret) { | |
4236 | DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret)); | |
4237 | return ret; | |
4238 | } | |
4239 | ||
4240 | ret = -EINVAL; | |
4241 | ||
4242 | /* | |
4243 | * system wide is self monitoring only | |
4244 | */ | |
4245 | if (is_system && task != current) { | |
4246 | DPRINT(("system wide is self monitoring only load_pid=%d\n", | |
4247 | req->load_pid)); | |
4248 | goto error; | |
4249 | } | |
4250 | ||
4251 | thread = &task->thread; | |
4252 | ||
4253 | ret = 0; | |
4254 | /* | |
4255 | * cannot load a context which is using range restrictions | |
4256 | * into a task that is being debugged. | |
4257 | */ | |
4258 | if (ctx->ctx_fl_using_dbreg) { | |
4259 | if (thread->flags & IA64_THREAD_DBG_VALID) { | |
4260 | ret = -EBUSY; | |
4261 | DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid)); | |
4262 | goto error; | |
4263 | } | |
4264 | LOCK_PFS(flags); | |
4265 | ||
4266 | if (is_system) { | |
4267 | if (pfm_sessions.pfs_ptrace_use_dbregs) { | |
4268 | DPRINT(("cannot load [%d] dbregs in use\n", task->pid)); | |
4269 | ret = -EBUSY; | |
4270 | } else { | |
4271 | pfm_sessions.pfs_sys_use_dbregs++; | |
4272 | DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task->pid, pfm_sessions.pfs_sys_use_dbregs)); | |
4273 | set_dbregs = 1; | |
4274 | } | |
4275 | } | |
4276 | ||
4277 | UNLOCK_PFS(flags); | |
4278 | ||
4279 | if (ret) goto error; | |
4280 | } | |
4281 | ||
4282 | /* | |
4283 | * SMP system-wide monitoring implies self-monitoring. | |
4284 | * | |
4285 | * The programming model expects the task to | |
4286 | * be pinned on a CPU throughout the session. | |
4287 | * Here we take note of the current CPU at the | |
4288 | * time the context is loaded. No call from | |
4289 | * another CPU will be allowed. | |
4290 | * | |
4291 | * The pinning via sched_setaffinity() | |
4292 | * must be done by the calling task prior | |
4293 | * to this call. | |
4294 | * | |
4295 | * systemwide: keep track of CPU this session is supposed to run on | |
4296 | */ | |
4297 | the_cpu = ctx->ctx_cpu = smp_processor_id(); | |
4298 | ||
4299 | ret = -EBUSY; | |
4300 | /* | |
4301 | * now reserve the session | |
4302 | */ | |
4303 | ret = pfm_reserve_session(current, is_system, the_cpu); | |
4304 | if (ret) goto error; | |
4305 | ||
4306 | /* | |
4307 | * task is necessarily stopped at this point. | |
4308 | * | |
4309 | * If the previous context was zombie, then it got removed in | |
4310 | * pfm_save_regs(). Therefore we should not see it here. | |
4311 | * If we see a context, then this is an active context | |
4312 | * | |
4313 | * XXX: needs to be atomic | |
4314 | */ | |
4315 | DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n", | |
4316 | thread->pfm_context, ctx)); | |
4317 | ||
6bf11e8c | 4318 | ret = -EBUSY; |
1da177e4 LT |
4319 | old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *)); |
4320 | if (old != NULL) { | |
4321 | DPRINT(("load_pid [%d] already has a context\n", req->load_pid)); | |
4322 | goto error_unres; | |
4323 | } | |
4324 | ||
4325 | pfm_reset_msgq(ctx); | |
4326 | ||
4327 | ctx->ctx_state = PFM_CTX_LOADED; | |
4328 | ||
4329 | /* | |
4330 | * link context to task | |
4331 | */ | |
4332 | ctx->ctx_task = task; | |
4333 | ||
4334 | if (is_system) { | |
4335 | /* | |
4336 | * we load as stopped | |
4337 | */ | |
4338 | PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE); | |
4339 | PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP); | |
4340 | ||
4341 | if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE); | |
4342 | } else { | |
4343 | thread->flags |= IA64_THREAD_PM_VALID; | |
4344 | } | |
4345 | ||
4346 | /* | |
4347 | * propagate into thread-state | |
4348 | */ | |
4349 | pfm_copy_pmds(task, ctx); | |
4350 | pfm_copy_pmcs(task, ctx); | |
4351 | ||
35589a8f KA |
4352 | pmcs_source = ctx->th_pmcs; |
4353 | pmds_source = ctx->th_pmds; | |
1da177e4 LT |
4354 | |
4355 | /* | |
4356 | * always the case for system-wide | |
4357 | */ | |
4358 | if (task == current) { | |
4359 | ||
4360 | if (is_system == 0) { | |
4361 | ||
4362 | /* allow user level control */ | |
4363 | ia64_psr(regs)->sp = 0; | |
4364 | DPRINT(("clearing psr.sp for [%d]\n", task->pid)); | |
4365 | ||
4366 | SET_LAST_CPU(ctx, smp_processor_id()); | |
4367 | INC_ACTIVATION(); | |
4368 | SET_ACTIVATION(ctx); | |
4369 | #ifndef CONFIG_SMP | |
4370 | /* | |
4371 | * push the other task out, if any | |
4372 | */ | |
4373 | owner_task = GET_PMU_OWNER(); | |
4374 | if (owner_task) pfm_lazy_save_regs(owner_task); | |
4375 | #endif | |
4376 | } | |
4377 | /* | |
4378 | * load all PMD from ctx to PMU (as opposed to thread state) | |
4379 | * restore all PMC from ctx to PMU | |
4380 | */ | |
4381 | pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]); | |
4382 | pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]); | |
4383 | ||
4384 | ctx->ctx_reload_pmcs[0] = 0UL; | |
4385 | ctx->ctx_reload_pmds[0] = 0UL; | |
4386 | ||
4387 | /* | |
4388 | * guaranteed safe by earlier check against DBG_VALID | |
4389 | */ | |
4390 | if (ctx->ctx_fl_using_dbreg) { | |
4391 | pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs); | |
4392 | pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs); | |
4393 | } | |
4394 | /* | |
4395 | * set new ownership | |
4396 | */ | |
4397 | SET_PMU_OWNER(task, ctx); | |
4398 | ||
4399 | DPRINT(("context loaded on PMU for [%d]\n", task->pid)); | |
4400 | } else { | |
4401 | /* | |
4402 | * when not current, task MUST be stopped, so this is safe | |
4403 | */ | |
6450578f | 4404 | regs = task_pt_regs(task); |
1da177e4 LT |
4405 | |
4406 | /* force a full reload */ | |
4407 | ctx->ctx_last_activation = PFM_INVALID_ACTIVATION; | |
4408 | SET_LAST_CPU(ctx, -1); | |
4409 | ||
4410 | /* initial saved psr (stopped) */ | |
4411 | ctx->ctx_saved_psr_up = 0UL; | |
4412 | ia64_psr(regs)->up = ia64_psr(regs)->pp = 0; | |
4413 | } | |
4414 | ||
4415 | ret = 0; | |
4416 | ||
4417 | error_unres: | |
4418 | if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu); | |
4419 | error: | |
4420 | /* | |
4421 | * we must undo the dbregs setting (for system-wide) | |
4422 | */ | |
4423 | if (ret && set_dbregs) { | |
4424 | LOCK_PFS(flags); | |
4425 | pfm_sessions.pfs_sys_use_dbregs--; | |
4426 | UNLOCK_PFS(flags); | |
4427 | } | |
4428 | /* | |
4429 | * release task, there is now a link with the context | |
4430 | */ | |
4431 | if (is_system == 0 && task != current) { | |
4432 | pfm_put_task(task); | |
4433 | ||
4434 | if (ret == 0) { | |
4435 | ret = pfm_check_task_exist(ctx); | |
4436 | if (ret) { | |
4437 | ctx->ctx_state = PFM_CTX_UNLOADED; | |
4438 | ctx->ctx_task = NULL; | |
4439 | } | |
4440 | } | |
4441 | } | |
4442 | return ret; | |
4443 | } | |
4444 | ||
4445 | /* | |
4446 | * in this function, we do not need to increase the use count | |
4447 | * for the task via get_task_struct(), because we hold the | |
4448 | * context lock. If the task were to disappear while having | |
4449 | * a context attached, it would go through pfm_exit_thread() | |
4450 | * which also grabs the context lock and would therefore block | |
4451 | * until we are done here. | |
4452 | */ | |
4453 | static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx); | |
4454 | ||
4455 | static int | |
4456 | pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
4457 | { | |
4458 | struct task_struct *task = PFM_CTX_TASK(ctx); | |
4459 | struct pt_regs *tregs; | |
4460 | int prev_state, is_system; | |
4461 | int ret; | |
4462 | ||
4463 | DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1)); | |
4464 | ||
4465 | prev_state = ctx->ctx_state; | |
4466 | is_system = ctx->ctx_fl_system; | |
4467 | ||
4468 | /* | |
4469 | * unload only when necessary | |
4470 | */ | |
4471 | if (prev_state == PFM_CTX_UNLOADED) { | |
4472 | DPRINT(("ctx_state=%d, nothing to do\n", prev_state)); | |
4473 | return 0; | |
4474 | } | |
4475 | ||
4476 | /* | |
4477 | * clear psr and dcr bits | |
4478 | */ | |
4479 | ret = pfm_stop(ctx, NULL, 0, regs); | |
4480 | if (ret) return ret; | |
4481 | ||
4482 | ctx->ctx_state = PFM_CTX_UNLOADED; | |
4483 | ||
4484 | /* | |
4485 | * in system mode, we need to update the PMU directly | |
4486 | * and the user level state of the caller, which may not | |
4487 | * necessarily be the creator of the context. | |
4488 | */ | |
4489 | if (is_system) { | |
4490 | ||
4491 | /* | |
4492 | * Update cpuinfo | |
4493 | * | |
4494 | * local PMU is taken care of in pfm_stop() | |
4495 | */ | |
4496 | PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE); | |
4497 | PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE); | |
4498 | ||
4499 | /* | |
4500 | * save PMDs in context | |
4501 | * release ownership | |
4502 | */ | |
4503 | pfm_flush_pmds(current, ctx); | |
4504 | ||
4505 | /* | |
4506 | * at this point we are done with the PMU | |
4507 | * so we can unreserve the resource. | |
4508 | */ | |
4509 | if (prev_state != PFM_CTX_ZOMBIE) | |
4510 | pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu); | |
4511 | ||
4512 | /* | |
4513 | * disconnect context from task | |
4514 | */ | |
4515 | task->thread.pfm_context = NULL; | |
4516 | /* | |
4517 | * disconnect task from context | |
4518 | */ | |
4519 | ctx->ctx_task = NULL; | |
4520 | ||
4521 | /* | |
4522 | * There is nothing more to cleanup here. | |
4523 | */ | |
4524 | return 0; | |
4525 | } | |
4526 | ||
4527 | /* | |
4528 | * per-task mode | |
4529 | */ | |
6450578f | 4530 | tregs = task == current ? regs : task_pt_regs(task); |
1da177e4 LT |
4531 | |
4532 | if (task == current) { | |
4533 | /* | |
4534 | * cancel user level control | |
4535 | */ | |
4536 | ia64_psr(regs)->sp = 1; | |
4537 | ||
4538 | DPRINT(("setting psr.sp for [%d]\n", task->pid)); | |
4539 | } | |
4540 | /* | |
4541 | * save PMDs to context | |
4542 | * release ownership | |
4543 | */ | |
4544 | pfm_flush_pmds(task, ctx); | |
4545 | ||
4546 | /* | |
4547 | * at this point we are done with the PMU | |
4548 | * so we can unreserve the resource. | |
4549 | * | |
4550 | * when state was ZOMBIE, we have already unreserved. | |
4551 | */ | |
4552 | if (prev_state != PFM_CTX_ZOMBIE) | |
4553 | pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu); | |
4554 | ||
4555 | /* | |
4556 | * reset activation counter and psr | |
4557 | */ | |
4558 | ctx->ctx_last_activation = PFM_INVALID_ACTIVATION; | |
4559 | SET_LAST_CPU(ctx, -1); | |
4560 | ||
4561 | /* | |
4562 | * PMU state will not be restored | |
4563 | */ | |
4564 | task->thread.flags &= ~IA64_THREAD_PM_VALID; | |
4565 | ||
4566 | /* | |
4567 | * break links between context and task | |
4568 | */ | |
4569 | task->thread.pfm_context = NULL; | |
4570 | ctx->ctx_task = NULL; | |
4571 | ||
4572 | PFM_SET_WORK_PENDING(task, 0); | |
4573 | ||
4574 | ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE; | |
4575 | ctx->ctx_fl_can_restart = 0; | |
4576 | ctx->ctx_fl_going_zombie = 0; | |
4577 | ||
4578 | DPRINT(("disconnected [%d] from context\n", task->pid)); | |
4579 | ||
4580 | return 0; | |
4581 | } | |
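| /* | |
| * Editor's note, not in the original source: the unload sequence is thus | |
| * stop monitoring (pfm_stop), flush live PMDs back into the context | |
| * (pfm_flush_pmds), release the session unless a ZOMBIE context already | |
| * did so in pfm_save_regs(), then sever the task<->context links in both | |
| * directions. | |
| */ | |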
4582 | ||
4583 | ||
4584 | /* | |
4585 | * called only from exit_thread(): task == current | |
4586 | * we come here only if current has a context attached (loaded or masked) | |
4587 | */ | |
4588 | void | |
4589 | pfm_exit_thread(struct task_struct *task) | |
4590 | { | |
4591 | pfm_context_t *ctx; | |
4592 | unsigned long flags; | |
6450578f | 4593 | struct pt_regs *regs = task_pt_regs(task); |
1da177e4 LT |
4594 | int ret, state; |
4595 | int free_ok = 0; | |
4596 | ||
4597 | ctx = PFM_GET_CTX(task); | |
4598 | ||
4599 | PROTECT_CTX(ctx, flags); | |
4600 | ||
4601 | DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task->pid)); | |
4602 | ||
4603 | state = ctx->ctx_state; | |
4604 | switch(state) { | |
4605 | case PFM_CTX_UNLOADED: | |
4606 | /* | |
4607 | * only comes to this function if pfm_context is not NULL, i.e., cannot | |
4608 | * be in unloaded state | |
4609 | */ | |
4610 | printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid); | |
4611 | break; | |
4612 | case PFM_CTX_LOADED: | |
4613 | case PFM_CTX_MASKED: | |
4614 | ret = pfm_context_unload(ctx, NULL, 0, regs); | |
4615 | if (ret) { | |
4616 | printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret); | |
4617 | } | |
4618 | DPRINT(("ctx unloaded for current state was %d\n", state)); | |
4619 | ||
4620 | pfm_end_notify_user(ctx); | |
4621 | break; | |
4622 | case PFM_CTX_ZOMBIE: | |
4623 | ret = pfm_context_unload(ctx, NULL, 0, regs); | |
4624 | if (ret) { | |
4625 | printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret); | |
4626 | } | |
4627 | free_ok = 1; | |
4628 | break; | |
4629 | default: | |
4630 | printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task->pid, state); | |
4631 | break; | |
4632 | } | |
4633 | UNPROTECT_CTX(ctx, flags); | |
4634 | ||
4635 | { u64 psr = pfm_get_psr(); | |
4636 | BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP)); | |
4637 | BUG_ON(GET_PMU_OWNER()); | |
4638 | BUG_ON(ia64_psr(regs)->up); | |
4639 | BUG_ON(ia64_psr(regs)->pp); | |
4640 | } | |
4641 | ||
4642 | /* | |
4643 | * All memory free operations (especially for vmalloc'ed memory) | |
4644 | * MUST be done with interrupts ENABLED. | |
4645 | */ | |
4646 | if (free_ok) pfm_context_free(ctx); | |
4647 | } | |
4648 | ||
4649 | /* | |
4650 | * functions MUST be listed in the increasing order of their index (see perfmon.h) | |
4651 | */ | |
4652 | #define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz } | |
4653 | #define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL } | |
4654 | #define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP) | |
4655 | #define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW) | |
4656 | #define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL} | |
4657 | ||
4658 | static pfm_cmd_desc_t pfm_cmd_tab[]={ | |
4659 | /* 0 */PFM_CMD_NONE, | |
4660 | /* 1 */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL), | |
4661 | /* 2 */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL), | |
4662 | /* 3 */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL), | |
4663 | /* 4 */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS), | |
4664 | /* 5 */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS), | |
4665 | /* 6 */PFM_CMD_NONE, | |
4666 | /* 7 */PFM_CMD_NONE, | |
4667 | /* 8 */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize), | |
4668 | /* 9 */PFM_CMD_NONE, | |
4669 | /* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW), | |
4670 | /* 11 */PFM_CMD_NONE, | |
4671 | /* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL), | |
4672 | /* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL), | |
4673 | /* 14 */PFM_CMD_NONE, | |
4674 | /* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL), | |
4675 | /* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL), | |
4676 | /* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS), | |
4677 | /* 18 */PFM_CMD_NONE, | |
4678 | /* 19 */PFM_CMD_NONE, | |
4679 | /* 20 */PFM_CMD_NONE, | |
4680 | /* 21 */PFM_CMD_NONE, | |
4681 | /* 22 */PFM_CMD_NONE, | |
4682 | /* 23 */PFM_CMD_NONE, | |
4683 | /* 24 */PFM_CMD_NONE, | |
4684 | /* 25 */PFM_CMD_NONE, | |
4685 | /* 26 */PFM_CMD_NONE, | |
4686 | /* 27 */PFM_CMD_NONE, | |
4687 | /* 28 */PFM_CMD_NONE, | |
4688 | /* 29 */PFM_CMD_NONE, | |
4689 | /* 30 */PFM_CMD_NONE, | |
4690 | /* 31 */PFM_CMD_NONE, | |
4691 | /* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL), | |
4692 | /* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL) | |
4693 | }; | |
4694 | #define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t)) | |
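| /* | |
| * Illustrative sketch, not in the original source: how the table above is | |
| * reached from user space (command values come from <asm/perfmon.h> and | |
| * match the slot numbers; the PMC number and value are made up): | |
| * | |
| *	pfarg_context_t c; memset(&c, 0, sizeof(c)); | |
| *	perfmonctl(0, PFM_CREATE_CONTEXT, &c, 1);	slot 8, fills c.ctx_fd | |
| *	pfarg_reg_t pc = { .reg_num = 4, .reg_value = 0UL }; | |
| *	perfmonctl(c.ctx_fd, PFM_WRITE_PMCS, &pc, 1);	slot 1 | |
| *	perfmonctl(c.ctx_fd, PFM_START, NULL, 0);	slot 5, no argument | |
| */ | |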
4695 | ||
4696 | static int | |
4697 | pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags) | |
4698 | { | |
4699 | struct task_struct *task; | |
4700 | int state, old_state; | |
4701 | ||
4702 | recheck: | |
4703 | state = ctx->ctx_state; | |
4704 | task = ctx->ctx_task; | |
4705 | ||
4706 | if (task == NULL) { | |
4707 | DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state)); | |
4708 | return 0; | |
4709 | } | |
4710 | ||
4711 | DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n", | |
4712 | ctx->ctx_fd, | |
4713 | state, | |
4714 | task->pid, | |
4715 | task->state, PFM_CMD_STOPPED(cmd))); | |
4716 | ||
4717 | /* | |
4718 | * self-monitoring always ok. | |
4719 | * | |
4720 | * for system-wide the caller can either be the creator of the | |
4721 | * context (the one to which the context is attached) OR | |
4722 | * a task running on the same CPU as the session. | |
4723 | */ | |
4724 | if (task == current || ctx->ctx_fl_system) return 0; | |
4725 | ||
4726 | /* | |
a5a70b75 | 4727 | * we are monitoring another thread |
1da177e4 | 4728 | */ |
a5a70b75 | 4729 | switch(state) { |
4730 | case PFM_CTX_UNLOADED: | |
4731 | /* | |
4732 | * if context is UNLOADED we are safe to go | |
4733 | */ | |
4734 | return 0; | |
4735 | case PFM_CTX_ZOMBIE: | |
4736 | /* | |
4737 | * no command can operate on a zombie context | |
4738 | */ | |
4739 | DPRINT(("cmd %d state zombie cannot operate on context\n", cmd)); | |
4740 | return -EINVAL; | |
4741 | case PFM_CTX_MASKED: | |
4742 | /* | |
4743 | * PMU state has been saved to software even though | |
4744 | * the thread may still be running. | |
4745 | */ | |
4746 | if (cmd != PFM_UNLOAD_CONTEXT) return 0; | |
1da177e4 LT |
4747 | } |
4748 | ||
4749 | /* | |
4750 | * context is LOADED or MASKED. Some commands may need to have | |
4751 | * the task stopped. | |
4752 | * | |
4753 | * We could lift this restriction for UP but it would mean that | |
4754 | * the user has no guarantee the task would not run between | |
4755 | * two successive calls to perfmonctl(). That's probably OK. | |
4756 | * If this user wants to ensure the task does not run, then | |
4757 | * the task must be stopped. | |
4758 | */ | |
4759 | if (PFM_CMD_STOPPED(cmd)) { | |
4760 | if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) { | |
4761 | DPRINT(("[%d] task not in stopped state\n", task->pid)); | |
4762 | return -EBUSY; | |
4763 | } | |
4764 | /* | |
4765 | * task is now stopped, wait for ctxsw out | |
4766 | * | |
4767 | * This is an interesting point in the code. | |
4768 | * We need to unprotect the context because | |
4769 | * the pfm_save_regs() routine needs to grab | |
4770 | * the same lock. There is danger in doing | |
4771 | * this because it leaves a window open for | |
4772 | * another task to get access to the context | |
4773 | * and possibly change its state. The one thing | |
4774 | * that is not possible is for the context to disappear | |
4775 | * because we are protected by the VFS layer, i.e., | |
4776 | * get_fd()/put_fd(). | |
4777 | */ | |
4778 | old_state = state; | |
4779 | ||
4780 | UNPROTECT_CTX(ctx, flags); | |
4781 | ||
4782 | wait_task_inactive(task); | |
4783 | ||
4784 | PROTECT_CTX(ctx, flags); | |
4785 | ||
4786 | /* | |
4787 | * we must recheck to verify if state has changed | |
4788 | */ | |
4789 | if (ctx->ctx_state != old_state) { | |
4790 | DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state)); | |
4791 | goto recheck; | |
4792 | } | |
4793 | } | |
4794 | return 0; | |
4795 | } | |
4796 | ||
4797 | /* | |
4798 | * system-call entry point (must return long) | |
4799 | */ | |
4800 | asmlinkage long | |
4801 | sys_perfmonctl (int fd, int cmd, void __user *arg, int count) | |
4802 | { | |
4803 | struct file *file = NULL; | |
4804 | pfm_context_t *ctx = NULL; | |
4805 | unsigned long flags = 0UL; | |
4806 | void *args_k = NULL; | |
4807 | long ret; /* will expand int return types */ | |
4808 | size_t base_sz, sz, xtra_sz = 0; | |
4809 | int narg, completed_args = 0, call_made = 0, cmd_flags; | |
4810 | int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs); | |
4811 | int (*getsize)(void *arg, size_t *sz); | |
4812 | #define PFM_MAX_ARGSIZE 4096 | |
4813 | ||
4814 | /* | |
4815 | * reject any call if perfmon was disabled at initialization | |
4816 | */ | |
4817 | if (unlikely(pmu_conf == NULL)) return -ENOSYS; | |
4818 | ||
4819 | if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) { | |
4820 | DPRINT(("invalid cmd=%d\n", cmd)); | |
4821 | return -EINVAL; | |
4822 | } | |
4823 | ||
4824 | func = pfm_cmd_tab[cmd].cmd_func; | |
4825 | narg = pfm_cmd_tab[cmd].cmd_narg; | |
4826 | base_sz = pfm_cmd_tab[cmd].cmd_argsize; | |
4827 | getsize = pfm_cmd_tab[cmd].cmd_getsize; | |
4828 | cmd_flags = pfm_cmd_tab[cmd].cmd_flags; | |
4829 | ||
4830 | if (unlikely(func == NULL)) { | |
4831 | DPRINT(("invalid cmd=%d\n", cmd)); | |
4832 | return -EINVAL; | |
4833 | } | |
4834 | ||
4835 | DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n", | |
4836 | PFM_CMD_NAME(cmd), | |
4837 | cmd, | |
4838 | narg, | |
4839 | base_sz, | |
4840 | count)); | |
4841 | ||
4842 | /* | |
4843 | * check if number of arguments matches what the command expects | |
4844 | */ | |
4845 | if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count))) | |
4846 | return -EINVAL; | |
4847 | ||
4848 | restart_args: | |
4849 | sz = xtra_sz + base_sz*count; | |
4850 | /* | |
4851 | * limit abuse to min page size | |
4852 | */ | |
4853 | if (unlikely(sz > PFM_MAX_ARGSIZE)) { | |
4854 | printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", current->pid, sz); | |
4855 | return -E2BIG; | |
4856 | } | |
4857 | ||
4858 | /* | |
4859 | * allocate default-sized argument buffer | |
4860 | */ | |
4861 | if (likely(count && args_k == NULL)) { | |
4862 | args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL); | |
4863 | if (args_k == NULL) return -ENOMEM; | |
4864 | } | |
4865 | ||
4866 | ret = -EFAULT; | |
4867 | ||
4868 | /* | |
4869 | * copy arguments | |
4870 | * | |
4871 | * assume sz = 0 for command without parameters | |
4872 | */ | |
4873 | if (sz && copy_from_user(args_k, arg, sz)) { | |
4874 | DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg)); | |
4875 | goto error_args; | |
4876 | } | |
4877 | ||
4878 | /* | |
4879 | * check if command supports extra parameters | |
4880 | */ | |
4881 | if (completed_args == 0 && getsize) { | |
4882 | /* | |
4883 | * get extra parameters size (based on main argument) | |
4884 | */ | |
4885 | ret = (*getsize)(args_k, &xtra_sz); | |
4886 | if (ret) goto error_args; | |
4887 | ||
4888 | completed_args = 1; | |
4889 | ||
4890 | DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz)); | |
4891 | ||
4892 | /* retry if necessary */ | |
4893 | if (likely(xtra_sz)) goto restart_args; | |
4894 | } | |
4895 | ||
4896 | if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd; | |
4897 | ||
4898 | ret = -EBADF; | |
4899 | ||
4900 | file = fget(fd); | |
4901 | if (unlikely(file == NULL)) { | |
4902 | DPRINT(("invalid fd %d\n", fd)); | |
4903 | goto error_args; | |
4904 | } | |
4905 | if (unlikely(PFM_IS_FILE(file) == 0)) { | |
4906 | DPRINT(("fd %d not related to perfmon\n", fd)); | |
4907 | goto error_args; | |
4908 | } | |
4909 | ||
4910 | ctx = (pfm_context_t *)file->private_data; | |
4911 | if (unlikely(ctx == NULL)) { | |
4912 | DPRINT(("no context for fd %d\n", fd)); | |
4913 | goto error_args; | |
4914 | } | |
4915 | prefetch(&ctx->ctx_state); | |
4916 | ||
4917 | PROTECT_CTX(ctx, flags); | |
4918 | ||
4919 | /* | |
4920 | * check task is stopped | |
4921 | */ | |
4922 | ret = pfm_check_task_state(ctx, cmd, flags); | |
4923 | if (unlikely(ret)) goto abort_locked; | |
4924 | ||
4925 | skip_fd: | |
6450578f | 4926 | ret = (*func)(ctx, args_k, count, task_pt_regs(current)); |
1da177e4 LT |
4927 | |
4928 | call_made = 1; | |
4929 | ||
4930 | abort_locked: | |
4931 | if (likely(ctx)) { | |
4932 | DPRINT(("context unlocked\n")); | |
4933 | UNPROTECT_CTX(ctx, flags); | |
1da177e4 LT |
4934 | } |
4935 | ||
4936 | /* copy argument back to user, if needed */ | |
4937 | if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT; | |
4938 | ||
4939 | error_args: | |
b8444d00 SE |
4940 | if (file) |
4941 | fput(file); | |
4942 | ||
b2325fe1 | 4943 | kfree(args_k); |
1da177e4 LT |
4944 | |
4945 | DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret)); | |
4946 | ||
4947 | return ret; | |
4948 | } | |
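/*
 * Usage sketch (user level, not part of this file): one plausible way a
 * monitoring tool drives sys_perfmonctl() above. This is a hedged example:
 * the structures (pfarg_context_t, pfarg_reg_t, pfarg_load_t) and commands
 * come from <asm/perfmon.h>, register numbers are assumptions, and error
 * handling is omitted for brevity.
 *
 *	pfarg_context_t ctx;
 *	pfarg_reg_t pc, pd;
 *	pfarg_load_t load;
 *	int fd;
 *
 *	memset(&ctx, 0, sizeof(ctx));
 *	perfmonctl(0, PFM_CREATE_CONTEXT, &ctx, 1);	// fd argument ignored here
 *	fd = ctx.ctx_fd;				// perfmon file descriptor
 *
 *	memset(&pc, 0, sizeof(pc)); memset(&pd, 0, sizeof(pd));
 *	pc.reg_num = 4;					// a counting PMC (assumed)
 *	pc.reg_value = event_encoding;			// from the PMU documentation
 *	pd.reg_num = 4;					// the PMD paired with PMC4
 *	perfmonctl(fd, PFM_WRITE_PMCS, &pc, 1);
 *	perfmonctl(fd, PFM_WRITE_PMDS, &pd, 1);
 *
 *	memset(&load, 0, sizeof(load));
 *	load.load_pid = getpid();			// attach to ourselves
 *	perfmonctl(fd, PFM_LOAD_CONTEXT, &load, 1);
 *	perfmonctl(fd, PFM_START, NULL, 0);
 *	// ... run the workload to measure ...
 *	perfmonctl(fd, PFM_STOP, NULL, 0);
 *	perfmonctl(fd, PFM_READ_PMDS, &pd, 1);		// pd.reg_value = 64-bit count
 */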
4949 | ||
4950 | static void | |
4951 | pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs) | |
4952 | { | |
4953 | pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt; | |
4954 | pfm_ovfl_ctrl_t rst_ctrl; | |
4955 | int state; | |
4956 | int ret = 0; | |
4957 | ||
4958 | state = ctx->ctx_state; | |
4959 | /* | |
4960 | * Unlock sampling buffer and reset index atomically | |
4961 | * XXX: not really needed when blocking | |
4962 | */ | |
4963 | if (CTX_HAS_SMPL(ctx)) { | |
4964 | ||
4965 | rst_ctrl.bits.mask_monitoring = 0; | |
4966 | rst_ctrl.bits.reset_ovfl_pmds = 0; | |
4967 | ||
4968 | if (state == PFM_CTX_LOADED) | |
4969 | ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs); | |
4970 | else | |
4971 | ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs); | |
4972 | } else { | |
4973 | rst_ctrl.bits.mask_monitoring = 0; | |
4974 | rst_ctrl.bits.reset_ovfl_pmds = 1; | |
4975 | } | |
4976 | ||
4977 | if (ret == 0) { | |
4978 | if (rst_ctrl.bits.reset_ovfl_pmds) { | |
4979 | pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET); | |
4980 | } | |
4981 | if (rst_ctrl.bits.mask_monitoring == 0) { | |
4982 | DPRINT(("resuming monitoring\n")); | |
4983 | if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current); | |
4984 | } else { | |
4985 | DPRINT(("stopping monitoring\n")); | |
4986 | //pfm_stop_monitoring(current, regs); | |
4987 | } | |
4988 | ctx->ctx_state = PFM_CTX_LOADED; | |
4989 | } | |
4990 | } | |
4991 | ||
4992 | /* | |
4993 | * context MUST BE LOCKED when calling | |
4994 | * can only be called for current | |
4995 | */ | |
4996 | static void | |
4997 | pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs) | |
4998 | { | |
4999 | int ret; | |
5000 | ||
5001 | DPRINT(("entering for [%d]\n", current->pid)); | |
5002 | ||
5003 | ret = pfm_context_unload(ctx, NULL, 0, regs); | |
5004 | if (ret) { | |
5005 | printk(KERN_ERR "pfm_context_force_terminate: [%d] unload failed with %d\n", current->pid, ret); | |
5006 | } | |
5007 | ||
5008 | /* | |
5009 | * and wakeup controlling task, indicating we are now disconnected | |
5010 | */ | |
5011 | wake_up_interruptible(&ctx->ctx_zombieq); | |
5012 | ||
5013 | /* | |
5014 | * given that context is still locked, the controlling | |
5015 | * task will only get access when we return from | |
5016 | * pfm_handle_work(). | |
5017 | */ | |
5018 | } | |
5019 | ||
5020 | static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds); | |
4944930a SE |
5021 | /* |
5022 | * pfm_handle_work() can be called with interrupts enabled | |
5023 | * (TIF_NEED_RESCHED) or disabled. The wait_for_completion_interruptible() | |
5024 | * call may sleep, therefore we must re-enable interrupts | |
5025 | * to avoid deadlocks. It is safe to do so because this function | |
5026 | * is called ONLY when returning to user level (PUStk=1), in which case | |
5027 | * there is no risk of kernel stack overflow due to deep | |
5028 | * interrupt nesting. | |
5029 | */ | |
1da177e4 LT |
5030 | void |
5031 | pfm_handle_work(void) | |
5032 | { | |
5033 | pfm_context_t *ctx; | |
5034 | struct pt_regs *regs; | |
4944930a | 5035 | unsigned long flags, dummy_flags; |
1da177e4 LT |
5036 | unsigned long ovfl_regs; |
5037 | unsigned int reason; | |
5038 | int ret; | |
5039 | ||
5040 | ctx = PFM_GET_CTX(current); | |
5041 | if (ctx == NULL) { | |
5042 | printk(KERN_ERR "perfmon: [%d] has no PFM context\n", current->pid); | |
5043 | return; | |
5044 | } | |
5045 | ||
5046 | PROTECT_CTX(ctx, flags); | |
5047 | ||
5048 | PFM_SET_WORK_PENDING(current, 0); | |
5049 | ||
5050 | pfm_clear_task_notify(); | |
5051 | ||
6450578f | 5052 | regs = task_pt_regs(current); |
1da177e4 LT |
5053 | |
5054 | /* | |
5055 | * extract reason for being here and clear | |
5056 | */ | |
5057 | reason = ctx->ctx_fl_trap_reason; | |
5058 | ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE; | |
5059 | ovfl_regs = ctx->ctx_ovfl_regs[0]; | |
5060 | ||
5061 | DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state)); | |
5062 | ||
5063 | /* | |
5064 | * must be done before we check for simple-reset mode | |
5065 | */ | |
5066 | if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) goto do_zombie; | |
5067 | ||
5068 | ||
5069 | //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking; | |
5070 | if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking; | |
5071 | ||
4944930a SE |
5072 | /* |
5073 | * restore interrupt mask to what it was on entry. | |
5074 | * Could be enabled/disabled. | |
5075 | */ | |
1da177e4 LT |
5076 | UNPROTECT_CTX(ctx, flags); |
5077 | ||
4944930a SE |
5078 | /* |
5079 | * force interrupt enable because of wait_for_completion_interruptible() | |
5080 | */ | |
1da177e4 LT |
5081 | local_irq_enable(); |
5082 | ||
5083 | DPRINT(("before block sleeping\n")); | |
5084 | ||
5085 | /* | |
5086 | * may go through without blocking on SMP systems | |
5087 | * if restart has been received already by the time we wait on the completion | |
5088 | */ | |
60f1c444 | 5089 | ret = wait_for_completion_interruptible(&ctx->ctx_restart_done); |
1da177e4 LT |
5090 | |
5091 | DPRINT(("after block sleeping ret=%d\n", ret)); | |
5092 | ||
5093 | /* | |
4944930a SE |
5094 | * lock context and mask interrupts again |
5095 | * We save flags into a dummy because the interrupt | |
5096 | * mask may have changed since entry to this | |
5097 | * function. | |
1da177e4 | 5098 | */ |
4944930a | 5099 | PROTECT_CTX(ctx, dummy_flags); |
1da177e4 LT |
5100 | |
5101 | /* | |
5102 | * we need to read the ovfl_regs only after wake-up | |
5103 | * because a pfm_write_pmds() may have happened in between; | |
5104 | * that can change PMD values, in which case | |
5105 | * ovfl_regs is reset for the new PMD values. | |
5106 | */ | |
5107 | ovfl_regs = ctx->ctx_ovfl_regs[0]; | |
5108 | ||
5109 | if (ctx->ctx_fl_going_zombie) { | |
5110 | do_zombie: | |
5111 | DPRINT(("context is zombie, bailing out\n")); | |
5112 | pfm_context_force_terminate(ctx, regs); | |
5113 | goto nothing_to_do; | |
5114 | } | |
5115 | /* | |
5116 | * if the wait was interrupted we don't restart anything | |
5117 | */ | |
5118 | if (ret < 0) goto nothing_to_do; | |
5119 | ||
5120 | skip_blocking: | |
5121 | pfm_resume_after_ovfl(ctx, ovfl_regs, regs); | |
5122 | ctx->ctx_ovfl_regs[0] = 0UL; | |
5123 | ||
5124 | nothing_to_do: | |
4944930a SE |
5125 | /* |
5126 | * restore flags as they were upon entry | |
5127 | */ | |
1da177e4 LT |
5128 | UNPROTECT_CTX(ctx, flags); |
5129 | } | |
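/*
 * Counterpart sketch (user level, hypothetical): the controlling tool that
 * completes ctx_restart_done above. After consuming an overflow message from
 * the context file descriptor it issues PFM_RESTART, which unblocks the
 * monitored task sleeping in pfm_handle_work(). Field names follow the
 * pfm_msg_t definition in <asm/perfmon.h>:
 *
 *	pfm_msg_t msg;
 *
 *	read(ctx_fd, &msg, sizeof(msg));	// blocks until a notification
 *	if (msg.pfm_gen_msg.msg_type == PFM_MSG_OVFL) {
 *		// ... process the sampling buffer / overflowed PMDs ...
 *		perfmonctl(ctx_fd, PFM_RESTART, NULL, 0);
 *	}
 */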
5130 | ||
5131 | static int | |
5132 | pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg) | |
5133 | { | |
5134 | if (ctx->ctx_state == PFM_CTX_ZOMBIE) { | |
5135 | DPRINT(("ignoring overflow notification, owner is zombie\n")); | |
5136 | return 0; | |
5137 | } | |
5138 | ||
5139 | DPRINT(("waking up somebody\n")); | |
5140 | ||
5141 | if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait); | |
5142 | ||
5143 | /* | |
5144 | * safe, we are not in intr handler, nor in ctxsw when | |
5145 | * we come here | |
5146 | */ | |
5147 | kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN); | |
5148 | ||
5149 | return 0; | |
5150 | } | |
5151 | ||
5152 | static int | |
5153 | pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds) | |
5154 | { | |
5155 | pfm_msg_t *msg = NULL; | |
5156 | ||
5157 | if (ctx->ctx_fl_no_msg == 0) { | |
5158 | msg = pfm_get_new_msg(ctx); | |
5159 | if (msg == NULL) { | |
5160 | printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n"); | |
5161 | return -1; | |
5162 | } | |
5163 | ||
5164 | msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL; | |
5165 | msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd; | |
5166 | msg->pfm_ovfl_msg.msg_active_set = 0; | |
5167 | msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds; | |
5168 | msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL; | |
5169 | msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL; | |
5170 | msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL; | |
5171 | msg->pfm_ovfl_msg.msg_tstamp = 0UL; | |
5172 | } | |
5173 | ||
5174 | DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n", | |
5175 | msg, | |
5176 | ctx->ctx_fl_no_msg, | |
5177 | ctx->ctx_fd, | |
5178 | ovfl_pmds)); | |
5179 | ||
5180 | return pfm_notify_user(ctx, msg); | |
5181 | } | |
5182 | ||
5183 | static int | |
5184 | pfm_end_notify_user(pfm_context_t *ctx) | |
5185 | { | |
5186 | pfm_msg_t *msg; | |
5187 | ||
5188 | msg = pfm_get_new_msg(ctx); | |
5189 | if (msg == NULL) { | |
5190 | printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n"); | |
5191 | return -1; | |
5192 | } | |
5193 | /* clear the whole message so no kernel stack data leaks to user level */ | |
5194 | memset(msg, 0, sizeof(*msg)); | |
5195 | ||
5196 | msg->pfm_end_msg.msg_type = PFM_MSG_END; | |
5197 | msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd; | |
5198 | msg->pfm_ovfl_msg.msg_tstamp = 0UL; | |
5199 | ||
5200 | DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n", | |
5201 | msg, | |
5202 | ctx->ctx_fl_no_msg, | |
5203 | ctx->ctx_fd)); | |
5204 | ||
5205 | return pfm_notify_user(ctx, msg); | |
5206 | } | |
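/*
 * Sketch (user level, hypothetical): both message types built above travel
 * through the context file descriptor, so a tool can multiplex them with
 * poll(2) and use PFM_MSG_END as its termination condition:
 *
 *	struct pollfd pfd = { .fd = ctx_fd, .events = POLLIN };
 *	pfm_msg_t msg;
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		read(ctx_fd, &msg, sizeof(msg));
 *		if (msg.pfm_gen_msg.msg_type == PFM_MSG_END)
 *			break;			// monitored task is done
 *		// PFM_MSG_OVFL: drain samples, then PFM_RESTART
 *	}
 */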
5207 | ||
5208 | /* | |
5209 | * main overflow processing routine. | |
5210 | * it can be called from the interrupt path or explicitly from the context switch code | |
5211 | */ | |
5212 | static void | |
5213 | pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs) | |
5214 | { | |
5215 | pfm_ovfl_arg_t *ovfl_arg; | |
5216 | unsigned long mask; | |
5217 | unsigned long old_val, ovfl_val, new_val; | |
5218 | unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds; | |
5219 | unsigned long tstamp; | |
5220 | pfm_ovfl_ctrl_t ovfl_ctrl; | |
5221 | unsigned int i, has_smpl; | |
5222 | int must_notify = 0; | |
5223 | ||
5224 | if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring; | |
5225 | ||
5226 | /* | |
5227 | * sanity test. Should never happen | |
5228 | */ | |
5229 | if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check; | |
5230 | ||
5231 | tstamp = ia64_get_itc(); | |
5232 | mask = pmc0 >> PMU_FIRST_COUNTER; | |
5233 | ovfl_val = pmu_conf->ovfl_val; | |
5234 | has_smpl = CTX_HAS_SMPL(ctx); | |
5235 | ||
5236 | DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s " | |
5237 | "used_pmds=0x%lx\n", | |
5238 | pmc0, | |
5239 | task ? task->pid: -1, | |
5240 | (regs ? regs->cr_iip : 0), | |
5241 | CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking", | |
5242 | ctx->ctx_used_pmds[0])); | |
5243 | ||
5244 | ||
5245 | /* | |
5246 | * first we update the virtual counters | |
5247 | * assume there was a prior ia64_srlz_d() issued | |
5248 | */ | |
5249 | for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) { | |
5250 | ||
5251 | /* skip pmd which did not overflow */ | |
5252 | if ((mask & 0x1) == 0) continue; | |
5253 | ||
5254 | /* | |
5255 | * Note that the pmd is not necessarily 0 at this point as qualified events | |
5256 | * may have happened before the PMU was frozen. The residual count is not | |
5257 | * taken into consideration here but will be with any read of the pmd via | |
5258 | * pfm_read_pmds(). | |
5259 | */ | |
5260 | old_val = new_val = ctx->ctx_pmds[i].val; | |
5261 | new_val += 1 + ovfl_val; | |
5262 | ctx->ctx_pmds[i].val = new_val; | |
5263 | ||
5264 | /* | |
5265 | * check for overflow condition | |
5266 | */ | |
5267 | if (likely(old_val > new_val)) { | |
5268 | ovfl_pmds |= 1UL << i; | |
5269 | if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i; | |
5270 | } | |
5271 | ||
5272 | DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n", | |
5273 | i, | |
5274 | new_val, | |
5275 | old_val, | |
5276 | ia64_get_pmd(i) & ovfl_val, | |
5277 | ovfl_pmds, | |
5278 | ovfl_notify)); | |
5279 | } | |
5280 | ||
5281 | /* | |
5282 | * there was no 64-bit overflow, nothing else to do | |
5283 | */ | |
5284 | if (ovfl_pmds == 0UL) return; | |
5285 | ||
5286 | /* | |
5287 | * reset all control bits | |
5288 | */ | |
5289 | ovfl_ctrl.val = 0; | |
5290 | reset_pmds = 0UL; | |
5291 | ||
5292 | /* | |
5293 | * if a sampling format module exists, then we "cache" the overflow by | |
5294 | * calling the module's handler() routine. | |
5295 | */ | |
5296 | if (has_smpl) { | |
5297 | unsigned long start_cycles, end_cycles; | |
5298 | unsigned long pmd_mask; | |
5299 | int j, k, ret = 0; | |
5300 | int this_cpu = smp_processor_id(); | |
5301 | ||
5302 | pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER; | |
5303 | ovfl_arg = &ctx->ctx_ovfl_arg; | |
5304 | ||
5305 | prefetch(ctx->ctx_smpl_hdr); | |
5306 | ||
5307 | for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) { | |
5308 | ||
5309 | mask = 1UL << i; | |
5310 | ||
5311 | if ((pmd_mask & 0x1) == 0) continue; | |
5312 | ||
5313 | ovfl_arg->ovfl_pmd = (unsigned char )i; | |
5314 | ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0; | |
5315 | ovfl_arg->active_set = 0; | |
5316 | ovfl_arg->ovfl_ctrl.val = 0; /* module must fill in all fields */ | |
5317 | ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0]; | |
5318 | ||
5319 | ovfl_arg->pmd_value = ctx->ctx_pmds[i].val; | |
5320 | ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval; | |
5321 | ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid; | |
5322 | ||
5323 | /* | |
5324 | * copy values of pmds of interest. Sampling format may copy them | |
5325 | * into sampling buffer. | |
5326 | */ | |
5327 | if (smpl_pmds) { | |
5328 | for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) { | |
5329 | if ((smpl_pmds & 0x1) == 0) continue; | |
5330 | ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j); | |
5331 | DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1])); | |
5332 | } | |
5333 | } | |
5334 | ||
5335 | pfm_stats[this_cpu].pfm_smpl_handler_calls++; | |
5336 | ||
5337 | start_cycles = ia64_get_itc(); | |
5338 | ||
5339 | /* | |
5340 | * call custom buffer format record (handler) routine | |
5341 | */ | |
5342 | ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp); | |
5343 | ||
5344 | end_cycles = ia64_get_itc(); | |
5345 | ||
5346 | /* | |
5347 | * For those controls, we take the union because they have | |
5348 | * an all or nothing behavior. | |
5349 | */ | |
5350 | ovfl_ctrl.bits.notify_user |= ovfl_arg->ovfl_ctrl.bits.notify_user; | |
5351 | ovfl_ctrl.bits.block_task |= ovfl_arg->ovfl_ctrl.bits.block_task; | |
5352 | ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring; | |
5353 | /* | |
5354 | * build the bitmask of pmds to reset now | |
5355 | */ | |
5356 | if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask; | |
5357 | ||
5358 | pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles; | |
5359 | } | |
5360 | /* | |
5361 | * when the module cannot handle the rest of the overflows, we abort right here | |
5362 | */ | |
5363 | if (ret && pmd_mask) { | |
5364 | DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n", | |
5365 | pmd_mask<<PMU_FIRST_COUNTER)); | |
5366 | } | |
5367 | /* | |
5368 | * remove the pmds we reset now from the set of pmds to reset in pfm_restart() | |
5369 | */ | |
5370 | ovfl_pmds &= ~reset_pmds; | |
5371 | } else { | |
5372 | /* | |
5373 | * when no sampling module is used, then the default | |
5374 | * is to notify on overflow if requested by user | |
5375 | */ | |
5376 | ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0; | |
5377 | ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0; | |
5378 | ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */ | |
5379 | ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1; | |
5380 | /* | |
5381 | * if needed, we reset all overflowed pmds | |
5382 | */ | |
5383 | if (ovfl_notify == 0) reset_pmds = ovfl_pmds; | |
5384 | } | |
5385 | ||
5386 | DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds)); | |
5387 | ||
5388 | /* | |
5389 | * reset the requested PMD registers using the short reset values | |
5390 | */ | |
5391 | if (reset_pmds) { | |
5392 | unsigned long bm = reset_pmds; | |
5393 | pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET); | |
5394 | } | |
5395 | ||
5396 | if (ovfl_notify && ovfl_ctrl.bits.notify_user) { | |
5397 | /* | |
5398 | * keep track of what to reset when unblocking | |
5399 | */ | |
5400 | ctx->ctx_ovfl_regs[0] = ovfl_pmds; | |
5401 | ||
5402 | /* | |
5403 | * check for blocking context | |
5404 | */ | |
5405 | if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) { | |
5406 | ||
5407 | ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK; | |
5408 | ||
5409 | /* | |
5410 | * set the perfmon-specific pending-work flag for the task | |
5411 | */ | |
5412 | PFM_SET_WORK_PENDING(task, 1); | |
5413 | ||
5414 | /* | |
5415 | * when coming from ctxsw, current still points to the | |
5416 | * previous task, therefore we must work with task and not current. | |
5417 | */ | |
5418 | pfm_set_task_notify(task); | |
5419 | } | |
5420 | /* | |
5421 | * defer notification until the state is changed (shortens the spin window). the context | |
5422 | * is locked anyway, so the signal receiver would otherwise just spin for nothing. | |
5423 | */ | |
5424 | must_notify = 1; | |
5425 | } | |
5426 | ||
5427 | DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n", | |
5428 | GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1, | |
5429 | PFM_GET_WORK_PENDING(task), | |
5430 | ctx->ctx_fl_trap_reason, | |
5431 | ovfl_pmds, | |
5432 | ovfl_notify, | |
5433 | ovfl_ctrl.bits.mask_monitoring ? 1 : 0)); | |
5434 | /* | |
5435 | * in case monitoring must be stopped, we toggle the psr bits | |
5436 | */ | |
5437 | if (ovfl_ctrl.bits.mask_monitoring) { | |
5438 | pfm_mask_monitoring(task); | |
5439 | ctx->ctx_state = PFM_CTX_MASKED; | |
5440 | ctx->ctx_fl_can_restart = 1; | |
5441 | } | |
5442 | ||
5443 | /* | |
5444 | * send notification now | |
5445 | */ | |
5446 | if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify); | |
5447 | ||
5448 | return; | |
5449 | ||
5450 | sanity_check: | |
5451 | printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n", | |
5452 | smp_processor_id(), | |
5453 | task ? task->pid : -1, | |
5454 | pmc0); | |
5455 | return; | |
5456 | ||
5457 | stop_monitoring: | |
5458 | /* | |
5459 | * in SMP, zombie context is never restored but reclaimed in pfm_load_regs(). | |
5460 | * Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can | |
5461 | * come here as a zombie only if the task is the current task, in which case we | |
5462 | * can access the PMU hardware directly. | |
5463 | * | |
5464 | * Note that zombies do have PM_VALID set. So here we do the minimum. | |
5465 | * | |
5466 | * In case the context was zombified it could not be reclaimed at the time | |
5467 | * the monitoring program exited. At this point, the PMU reservation has been | |
5468 | * returned and the sampling buffer has been freed. We must convert this call | |
5469 | * into a spurious interrupt. However, we must also avoid infinite overflows | |
5470 | * by stopping monitoring for this task. We can only come here for a per-task | |
5471 | * context. All we need to do is to stop monitoring using the psr bits which | |
5472 | * are always task private. By re-enabling secure monitoring, we ensure that | |
5473 | * the monitored task will not be able to re-activate monitoring. | |
5474 | * The task will eventually be context switched out, at which point the context | |
5475 | * will be reclaimed (that includes releasing ownership of the PMU). | |
5476 | * | |
5477 | * So there might be a window of time where the number of per-task sessions is zero | |
5478 | * yet one PMU might have an owner and get at most one overflow interrupt for a zombie | |
5479 | * context. This is safe because if a per-task session comes in, it will push this one | |
5480 | * out and, by virtue of pfm_save_regs(), this one will disappear. If a system-wide | |
5481 | * session is forced on that CPU, given that we use task pinning, pfm_save_regs() will | |
5482 | * also push our zombie context out. | |
5483 | * | |
5484 | * Overall pretty hairy stuff.... | |
5485 | */ | |
5486 | DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task->pid: -1)); | |
5487 | pfm_clear_psr_up(); | |
5488 | ia64_psr(regs)->up = 0; | |
5489 | ia64_psr(regs)->sp = 1; | |
5490 | return; | |
5491 | } | |
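/*
 * Worked example of the virtual 64-bit counter update performed by the
 * handler above (made-up numbers). Assume the PMU implements 47-bit
 * counters, i.e. ovfl_val = (1UL << 47) - 1 = 0x00007fffffffffff:
 *
 *	old_val = ctx->ctx_pmds[i].val;		// say 0UL
 *	new_val = old_val + 1 + ovfl_val;	// adds 2^47
 *	// new_val = 0x0000800000000000: the software-maintained high bits
 *	// absorb the hardware carry. The (old_val > new_val) test then only
 *	// fires on a genuine 64-bit wrap, which is what ovfl_pmds records.
 */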
5492 | ||
5493 | static int | |
5494 | pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs) | |
5495 | { | |
5496 | struct task_struct *task; | |
5497 | pfm_context_t *ctx; | |
5498 | unsigned long flags; | |
5499 | u64 pmc0; | |
5500 | int this_cpu = smp_processor_id(); | |
5501 | int retval = 0; | |
5502 | ||
5503 | pfm_stats[this_cpu].pfm_ovfl_intr_count++; | |
5504 | ||
5505 | /* | |
5506 | * srlz.d done before arriving here | |
5507 | */ | |
5508 | pmc0 = ia64_get_pmc(0); | |
5509 | ||
5510 | task = GET_PMU_OWNER(); | |
5511 | ctx = GET_PMU_CTX(); | |
5512 | ||
5513 | /* | |
5514 | * if we have some pending bits set | |
5515 | * assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1 | |
5516 | */ | |
5517 | if (PMC0_HAS_OVFL(pmc0) && task) { | |
5518 | /* | |
5519 | * we assume that pmc0.fr is always set here | |
5520 | */ | |
5521 | ||
5522 | /* sanity check */ | |
5523 | if (!ctx) goto report_spurious1; | |
5524 | ||
5525 | if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0) | |
5526 | goto report_spurious2; | |
5527 | ||
5528 | PROTECT_CTX_NOPRINT(ctx, flags); | |
5529 | ||
5530 | pfm_overflow_handler(task, ctx, pmc0, regs); | |
5531 | ||
5532 | UNPROTECT_CTX_NOPRINT(ctx, flags); | |
5533 | ||
5534 | } else { | |
5535 | pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++; | |
5536 | retval = -1; | |
5537 | } | |
5538 | /* | |
5539 | * keep it unfrozen at all times | |
5540 | */ | |
5541 | pfm_unfreeze_pmu(); | |
5542 | ||
5543 | return retval; | |
5544 | ||
5545 | report_spurious1: | |
5546 | printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n", | |
5547 | this_cpu, task->pid); | |
5548 | pfm_unfreeze_pmu(); | |
5549 | return -1; | |
5550 | report_spurious2: | |
5551 | printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n", | |
5552 | this_cpu, | |
5553 | task->pid); | |
5554 | pfm_unfreeze_pmu(); | |
5555 | return -1; | |
5556 | } | |
5557 | ||
5558 | static irqreturn_t | |
3bbe486b | 5559 | pfm_interrupt_handler(int irq, void *arg) |
1da177e4 LT |
5560 | { |
5561 | unsigned long start_cycles, total_cycles; | |
5562 | unsigned long min, max; | |
5563 | int this_cpu; | |
5564 | int ret; | |
3bbe486b | 5565 | struct pt_regs *regs = get_irq_regs(); |
1da177e4 LT |
5566 | |
5567 | this_cpu = get_cpu(); | |
a1ecf7f6 TL |
5568 | if (likely(!pfm_alt_intr_handler)) { |
5569 | min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min; | |
5570 | max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max; | |
1da177e4 | 5571 | |
a1ecf7f6 | 5572 | start_cycles = ia64_get_itc(); |
1da177e4 | 5573 | |
a1ecf7f6 | 5574 | ret = pfm_do_interrupt_handler(irq, arg, regs); |
1da177e4 | 5575 | |
a1ecf7f6 | 5576 | total_cycles = ia64_get_itc(); |
1da177e4 | 5577 | |
a1ecf7f6 TL |
5578 | /* |
5579 | * don't measure spurious interrupts | |
5580 | */ | |
5581 | if (likely(ret == 0)) { | |
5582 | total_cycles -= start_cycles; | |
1da177e4 | 5583 | |
a1ecf7f6 TL |
5584 | if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles; |
5585 | if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles; | |
1da177e4 | 5586 | |
a1ecf7f6 TL |
5587 | pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles; |
5588 | } | |
5589 | } | |
5590 | else { | |
5591 | (*pfm_alt_intr_handler->handler)(irq, arg, regs); | |
1da177e4 | 5592 | } |
a1ecf7f6 | 5593 | |
1da177e4 LT |
5594 | put_cpu_no_resched(); |
5595 | return IRQ_HANDLED; | |
5596 | } | |
5597 | ||
5598 | /* | |
5599 | * /proc/perfmon interface, for debug only | |
5600 | */ | |
5601 | ||
5602 | #define PFM_PROC_SHOW_HEADER ((void *)NR_CPUS+1) | |
5603 | ||
5604 | static void * | |
5605 | pfm_proc_start(struct seq_file *m, loff_t *pos) | |
5606 | { | |
5607 | if (*pos == 0) { | |
5608 | return PFM_PROC_SHOW_HEADER; | |
5609 | } | |
5610 | ||
5611 | while (*pos <= NR_CPUS) { | |
5612 | if (cpu_online(*pos - 1)) { | |
5613 | return (void *)*pos; | |
5614 | } | |
5615 | ++*pos; | |
5616 | } | |
5617 | return NULL; | |
5618 | } | |
5619 | ||
5620 | static void * | |
5621 | pfm_proc_next(struct seq_file *m, void *v, loff_t *pos) | |
5622 | { | |
5623 | ++*pos; | |
5624 | return pfm_proc_start(m, pos); | |
5625 | } | |
5626 | ||
5627 | static void | |
5628 | pfm_proc_stop(struct seq_file *m, void *v) | |
5629 | { | |
5630 | } | |
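/*
 * Note on the iterator encoding above (illustrative): seq_file treats a NULL
 * return from start()/next() as end of sequence, so CPU 0 cannot be handed
 * back as (void *)0. Positions are therefore biased by one:
 *
 *	*pos == 0	-> PFM_PROC_SHOW_HEADER	(global header)
 *	*pos == n >= 1	-> (void *)n		(statistics for CPU n-1)
 */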
5631 | ||
5632 | static void | |
5633 | pfm_proc_show_header(struct seq_file *m) | |
5634 | { | |
5635 | struct list_head * pos; | |
5636 | pfm_buffer_fmt_t * entry; | |
5637 | unsigned long flags; | |
5638 | ||
5639 | seq_printf(m, | |
5640 | "perfmon version : %u.%u\n" | |
5641 | "model : %s\n" | |
5642 | "fastctxsw : %s\n" | |
5643 | "expert mode : %s\n" | |
5644 | "ovfl_mask : 0x%lx\n" | |
5645 | "PMU flags : 0x%x\n", | |
5646 | PFM_VERSION_MAJ, PFM_VERSION_MIN, | |
5647 | pmu_conf->pmu_name, | |
5648 | pfm_sysctl.fastctxsw > 0 ? "Yes": "No", | |
5649 | pfm_sysctl.expert_mode > 0 ? "Yes": "No", | |
5650 | pmu_conf->ovfl_val, | |
5651 | pmu_conf->flags); | |
5652 | ||
5653 | LOCK_PFS(flags); | |
5654 | ||
5655 | seq_printf(m, | |
5656 | "proc_sessions : %u\n" | |
5657 | "sys_sessions : %u\n" | |
5658 | "sys_use_dbregs : %u\n" | |
5659 | "ptrace_use_dbregs : %u\n", | |
5660 | pfm_sessions.pfs_task_sessions, | |
5661 | pfm_sessions.pfs_sys_sessions, | |
5662 | pfm_sessions.pfs_sys_use_dbregs, | |
5663 | pfm_sessions.pfs_ptrace_use_dbregs); | |
5664 | ||
5665 | UNLOCK_PFS(flags); | |
5666 | ||
5667 | spin_lock(&pfm_buffer_fmt_lock); | |
5668 | ||
5669 | list_for_each(pos, &pfm_buffer_fmt_list) { | |
5670 | entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list); | |
5671 | seq_printf(m, "format : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n", | |
5672 | entry->fmt_uuid[0], | |
5673 | entry->fmt_uuid[1], | |
5674 | entry->fmt_uuid[2], | |
5675 | entry->fmt_uuid[3], | |
5676 | entry->fmt_uuid[4], | |
5677 | entry->fmt_uuid[5], | |
5678 | entry->fmt_uuid[6], | |
5679 | entry->fmt_uuid[7], | |
5680 | entry->fmt_uuid[8], | |
5681 | entry->fmt_uuid[9], | |
5682 | entry->fmt_uuid[10], | |
5683 | entry->fmt_uuid[11], | |
5684 | entry->fmt_uuid[12], | |
5685 | entry->fmt_uuid[13], | |
5686 | entry->fmt_uuid[14], | |
5687 | entry->fmt_uuid[15], | |
5688 | entry->fmt_name); | |
5689 | } | |
5690 | spin_unlock(&pfm_buffer_fmt_lock); | |
5691 | ||
5692 | } | |
5693 | ||
5694 | static int | |
5695 | pfm_proc_show(struct seq_file *m, void *v) | |
5696 | { | |
5697 | unsigned long psr; | |
5698 | unsigned int i; | |
5699 | int cpu; | |
5700 | ||
5701 | if (v == PFM_PROC_SHOW_HEADER) { | |
5702 | pfm_proc_show_header(m); | |
5703 | return 0; | |
5704 | } | |
5705 | ||
5706 | /* show info for CPU (v - 1) */ | |
5707 | ||
5708 | cpu = (long)v - 1; | |
5709 | seq_printf(m, | |
5710 | "CPU%-2d overflow intrs : %lu\n" | |
5711 | "CPU%-2d overflow cycles : %lu\n" | |
5712 | "CPU%-2d overflow min : %lu\n" | |
5713 | "CPU%-2d overflow max : %lu\n" | |
5714 | "CPU%-2d smpl handler calls : %lu\n" | |
5715 | "CPU%-2d smpl handler cycles : %lu\n" | |
5716 | "CPU%-2d spurious intrs : %lu\n" | |
5717 | "CPU%-2d replay intrs : %lu\n" | |
5718 | "CPU%-2d syst_wide : %d\n" | |
5719 | "CPU%-2d dcr_pp : %d\n" | |
5720 | "CPU%-2d exclude idle : %d\n" | |
5721 | "CPU%-2d owner : %d\n" | |
5722 | "CPU%-2d context : %p\n" | |
5723 | "CPU%-2d activations : %lu\n", | |
5724 | cpu, pfm_stats[cpu].pfm_ovfl_intr_count, | |
5725 | cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles, | |
5726 | cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min, | |
5727 | cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max, | |
5728 | cpu, pfm_stats[cpu].pfm_smpl_handler_calls, | |
5729 | cpu, pfm_stats[cpu].pfm_smpl_handler_cycles, | |
5730 | cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count, | |
5731 | cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count, | |
5732 | cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0, | |
5733 | cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0, | |
5734 | cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0, | |
5735 | cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1, | |
5736 | cpu, pfm_get_cpu_data(pmu_ctx, cpu), | |
5737 | cpu, pfm_get_cpu_data(pmu_activation_number, cpu)); | |
5738 | ||
5739 | if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) { | |
5740 | ||
5741 | psr = pfm_get_psr(); | |
5742 | ||
5743 | ia64_srlz_d(); | |
5744 | ||
5745 | seq_printf(m, | |
5746 | "CPU%-2d psr : 0x%lx\n" | |
5747 | "CPU%-2d pmc0 : 0x%lx\n", | |
5748 | cpu, psr, | |
5749 | cpu, ia64_get_pmc(0)); | |
5750 | ||
5751 | for (i=0; PMC_IS_LAST(i) == 0; i++) { | |
5752 | if (PMC_IS_COUNTING(i) == 0) continue; | |
5753 | seq_printf(m, | |
5754 | "CPU%-2d pmc%u : 0x%lx\n" | |
5755 | "CPU%-2d pmd%u : 0x%lx\n", | |
5756 | cpu, i, ia64_get_pmc(i), | |
5757 | cpu, i, ia64_get_pmd(i)); | |
5758 | } | |
5759 | } | |
5760 | return 0; | |
5761 | } | |
5762 | ||
5763 | struct seq_operations pfm_seq_ops = { | |
5764 | .start = pfm_proc_start, | |
5765 | .next = pfm_proc_next, | |
5766 | .stop = pfm_proc_stop, | |
5767 | .show = pfm_proc_show | |
5768 | }; | |
5769 | ||
5770 | static int | |
5771 | pfm_proc_open(struct inode *inode, struct file *file) | |
5772 | { | |
5773 | return seq_open(file, &pfm_seq_ops); | |
5774 | } | |
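/*
 * Sketch (user level): the statistics emitted above are plain text, so the
 * simplest consumer is a read loop, equivalent to `cat /proc/perfmon`
 * (assuming perfmon support is compiled in):
 *
 *	char buf[4096];
 *	ssize_t n;
 *	int fd = open("/proc/perfmon", O_RDONLY);
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		write(STDOUT_FILENO, buf, n);
 *	close(fd);
 */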
5775 | ||
5776 | ||
5777 | /* | |
5778 | * we come here as soon as local_cpu_data->pfm_syst_wide is set. this happens | |
5779 | * during pfm_enable() hence before pfm_start(). We cannot assume monitoring | |
5780 | * is active or inactive based on mode. We must rely on the value in | |
5781 | * local_cpu_data->pfm_syst_info | |
5782 | */ | |
5783 | void | |
5784 | pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin) | |
5785 | { | |
5786 | struct pt_regs *regs; | |
5787 | unsigned long dcr; | |
5788 | unsigned long dcr_pp; | |
5789 | ||
5790 | dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0; | |
5791 | ||
5792 | /* | |
5793 | * pid 0 is guaranteed to be the idle task. There is one such task with pid 0 | |
5794 | * on every CPU, so we can rely on the pid to identify the idle task. | |
5795 | */ | |
5796 | if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) { | |
6450578f | 5797 | regs = task_pt_regs(task); |
1da177e4 LT |
5798 | ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0; |
5799 | return; | |
5800 | } | |
5801 | /* | |
5802 | * if monitoring has started | |
5803 | */ | |
5804 | if (dcr_pp) { | |
5805 | dcr = ia64_getreg(_IA64_REG_CR_DCR); | |
5806 | /* | |
5807 | * context switching in? | |
5808 | */ | |
5809 | if (is_ctxswin) { | |
5810 | /* mask monitoring for the idle task */ | |
5811 | ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP); | |
5812 | pfm_clear_psr_pp(); | |
5813 | ia64_srlz_i(); | |
5814 | return; | |
5815 | } | |
5816 | /* | |
5817 | * context switching out | |
5818 | * restore monitoring for next task | |
5819 | * | |
5820 | * Due to inlining this odd if-then-else construction generates | |
5821 | * better code. | |
5822 | */ | |
5823 | ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP); | |
5824 | pfm_set_psr_pp(); | |
5825 | ia64_srlz_i(); | |
5826 | } | |
5827 | } | |
5828 | ||
5829 | #ifdef CONFIG_SMP | |
5830 | ||
5831 | static void | |
5832 | pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs) | |
5833 | { | |
5834 | struct task_struct *task = ctx->ctx_task; | |
5835 | ||
5836 | ia64_psr(regs)->up = 0; | |
5837 | ia64_psr(regs)->sp = 1; | |
5838 | ||
5839 | if (GET_PMU_OWNER() == task) { | |
5840 | DPRINT(("cleared ownership for [%d]\n", ctx->ctx_task->pid)); | |
5841 | SET_PMU_OWNER(NULL, NULL); | |
5842 | } | |
5843 | ||
5844 | /* | |
5845 | * disconnect the task from the context and vice-versa | |
5846 | */ | |
5847 | PFM_SET_WORK_PENDING(task, 0); | |
5848 | ||
5849 | task->thread.pfm_context = NULL; | |
5850 | task->thread.flags &= ~IA64_THREAD_PM_VALID; | |
5851 | ||
5852 | DPRINT(("force cleanup for [%d]\n", task->pid)); | |
5853 | } | |
5854 | ||
5855 | ||
5856 | /* | |
5857 | * in 2.6, interrupts are masked when we come here and the runqueue lock is held | |
5858 | */ | |
5859 | void | |
5860 | pfm_save_regs(struct task_struct *task) | |
5861 | { | |
5862 | pfm_context_t *ctx; | |
1da177e4 LT |
5863 | unsigned long flags; |
5864 | u64 psr; | |
5865 | ||
5866 | ||
5867 | ctx = PFM_GET_CTX(task); | |
5868 | if (ctx == NULL) return; | |
1da177e4 LT |
5869 | |
5870 | /* | |
5871 | * we always come here with interrupts ALREADY disabled by | |
5872 | * the scheduler. So we simply need to protect against concurrent | |
5873 | * access, not CPU concurrency. | |
5874 | */ | |
5875 | flags = pfm_protect_ctx_ctxsw(ctx); | |
5876 | ||
5877 | if (ctx->ctx_state == PFM_CTX_ZOMBIE) { | |
6450578f | 5878 | struct pt_regs *regs = task_pt_regs(task); |
1da177e4 LT |
5879 | |
5880 | pfm_clear_psr_up(); | |
5881 | ||
5882 | pfm_force_cleanup(ctx, regs); | |
5883 | ||
5884 | BUG_ON(ctx->ctx_smpl_hdr); | |
5885 | ||
5886 | pfm_unprotect_ctx_ctxsw(ctx, flags); | |
5887 | ||
5888 | pfm_context_free(ctx); | |
5889 | return; | |
5890 | } | |
5891 | ||
5892 | /* | |
5893 | * save current PSR: needed because we modify it | |
5894 | */ | |
5895 | ia64_srlz_d(); | |
5896 | psr = pfm_get_psr(); | |
5897 | ||
5898 | BUG_ON(psr & (IA64_PSR_I)); | |
5899 | ||
5900 | /* | |
5901 | * stop monitoring: | |
5902 | * This is the last instruction which may generate an overflow | |
5903 | * | |
5904 | * We do not need to set psr.sp because it is irrelevant in the kernel. | |
5905 | * It will be restored from ipsr when going back to user level | |
5906 | */ | |
5907 | pfm_clear_psr_up(); | |
5908 | ||
5909 | /* | |
5910 | * keep a copy of psr.up (for reload) | |
5911 | */ | |
5912 | ctx->ctx_saved_psr_up = psr & IA64_PSR_UP; | |
5913 | ||
5914 | /* | |
5915 | * release ownership of this PMU. | |
5916 | * PM interrupts are masked, so nothing | |
5917 | * can happen. | |
5918 | */ | |
5919 | SET_PMU_OWNER(NULL, NULL); | |
5920 | ||
5921 | /* | |
5922 | * we systematically save the PMDs as we have no | |
5923 | * guarantee we will be scheduled on that same | |
5924 | * CPU again. | |
5925 | */ | |
35589a8f | 5926 | pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]); |
1da177e4 LT |
5927 | |
5928 | /* | |
5929 | * save pmc0 ia64_srlz_d() done in pfm_save_pmds() | |
5930 | * we will need it on the restore path to check | |
5931 | * for pending overflow. | |
5932 | */ | |
35589a8f | 5933 | ctx->th_pmcs[0] = ia64_get_pmc(0); |
1da177e4 LT |
5934 | |
5935 | /* | |
5936 | * unfreeze PMU if had pending overflows | |
5937 | */ | |
35589a8f | 5938 | if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu(); |
1da177e4 LT |
5939 | |
5940 | /* | |
5941 | * finally, allow context access. | |
5942 | * interrupts will still be masked after this call. | |
5943 | */ | |
5944 | pfm_unprotect_ctx_ctxsw(ctx, flags); | |
5945 | } | |
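/*
 * For reference, a sketch of what the pfm_save_pmds() call above effectively
 * does with the ctx_used_pmds[0] bitmask (a simplified model under stated
 * assumptions, not the authoritative implementation):
 *
 *	static void save_pmds_sketch(unsigned long *dst, unsigned long mask)
 *	{
 *		int i;
 *
 *		ia64_srlz_d();			// settle outstanding PMU writes
 *		for (i = 0; mask; i++, mask >>= 1) {
 *			if (mask & 0x1)
 *				dst[i] = ia64_get_pmd(i);	// save only used PMDs
 *		}
 *	}
 */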
5946 | ||
5947 | #else /* !CONFIG_SMP */ | |
5948 | void | |
5949 | pfm_save_regs(struct task_struct *task) | |
5950 | { | |
5951 | pfm_context_t *ctx; | |
5952 | u64 psr; | |
5953 | ||
5954 | ctx = PFM_GET_CTX(task); | |
5955 | if (ctx == NULL) return; | |
5956 | ||
5957 | /* | |
5958 | * save current PSR: needed because we modify it | |
5959 | */ | |
5960 | psr = pfm_get_psr(); | |
5961 | ||
5962 | BUG_ON(psr & (IA64_PSR_I)); | |
5963 | ||
5964 | /* | |
5965 | * stop monitoring: | |
5966 | * This is the last instruction which may generate an overflow | |
5967 | * | |
5968 | * We do not need to set psr.sp because it is irrelevant in the kernel. | |
5969 | * It will be restored from ipsr when going back to user level | |
5970 | */ | |
5971 | pfm_clear_psr_up(); | |
5972 | ||
5973 | /* | |
5974 | * keep a copy of psr.up (for reload) | |
5975 | */ | |
5976 | ctx->ctx_saved_psr_up = psr & IA64_PSR_UP; | |
5977 | } | |
5978 | ||
5979 | static void | |
5980 | pfm_lazy_save_regs (struct task_struct *task) | |
5981 | { | |
5982 | pfm_context_t *ctx; | |
1da177e4 LT |
5983 | unsigned long flags; |
5984 | ||
5985 | { u64 psr = pfm_get_psr(); | |
5986 | BUG_ON(psr & IA64_PSR_UP); | |
5987 | } | |
5988 | ||
5989 | ctx = PFM_GET_CTX(task); | |
1da177e4 LT |
5990 | |
5991 | /* | |
5992 | * we need to mask PMU overflow here to | |
5993 | * make sure that we maintain pmc0 until | |
5994 | * we save it. overflow interrupts are | |
5995 | * treated as spurious if there is no | |
5996 | * owner. | |
5997 | * | |
5998 | * XXX: I don't think this is necessary | |
5999 | */ | |
6000 | PROTECT_CTX(ctx,flags); | |
6001 | ||
6002 | /* | |
6003 | * release ownership of this PMU. | |
6004 | * must be done before we save the registers. | |
6005 | * | |
6006 | * after this call any PMU interrupt is treated | |
6007 | * as spurious. | |
6008 | */ | |
6009 | SET_PMU_OWNER(NULL, NULL); | |
6010 | ||
6011 | /* | |
6012 | * save all the pmds we use | |
6013 | */ | |
35589a8f | 6014 | pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]); |
1da177e4 LT |
6015 | |
6016 | /* | |
6017 | * save pmc0 ia64_srlz_d() done in pfm_save_pmds() | |
6018 | * it is needed to check for pending overflow | |
6019 | * on the restore path | |
6020 | */ | |
35589a8f | 6021 | ctx->th_pmcs[0] = ia64_get_pmc(0); |
1da177e4 LT |
6022 | |
6023 | /* | |
6024 | * unfreeze PMU if had pending overflows | |
6025 | */ | |
35589a8f | 6026 | if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu(); |
1da177e4 LT |
6027 | |
6028 | /* | |
6029 | * now we can unmask PMU interrupts; they will | |
6030 | * be treated as purely spurious and we will not | |
6031 | * lose any information | |
6032 | */ | |
6033 | UNPROTECT_CTX(ctx,flags); | |
6034 | } | |
6035 | #endif /* CONFIG_SMP */ | |
6036 | ||
6037 | #ifdef CONFIG_SMP | |
6038 | /* | |
6039 | * in 2.6, interrupts are masked when we come here and the runqueue lock is held | |
6040 | */ | |
6041 | void | |
6042 | pfm_load_regs (struct task_struct *task) | |
6043 | { | |
6044 | pfm_context_t *ctx; | |
1da177e4 LT |
6045 | unsigned long pmc_mask = 0UL, pmd_mask = 0UL; |
6046 | unsigned long flags; | |
6047 | u64 psr, psr_up; | |
6048 | int need_irq_resend; | |
6049 | ||
6050 | ctx = PFM_GET_CTX(task); | |
6051 | if (unlikely(ctx == NULL)) return; | |
6052 | ||
6053 | BUG_ON(GET_PMU_OWNER()); | |
6054 | ||
1da177e4 LT |
6055 | /* |
6056 | * possible on unload | |
6057 | */ | |
35589a8f | 6058 | if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return; |
1da177e4 LT |
6059 | |
6060 | /* | |
6061 | * we always come here with interrupts ALREADY disabled by | |
6062 | * the scheduler. So we simply need to protect against concurrent | |
6063 | * access, not CPU concurrency. | |
6064 | */ | |
6065 | flags = pfm_protect_ctx_ctxsw(ctx); | |
6066 | psr = pfm_get_psr(); | |
6067 | ||
6068 | need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND; | |
6069 | ||
6070 | BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP)); | |
6071 | BUG_ON(psr & IA64_PSR_I); | |
6072 | ||
6073 | if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) { | |
6450578f | 6074 | struct pt_regs *regs = task_pt_regs(task); |
1da177e4 LT |
6075 | |
6076 | BUG_ON(ctx->ctx_smpl_hdr); | |
6077 | ||
6078 | pfm_force_cleanup(ctx, regs); | |
6079 | ||
6080 | pfm_unprotect_ctx_ctxsw(ctx, flags); | |
6081 | ||
6082 | /* | |
6083 | * this one (kmalloc'ed) is fine with interrupts disabled | |
6084 | */ | |
6085 | pfm_context_free(ctx); | |
6086 | ||
6087 | return; | |
6088 | } | |
6089 | ||
6090 | /* | |
6091 | * we restore ALL the debug registers to avoid picking up | |
6092 | * stale state. | |
6093 | */ | |
6094 | if (ctx->ctx_fl_using_dbreg) { | |
6095 | pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs); | |
6096 | pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs); | |
6097 | } | |
6098 | /* | |
6099 | * retrieve saved psr.up | |
6100 | */ | |
6101 | psr_up = ctx->ctx_saved_psr_up; | |
6102 | ||
6103 | /* | |
6104 | * if we were the last user of the PMU on that CPU, | |
6105 | * then nothing to do except restore psr | |
6106 | */ | |
6107 | if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) { | |
6108 | ||
6109 | /* | |
6110 | * retrieve partial reload masks (due to user modifications) | |
6111 | */ | |
6112 | pmc_mask = ctx->ctx_reload_pmcs[0]; | |
6113 | pmd_mask = ctx->ctx_reload_pmds[0]; | |
6114 | ||
6115 | } else { | |
6116 | /* | |
6117 | * To avoid leaking information to the user level when psr.sp=0, | |
6118 | * we must reload ALL implemented pmds (even the ones we don't use). | |
6119 | * In the kernel we only allow PFM_READ_PMDS on registers which | |
6120 | * we initialized or requested (sampling) so there is no risk there. | |
6121 | */ | |
6122 | pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0]; | |
6123 | ||
6124 | /* | |
6125 | * ALL accessible PMCs are systematically reloaded, unused registers | |
6126 | * get their default (from pfm_reset_pmu_state()) values to avoid picking | |
6127 | * up stale configuration. | |
6128 | * | |
6129 | * PMC0 is never in the mask. It is always restored separately. | |
6130 | */ | |
6131 | pmc_mask = ctx->ctx_all_pmcs[0]; | |
6132 | } | |
6133 | /* | |
6134 | * when context is MASKED, we will restore PMC with plm=0 | |
6135 | * and PMD with stale information, but that's ok, nothing | |
6136 | * will be captured. | |
6137 | * | |
6138 | * XXX: optimize here | |
6139 | */ | |
35589a8f KA |
6140 | if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask); |
6141 | if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask); | |
1da177e4 LT |
6142 | |
6143 | /* | |
6144 | * check for pending overflow at the time the state | |
6145 | * was saved. | |
6146 | */ | |
35589a8f | 6147 | if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) { |
1da177e4 LT |
6148 | /* |
6149 | * reload pmc0 with the overflow information | |
6150 | * On McKinley PMU, this will trigger a PMU interrupt | |
6151 | */ | |
35589a8f | 6152 | ia64_set_pmc(0, ctx->th_pmcs[0]); |
1da177e4 | 6153 | ia64_srlz_d(); |
35589a8f | 6154 | ctx->th_pmcs[0] = 0UL; |
1da177e4 LT |
6155 | |
6156 | /* | |
6157 | * will replay the PMU interrupt | |
6158 | */ | |
c0ad90a3 | 6159 | if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR); |
1da177e4 LT |
6160 | |
6161 | pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++; | |
6162 | } | |
6163 | ||
6164 | /* | |
6165 | * we just did a reload, so we reset the partial reload fields | |
6166 | */ | |
6167 | ctx->ctx_reload_pmcs[0] = 0UL; | |
6168 | ctx->ctx_reload_pmds[0] = 0UL; | |
6169 | ||
6170 | SET_LAST_CPU(ctx, smp_processor_id()); | |
6171 | ||
6172 | /* | |
6173 | * bump the activation value for this PMU | |
6174 | */ | |
6175 | INC_ACTIVATION(); | |
6176 | /* | |
6177 | * record current activation for this context | |
6178 | */ | |
6179 | SET_ACTIVATION(ctx); | |
6180 | ||
6181 | /* | |
6182 | * establish new ownership. | |
6183 | */ | |
6184 | SET_PMU_OWNER(task, ctx); | |
6185 | ||
6186 | /* | |
6187 | * restore the psr.up bit. measurement | |
6188 | * is active again. | |
6189 | * no PMU interrupt can happen at this point | |
6190 | * because we still have interrupts disabled. | |
6191 | */ | |
6192 | if (likely(psr_up)) pfm_set_psr_up(); | |
6193 | ||
6194 | /* | |
6195 | * allow concurrent access to context | |
6196 | */ | |
6197 | pfm_unprotect_ctx_ctxsw(ctx, flags); | |
6198 | } | |
6199 | #else /* !CONFIG_SMP */ | |
6200 | /* | |
6201 | * reload PMU state for UP kernels | |
6202 | * in 2.5 we come here with interrupts disabled | |
6203 | */ | |
6204 | void | |
6205 | pfm_load_regs (struct task_struct *task) | |
6206 | { | |
1da177e4 LT |
6207 | pfm_context_t *ctx; |
6208 | struct task_struct *owner; | |
6209 | unsigned long pmd_mask, pmc_mask; | |
6210 | u64 psr, psr_up; | |
6211 | int need_irq_resend; | |
6212 | ||
6213 | owner = GET_PMU_OWNER(); | |
6214 | ctx = PFM_GET_CTX(task); | |
1da177e4 LT |
6215 | psr = pfm_get_psr(); |
6216 | ||
6217 | BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP)); | |
6218 | BUG_ON(psr & IA64_PSR_I); | |
6219 | ||
6220 | /* | |
6221 | * we restore ALL the debug registers to avoid picking up | |
6222 | * stale state. | |
6223 | * | |
6224 | * This must be done even when the task is still the owner | |
6225 | * as the registers may have been modified via ptrace() | |
6226 | * (not perfmon) by the previous task. | |
6227 | */ | |
6228 | if (ctx->ctx_fl_using_dbreg) { | |
6229 | pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs); | |
6230 | pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs); | |
6231 | } | |
6232 | ||
6233 | /* | |
6234 | * retrieve saved psr.up | |
6235 | */ | |
6236 | psr_up = ctx->ctx_saved_psr_up; | |
6237 | need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND; | |
6238 | ||
6239 | /* | |
6240 | * short path, our state is still there, just | |
6241 | * need to restore psr and we go | |
6242 | * | |
6243 | * we do not touch either PMC nor PMD. the psr is not touched | |
6244 | * by the overflow_handler. So we are safe w.r.t. interrupt | |
6245 | * concurrency even without interrupt masking. | |
6246 | */ | |
6247 | if (likely(owner == task)) { | |
6248 | if (likely(psr_up)) pfm_set_psr_up(); | |
6249 | return; | |
6250 | } | |
6251 | ||
6252 | /* | |
6253 | * someone else is still using the PMU, first push it out and | |
6254 | * then we'll be able to install our stuff! | |
6255 | * | |
6256 | * Upon return, there will be no owner for the current PMU | |
6257 | */ | |
6258 | if (owner) pfm_lazy_save_regs(owner); | |
6259 | ||
6260 | /* | |
6261 | * To avoid leaking information to the user level when psr.sp=0, | |
6262 | * we must reload ALL implemented pmds (even the ones we don't use). | |
6263 | * In the kernel we only allow PFM_READ_PMDS on registers which | |
6264 | * we initialized or requested (sampling) so there is no risk there. | |
6265 | */ | |
6266 | pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0]; | |
6267 | ||
6268 | /* | |
6269 | * ALL accessible PMCs are systematically reloaded, unused registers | |
6270 | * get their default (from pfm_reset_pmu_state()) values to avoid picking | |
6271 | * up stale configuration. | |
6272 | * | |
6273 | * PMC0 is never in the mask. It is always restored separately | |
6274 | */ | |
6275 | pmc_mask = ctx->ctx_all_pmcs[0]; | |
6276 | ||
35589a8f KA |
6277 | pfm_restore_pmds(ctx->th_pmds, pmd_mask); |
6278 | pfm_restore_pmcs(ctx->th_pmcs, pmc_mask); | |
1da177e4 LT |
6279 | |
6280 | /* | |
6281 | * check for pending overflow at the time the state | |
6282 | * was saved. | |
6283 | */ | |
35589a8f | 6284 | if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) { |
1da177e4 LT |
6285 | /* |
6286 | * reload pmc0 with the overflow information | |
6287 | * On McKinley PMU, this will trigger a PMU interrupt | |
6288 | */ | |
35589a8f | 6289 | ia64_set_pmc(0, ctx->th_pmcs[0]); |
1da177e4 LT |
6290 | ia64_srlz_d(); |
6291 | ||
35589a8f | 6292 | ctx->th_pmcs[0] = 0UL; |
1da177e4 LT |
6293 | |
6294 | /* | |
6295 | * will replay the PMU interrupt | |
6296 | */ | |
c0ad90a3 | 6297 | if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR); |
1da177e4 LT |
6298 | |
6299 | pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++; | |
6300 | } | |
6301 | ||
6302 | /* | |
6303 | * establish new ownership. | |
6304 | */ | |
6305 | SET_PMU_OWNER(task, ctx); | |
6306 | ||
6307 | /* | |
6308 | * restore the psr.up bit. measurement | |
6309 | * is active again. | |
6310 | * no PMU interrupt can happen at this point | |
6311 | * because we still have interrupts disabled. | |
6312 | */ | |
6313 | if (likely(psr_up)) pfm_set_psr_up(); | |
6314 | } | |
6315 | #endif /* CONFIG_SMP */ | |
6316 | ||
6317 | /* | |
6318 | * this function assumes monitoring is stopped | |
6319 | */ | |
6320 | static void | |
6321 | pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx) | |
6322 | { | |
6323 | u64 pmc0; | |
6324 | unsigned long mask2, val, pmd_val, ovfl_val; | |
6325 | int i, can_access_pmu = 0; | |
6326 | int is_self; | |
6327 | ||
6328 | /* | |
6329 | * is the caller the task being monitored (or which initiated the | |
6330 | * session for system wide measurements) | |
6331 | */ | |
6332 | is_self = ctx->ctx_task == task ? 1 : 0; | |
6333 | ||
6334 | /* | |
6335 | * can access PMU if task is the owner of the PMU state on the current CPU | |
6336 | * or if we are running on the CPU bound to the context in system-wide mode | |
6337 | * (that is not necessarily the task the context is attached to in this mode). | |
6338 | * In system-wide we always have can_access_pmu true because a task running on an | |
6339 | * invalid processor is flagged earlier in the call stack (see pfm_stop). | |
6340 | */ | |
6341 | can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id()); | |
6342 | if (can_access_pmu) { | |
6343 | /* | |
6344 | * Mark the PMU as not owned | |
6345 | * This will cause the interrupt handler to do nothing in case an overflow | |
6346 | * interrupt was in-flight | |
6347 | * This also guarantees that pmc0 will contain the final state | |
6348 | * It virtually gives us full control on overflow processing from that point | |
6349 | * on. | |
6350 | */ | |
6351 | SET_PMU_OWNER(NULL, NULL); | |
6352 | DPRINT(("releasing ownership\n")); | |
6353 | ||
6354 | /* | |
6355 | * read current overflow status: | |
6356 | * | |
6357 | * we are guaranteed to read the final stable state | |
6358 | */ | |
6359 | ia64_srlz_d(); | |
6360 | pmc0 = ia64_get_pmc(0); /* slow */ | |
6361 | ||
6362 | /* | |
6363 | * reset freeze bit, overflow status information destroyed | |
6364 | */ | |
6365 | pfm_unfreeze_pmu(); | |
6366 | } else { | |
35589a8f | 6367 | pmc0 = ctx->th_pmcs[0]; |
1da177e4 LT |
6368 | /* |
6369 | * clear whatever overflow status bits there were | |
6370 | */ | |
35589a8f | 6371 | ctx->th_pmcs[0] = 0; |
1da177e4 LT |
6372 | } |
6373 | ovfl_val = pmu_conf->ovfl_val; | |
6374 | /* | |
6375 | * we save all the used pmds | |
6376 | * we take care of overflows for counting PMDs | |
6377 | * | |
6378 | * XXX: sampling situation is not taken into account here | |
6379 | */ | |
6380 | mask2 = ctx->ctx_used_pmds[0]; | |
6381 | ||
6382 | DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2)); | |
6383 | ||
6384 | for (i = 0; mask2; i++, mask2>>=1) { | |
6385 | ||
6386 | /* skip non used pmds */ | |
6387 | if ((mask2 & 0x1) == 0) continue; | |
6388 | ||
6389 | /* | |
6390 | * can access PMU always true in system wide mode | |
6391 | */ | |
35589a8f | 6392 | val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i]; |
1da177e4 LT |
6393 | |
6394 | if (PMD_IS_COUNTING(i)) { | |
6395 | DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n", | |
6396 | task->pid, | |
6397 | i, | |
6398 | ctx->ctx_pmds[i].val, | |
6399 | val & ovfl_val)); | |
6400 | ||
6401 | /* | |
6402 | * we rebuild the full 64 bit value of the counter | |
6403 | */ | |
6404 | val = ctx->ctx_pmds[i].val + (val & ovfl_val); | |
6405 | ||
6406 | /* | |
6407 | * now everything is in ctx_pmds[] and we need | |
6408 | * to clear the saved context from save_regs() such that | |
6409 | * pfm_read_pmds() gets the correct value | |
6410 | */ | |
6411 | pmd_val = 0UL; | |
6412 | ||
6413 | /* | |
6414 | * take care of overflow inline | |
6415 | */ | |
6416 | if (pmc0 & (1UL << i)) { | |
6417 | val += 1 + ovfl_val; | |
6418 | DPRINT(("[%d] pmd[%d] overflowed\n", task->pid, i)); | |
6419 | } | |
6420 | } | |
6421 | ||
6422 | DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, i, val, pmd_val)); | |
6423 | ||
35589a8f | 6424 | if (is_self) ctx->th_pmds[i] = pmd_val; |
1da177e4 LT |
6425 | |
6426 | ctx->ctx_pmds[i].val = val; | |
6427 | } | |
6428 | } | |
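/*
 * Worked example of the reconstruction above (made-up numbers, 47-bit
 * counters so ovfl_val = 0x00007fffffffffff):
 *
 *	ctx->ctx_pmds[i].val = 0x0000800000000000;	// software high bits
 *	hw_val               = 0x0000000000001234;	// live or lazily saved PMD
 *	val = ctx->ctx_pmds[i].val + (hw_val & ovfl_val);
 *	// val = 0x0000800000001234: the full 64-bit count. If pmc0 shows that
 *	// PMD i overflowed after the save, one more carry is added inline:
 *	val += 1 + ovfl_val;				// + 2^47
 */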
6429 | ||
6430 | static struct irqaction perfmon_irqaction = { | |
6431 | .handler = pfm_interrupt_handler, | |
121a4226 | 6432 | .flags = IRQF_DISABLED, |
1da177e4 LT |
6433 | .name = "perfmon" |
6434 | }; | |
6435 | ||
a1ecf7f6 TL |
6436 | static void |
6437 | pfm_alt_save_pmu_state(void *data) | |
6438 | { | |
6439 | struct pt_regs *regs; | |
6440 | ||
6450578f | 6441 | regs = task_pt_regs(current); |
a1ecf7f6 TL |
6442 | |
6443 | DPRINT(("called\n")); | |
6444 | ||
6445 | /* | |
6446 | * should not be necessary but | |
6447 | * let's take not risk | |
6448 | */ | |
6449 | pfm_clear_psr_up(); | |
6450 | pfm_clear_psr_pp(); | |
6451 | ia64_psr(regs)->pp = 0; | |
6452 | ||
6453 | /* | |
6454 | * This call is required | |
6455 | * May cause a spurious interrupt on some processors | |
6456 | */ | |
6457 | pfm_freeze_pmu(); | |
6458 | ||
6459 | ia64_srlz_d(); | |
6460 | } | |
6461 | ||
6462 | void | |
6463 | pfm_alt_restore_pmu_state(void *data) | |
6464 | { | |
6465 | struct pt_regs *regs; | |
6466 | ||
6450578f | 6467 | regs = task_pt_regs(current); |
a1ecf7f6 TL |
6468 | |
6469 | DPRINT(("called\n")); | |
6470 | ||
6471 | /* | |
6472 | * put PMU back in state expected | |
6473 | * by perfmon | |
6474 | */ | |
6475 | pfm_clear_psr_up(); | |
6476 | pfm_clear_psr_pp(); | |
6477 | ia64_psr(regs)->pp = 0; | |
6478 | ||
6479 | /* | |
6480 | * perfmon runs with PMU unfrozen at all times | |
6481 | */ | |
6482 | pfm_unfreeze_pmu(); | |
6483 | ||
6484 | ia64_srlz_d(); | |
6485 | } | |
6486 | ||
6487 | int | |
6488 | pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl) | |
6489 | { | |
6490 | int ret, i; | |
6491 | int reserve_cpu; | |
6492 | ||
6493 | /* some sanity checks */ | |
6494 | if (hdl == NULL || hdl->handler == NULL) return -EINVAL; | |
6495 | ||
6496 | /* do the easy test first */ | |
6497 | if (pfm_alt_intr_handler) return -EBUSY; | |
6498 | ||
6499 | /* one at a time in the install or remove, just fail the others */ | |
6500 | if (!spin_trylock(&pfm_alt_install_check)) { | |
6501 | return -EBUSY; | |
6502 | } | |
6503 | ||
6504 | /* reserve our session */ | |
6505 | for_each_online_cpu(reserve_cpu) { | |
6506 | ret = pfm_reserve_session(NULL, 1, reserve_cpu); | |
6507 | if (ret) goto cleanup_reserve; | |
6508 | } | |
6509 | ||
6510 | /* save the current system wide pmu states */ | |
6511 | ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1); | |
6512 | if (ret) { | |
6513 | DPRINT(("on_each_cpu() failed: %d\n", ret)); | |
6514 | goto cleanup_reserve; | |
6515 | } | |
6516 | ||
6517 | /* officially change to the alternate interrupt handler */ | |
6518 | pfm_alt_intr_handler = hdl; | |
6519 | ||
6520 | spin_unlock(&pfm_alt_install_check); | |
6521 | ||
6522 | return 0; | |
6523 | ||
6524 | cleanup_reserve: | |
6525 | for_each_online_cpu(i) { | |
6526 | /* don't unreserve more than we reserved */ | |
6527 | if (i >= reserve_cpu) break; | |
6528 | ||
6529 | pfm_unreserve_session(NULL, 1, i); | |
6530 | } | |
6531 | ||
6532 | spin_unlock(&pfm_alt_install_check); | |
6533 | ||
6534 | return ret; | |
6535 | } | |
6536 | EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt); | |
6537 | ||
6538 | int | |
6539 | pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl) | |
6540 | { | |
6541 | int i; | |
6542 | int ret; | |
6543 | ||
6544 | if (hdl == NULL) return -EINVAL; | |
6545 | ||
6546 | /* cannot remove someone else's handler! */ | |
6547 | if (pfm_alt_intr_handler != hdl) return -EINVAL; | |
6548 | ||
6549 | /* serialize install/remove: only one caller at a time, concurrent attempts simply fail */ | |
6550 | if (!spin_trylock(&pfm_alt_install_check)) { | |
6551 | return -EBUSY; | |
6552 | } | |
6553 | ||
6554 | pfm_alt_intr_handler = NULL; | |
6555 | ||
6556 | ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1); | |
6557 | if (ret) { | |
6558 | DPRINT(("on_each_cpu() failed: %d\n", ret)); | |
6559 | } | |
6560 | ||
6561 | for_each_online_cpu(i) { | |
6562 | pfm_unreserve_session(NULL, 1, i); | |
6563 | } | |
6564 | ||
6565 | spin_unlock(&pfm_alt_install_check); | |
6566 | ||
6567 | return 0; | |
6568 | } | |
6569 | EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt); | |
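/*
 * Sketch of a client of the two exports above (hypothetical module,
 * for illustration only: "xyz_handler" and "xyz_desc" are invented
 * names, and the handler prototype must match the
 * pfm_intr_handler_desc_t declaration in the perfmon headers):
 *
 *	static pfm_intr_handler_desc_t xyz_desc = {
 *		.handler = xyz_handler,
 *	};
 *
 *	static int __init xyz_init(void)
 *	{
 *		return pfm_install_alt_pmu_interrupt(&xyz_desc);
 *	}
 *
 *	static void __exit xyz_exit(void)
 *	{
 *		pfm_remove_alt_pmu_interrupt(&xyz_desc);
 *	}
 *
 * Install returns -EBUSY if another handler is registered or any CPU
 * already holds a perfmon session; on success it reserves a system-wide
 * session on every online CPU and saves the PMU state there.  Remove
 * restores that state and releases the sessions, handing the PMU back
 * to perfmon.
 */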
6570 | ||
1da177e4 LT |
6571 | /* |
6572 | * perfmon initialization routine, called from the initcall() table | |
6573 | */ | |
6574 | static int init_pfm_fs(void); | |
6575 | ||
6576 | static int __init | |
6577 | pfm_probe_pmu(void) | |
6578 | { | |
6579 | pmu_config_t **p; | |
6580 | int family; | |
6581 | ||
6582 | family = local_cpu_data->family; | |
6583 | p = pmu_confs; | |
6584 | ||
6585 | while(*p) { | |
6586 | if ((*p)->probe) { | |
6587 | if ((*p)->probe() == 0) goto found; | |
6588 | } else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) { | |
6589 | goto found; | |
6590 | } | |
6591 | p++; | |
6592 | } | |
6593 | return -1; | |
6594 | found: | |
6595 | pmu_conf = *p; | |
6596 | return 0; | |
6597 | } | |
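/*
 * For reference, a pmu_confs[] entry has roughly this shape (sketch
 * only; the "Montecito" values are illustrative and the complete
 * structures live in the perfmon_*.h description tables):
 *
 *	static int pfm_mont_probe_pmu(void)
 *	{
 *		return local_cpu_data->family == 0x20 ? 0 : -1;
 *	}
 *
 *	static pmu_config_t pmu_conf_mont = {
 *		.pmu_name	= "Montecito",
 *		.pmu_family	= 0x20,
 *		.probe		= pfm_mont_probe_pmu,
 *		...
 *	};
 *
 * An entry is selected either by its probe() callback returning 0 or,
 * when no probe is provided, by a pmu_family match; 0xff acts as a
 * wildcard for the generic configuration.
 */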
6598 | ||
5dfe4c96 | 6599 | static const struct file_operations pfm_proc_fops = { |
1da177e4 LT |
6600 | .open = pfm_proc_open, |
6601 | .read = seq_read, | |
6602 | .llseek = seq_lseek, | |
6603 | .release = seq_release, | |
6604 | }; | |
6605 | ||
6606 | int __init | |
6607 | pfm_init(void) | |
6608 | { | |
6609 | unsigned int n, n_counters, i; | |
6610 | ||
6611 | printk("perfmon: version %u.%u IRQ %u\n", | |
6612 | PFM_VERSION_MAJ, | |
6613 | PFM_VERSION_MIN, | |
6614 | IA64_PERFMON_VECTOR); | |
6615 | ||
6616 | if (pfm_probe_pmu()) { | |
6617 | printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n", | |
6618 | local_cpu_data->family); | |
6619 | return -ENODEV; | |
6620 | } | |
6621 | ||
6622 | /* | |
6623 | * compute the number of implemented PMD/PMC from the | |
6624 | * description tables | |
6625 | */ | |
6626 | n = 0; | |
6627 | for (i=0; PMC_IS_LAST(i) == 0; i++) { | |
6628 | if (PMC_IS_IMPL(i) == 0) continue; | |
6629 | pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63); | |
6630 | n++; | |
6631 | } | |
6632 | pmu_conf->num_pmcs = n; | |
6633 | ||
6634 | n = 0; n_counters = 0; | |
6635 | for (i=0; PMD_IS_LAST(i) == 0; i++) { | |
6636 | if (PMD_IS_IMPL(i) == 0) continue; | |
6637 | pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63); | |
6638 | n++; | |
6639 | if (PMD_IS_COUNTING(i)) n_counters++; | |
6640 | } | |
6641 | pmu_conf->num_pmds = n; | |
6642 | pmu_conf->num_counters = n_counters; | |
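/*
 * impl_pmcs/impl_pmds built above are flat 64-bit bitmaps: register i
 * lives in word i>>6 at bit position i&63.  Worked example
 * (illustrative register number, not from a real PMU description):
 *
 *	i = 70;
 *	pmu_conf->impl_pmds[70 >> 6] |= 1UL << (70 & 63);
 *		// i.e. impl_pmds[1] |= 1UL << 6
 */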
6643 | ||
6644 | /* | |
6645 | * sanity checks on the number of debug registers | |
6646 | */ | |
6647 | if (pmu_conf->use_rr_dbregs) { | |
6648 | if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) { | |
6649 | printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs); | |
6650 | pmu_conf = NULL; | |
6651 | return -1; | |
6652 | } | |
6653 | if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) { | |
6654 | printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_ibrs); | |
6655 | pmu_conf = NULL; | |
6656 | return -1; | |
6657 | } | |
6658 | } | |
6659 | ||
6660 | printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n", | |
6661 | pmu_conf->pmu_name, | |
6662 | pmu_conf->num_pmcs, | |
6663 | pmu_conf->num_pmds, | |
6664 | pmu_conf->num_counters, | |
6665 | ffz(pmu_conf->ovfl_val)); | |
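/*
 * The "(%lu bits)" figure above is recovered from ovfl_val, the mask
 * of implemented counter bits: ffz() returns the index of the first
 * zero bit.  Illustrative example: 47-bit counters give
 * ovfl_val = (1UL << 47) - 1, so ffz(ovfl_val) == 47.
 */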
6666 | ||
6667 | /* sanity check */ | |
35589a8f | 6668 | if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) { |
1da177e4 LT |
6669 | printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n"); |
6670 | pmu_conf = NULL; | |
6671 | return -1; | |
6672 | } | |
6673 | ||
6674 | /* | |
6675 | * create /proc/perfmon (mostly for debugging purposes) | |
6676 | */ | |
6677 | perfmon_dir = create_proc_entry("perfmon", S_IRUGO, NULL); | |
6678 | if (perfmon_dir == NULL) { | |
6679 | printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n"); | |
6680 | pmu_conf = NULL; | |
6681 | return -1; | |
6682 | } | |
6683 | /* | |
6684 | * install customized file operations for /proc/perfmon entry | |
6685 | */ | |
6686 | perfmon_dir->proc_fops = &pfm_proc_fops; | |
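/*
 * With the seq_file hooks above in place, the entry can be inspected
 * from userland (illustrative session; the actual fields come from the
 * seq_file show routine earlier in this file):
 *
 *	$ cat /proc/perfmon
 *	perfmon version           : 2.0
 *	...
 */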
6687 | ||
6688 | /* | |
6689 | * create /proc/sys/kernel/perfmon (for debugging purposes) | |
6690 | */ | |
6691 | pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root, 0); | |
6692 | ||
6693 | /* | |
6694 | * initialize all our spinlocks | |
6695 | */ | |
6696 | spin_lock_init(&pfm_sessions.pfs_lock); | |
6697 | spin_lock_init(&pfm_buffer_fmt_lock); | |
6698 | ||
6699 | init_pfm_fs(); | |
6700 | ||
6701 | for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL; | |
6702 | ||
6703 | return 0; | |
6704 | } | |
6705 | ||
6706 | __initcall(pfm_init); | |
6707 | ||
6708 | /* | |
6709 | * this function is called before pfm_init() | |
6710 | */ | |
6711 | void | |
6712 | pfm_init_percpu (void) | |
6713 | { | |
ff741906 | 6714 | static int first_time=1; |
1da177e4 LT |
6715 | /* |
6716 | * make sure no measurement is active | |
6717 | * (may inherit programmed PMCs from EFI). | |
6718 | */ | |
6719 | pfm_clear_psr_pp(); | |
6720 | pfm_clear_psr_up(); | |
6721 | ||
6722 | /* | |
6723 | * we run with the PMU not frozen at all times | |
6724 | */ | |
6725 | pfm_unfreeze_pmu(); | |
6726 | ||
ff741906 | 6727 | if (first_time) { |
1da177e4 | 6728 | register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction); |
ff741906 AR |
6729 | first_time=0; |
6730 | } | |
1da177e4 LT |
6731 | |
6732 | ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR); | |
6733 | ia64_srlz_d(); | |
6734 | } | |
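/*
 * Note on the first_time flag above: pfm_init_percpu() runs from
 * cpu_init() on every processor, but CPU bring-up is serialized and
 * the boot CPU goes first, so the one-time register_percpu_irq() call
 * needs no locking.  Programming CR.PMV, by contrast, is genuinely
 * per CPU and is repeated on each invocation.
 */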
6735 | ||
6736 | /* | |
6737 | * used for debug purposes only | |
6738 | */ | |
6739 | void | |
6740 | dump_pmu_state(const char *from) | |
6741 | { | |
6742 | struct task_struct *task; | |
1da177e4 LT |
6743 | struct pt_regs *regs; |
6744 | pfm_context_t *ctx; | |
6745 | unsigned long psr, dcr, info, flags; | |
6746 | int i, this_cpu; | |
6747 | ||
6748 | local_irq_save(flags); | |
6749 | ||
6750 | this_cpu = smp_processor_id(); | |
6450578f | 6751 | regs = task_pt_regs(current); |
1da177e4 LT |
6752 | info = PFM_CPUINFO_GET(); |
6753 | dcr = ia64_getreg(_IA64_REG_CR_DCR); | |
6754 | ||
6755 | if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) { | |
6756 | local_irq_restore(flags); | |
6757 | return; | |
6758 | } | |
6759 | ||
6760 | printk("CPU%d from %s() current [%d] iip=0x%lx %s\n", | |
6761 | this_cpu, | |
6762 | from, | |
6763 | current->pid, | |
6764 | regs->cr_iip, | |
6765 | current->comm); | |
6766 | ||
6767 | task = GET_PMU_OWNER(); | |
6768 | ctx = GET_PMU_CTX(); | |
6769 | ||
6770 | printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task->pid : -1, ctx); | |
6771 | ||
6772 | psr = pfm_get_psr(); | |
6773 | ||
6774 | printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n", | |
6775 | this_cpu, | |
6776 | ia64_get_pmc(0), | |
6777 | psr & IA64_PSR_PP ? 1 : 0, | |
6778 | psr & IA64_PSR_UP ? 1 : 0, | |
6779 | dcr & IA64_DCR_PP ? 1 : 0, | |
6780 | info, | |
6781 | ia64_psr(regs)->up, | |
6782 | ia64_psr(regs)->pp); | |
6783 | ||
6784 | ia64_psr(regs)->up = 0; | |
6785 | ia64_psr(regs)->pp = 0; | |
6786 | ||
1da177e4 LT |
6787 | for (i=1; PMC_IS_LAST(i) == 0; i++) { |
6788 | if (PMC_IS_IMPL(i) == 0) continue; | |
35589a8f | 6789 | printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]); |
1da177e4 LT |
6790 | } |
6791 | ||
6792 | for (i=1; PMD_IS_LAST(i) == 0; i++) { | |
6793 | if (PMD_IS_IMPL(i) == 0) continue; | |
35589a8f | 6794 | printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]); |
1da177e4 LT |
6795 | } |
6796 | ||
6797 | if (ctx) { | |
6798 | printk("->CPU%d ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n", | |
6799 | this_cpu, | |
6800 | ctx->ctx_state, | |
6801 | ctx->ctx_smpl_vaddr, | |
6802 | ctx->ctx_smpl_hdr, | |
6803 | ctx->ctx_msgq_head, | |
6804 | ctx->ctx_msgq_tail, | |
6805 | ctx->ctx_saved_psr_up); | |
6806 | } | |
6807 | local_irq_restore(flags); | |
6808 | } | |
6809 | ||
6810 | /* | |
6811 | * called from process.c:copy_thread(). task is new child. | |
6812 | */ | |
6813 | void | |
6814 | pfm_inherit(struct task_struct *task, struct pt_regs *regs) | |
6815 | { | |
6816 | struct thread_struct *thread; | |
6817 | ||
6818 | DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task->pid)); | |
6819 | ||
6820 | thread = &task->thread; | |
6821 | ||
6822 | /* | |
6823 | * cut links inherited from parent (current) | |
6824 | */ | |
6825 | thread->pfm_context = NULL; | |
6826 | ||
6827 | PFM_SET_WORK_PENDING(task, 0); | |
6828 | ||
6829 | /* | |
6830 | * the psr bits are already set properly in copy_thread() | |
6831 | */ | |
6832 | } | |
6833 | #else /* !CONFIG_PERFMON */ | |
6834 | asmlinkage long | |
6835 | sys_perfmonctl (int fd, int cmd, void *arg, int count) | |
6836 | { | |
6837 | return -ENOSYS; | |
6838 | } | |
6839 | #endif /* CONFIG_PERFMON */ |