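/*
 * arch/x86/events/msr.c: a perf PMU ("msr") that exposes free-running,
 * read-only x86 MSR counters (TSC, APERF, MPERF, PPERF, SMI count, PTSC,
 * IRPERF) as counting events. Sampling is not supported; the counters
 * can only be read.
 */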
#include <linux/perf_event.h>

enum perf_msr_id {
	PERF_MSR_TSC		= 0,
	PERF_MSR_APERF		= 1,
	PERF_MSR_MPERF		= 2,
	PERF_MSR_PPERF		= 3,
	PERF_MSR_SMI		= 4,
	PERF_MSR_PTSC		= 5,
	PERF_MSR_IRPERF		= 6,

	PERF_MSR_EVENT_MAX,
};
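
/*
 * Note: the enum value doubles as the event's config value, so the order
 * here must stay in sync with the "event=0x0N" strings and the msr[]
 * table below.
 */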

static bool test_aperfmperf(int idx)
{
	return boot_cpu_has(X86_FEATURE_APERFMPERF);
}

static bool test_ptsc(int idx)
{
	return boot_cpu_has(X86_FEATURE_PTSC);
}

static bool test_irperf(int idx)
{
	return boot_cpu_has(X86_FEATURE_IRPERF);
}

static bool test_intel(int idx)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
	    boot_cpu_data.x86 != 6)
		return false;

	switch (boot_cpu_data.x86_model) {
	case 30: /* 45nm Nehalem    */
	case 26: /* 45nm Nehalem-EP */
	case 46: /* 45nm Nehalem-EX */

	case 37: /* 32nm Westmere    */
	case 44: /* 32nm Westmere-EP */
	case 47: /* 32nm Westmere-EX */

	case 42: /* 32nm SandyBridge         */
	case 45: /* 32nm SandyBridge-E/EN/EP */

	case 58: /* 22nm IvyBridge       */
	case 62: /* 22nm IvyBridge-EP/EX */

	case 60: /* 22nm Haswell Core   */
	case 63: /* 22nm Haswell Server */
	case 69: /* 22nm Haswell ULT    */
	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */

	case 61: /* 14nm Broadwell Core-M */
	case 86: /* 14nm Broadwell Xeon D */
	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
	case 79: /* 14nm Broadwell Server */

	case 55: /* 22nm Atom "Silvermont"                */
	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
	case 76: /* 14nm Atom "Airmont"                   */
		if (idx == PERF_MSR_SMI)
			return true;
		break;

	case 78: /* 14nm Skylake Mobile  */
	case 94: /* 14nm Skylake Desktop */
		if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
			return true;
		break;
	}

	return false;
}

struct perf_msr {
	u64				msr;
	struct perf_pmu_events_attr	*attr;
	bool				(*test)(int idx);
};

PMU_EVENT_ATTR_STRING(tsc,    evattr_tsc,    "event=0x00");
PMU_EVENT_ATTR_STRING(aperf,  evattr_aperf,  "event=0x01");
PMU_EVENT_ATTR_STRING(mperf,  evattr_mperf,  "event=0x02");
PMU_EVENT_ATTR_STRING(pperf,  evattr_pperf,  "event=0x03");
PMU_EVENT_ATTR_STRING(smi,    evattr_smi,    "event=0x04");
PMU_EVENT_ATTR_STRING(ptsc,   evattr_ptsc,   "event=0x05");
PMU_EVENT_ATTR_STRING(irperf, evattr_irperf, "event=0x06");
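
/*
 * One entry per enum slot: the MSR to read, the sysfs event attribute to
 * expose, and an optional probe hook run at init time. The TSC entry has
 * no probe hook; msr_init() bails out early when X86_FEATURE_TSC is
 * missing, so it never needs one.
 */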
static struct perf_msr msr[] = {
	[PERF_MSR_TSC]    = { 0,		&evattr_tsc,	NULL,		 },
	[PERF_MSR_APERF]  = { MSR_IA32_APERF,	&evattr_aperf,	test_aperfmperf, },
	[PERF_MSR_MPERF]  = { MSR_IA32_MPERF,	&evattr_mperf,	test_aperfmperf, },
	[PERF_MSR_PPERF]  = { MSR_PPERF,	&evattr_pperf,	test_intel,	 },
	[PERF_MSR_SMI]    = { MSR_SMI_COUNT,	&evattr_smi,	test_intel,	 },
	[PERF_MSR_PTSC]   = { MSR_F15H_PTSC,	&evattr_ptsc,	test_ptsc,	 },
	[PERF_MSR_IRPERF] = { MSR_F17H_IRPERF,	&evattr_irperf,	test_irperf,	 },
};
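
/*
 * msr_init() clears the attr pointer of any MSR that fails its probe, so
 * only working counters are linked into the "events" group below; those
 * then show up under the PMU's sysfs events directory, e.g.
 * /sys/bus/event_source/devices/msr/events/tsc (path shown for
 * illustration).
 */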
static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
	NULL,
};

static struct attribute_group events_attr_group = {
	.name = "events",
	.attrs = events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group format_attr_group = {
	.name = "format",
	.attrs = format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&events_attr_group,
	&format_attr_group,
	NULL,
};

static int msr_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (cfg >= PERF_MSR_EVENT_MAX)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (!msr[cfg].attr)
		return -EINVAL;

	event->hw.idx		= -1;
	event->hw.event_base	= msr[cfg].msr;
	event->hw.config	= cfg;

	return 0;
}

static inline u64 msr_read_counter(struct perf_event *event)
{
	u64 now;

	if (event->hw.event_base)
		rdmsrl(event->hw.event_base, now);
	else
		rdtscll(now);

	return now;
}
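
/*
 * Lockless update: prev_count is advanced with local64_cmpxchg() and
 * retried on failure, so a racing update (e.g. from NMI context) is never
 * lost. MSR_SMI_COUNT is only 32 bits wide, hence its delta is
 * sign-extended from bit 31 to cope with the counter wrapping.
 */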
static void msr_event_update(struct perf_event *event)
{
	u64 prev, now;
	s64 delta;

	/* Careful, an NMI might modify the previous event value. */
again:
	prev = local64_read(&event->hw.prev_count);
	now = msr_read_counter(event);

	if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;
	if (unlikely(event->hw.event_base == MSR_SMI_COUNT))
		delta = sign_extend64(delta, 31);

	local64_add(delta, &event->count);
}

static void msr_event_start(struct perf_event *event, int flags)
{
	u64 now;

	now = msr_read_counter(event);
	local64_set(&event->hw.prev_count, now);
}

static void msr_event_stop(struct perf_event *event, int flags)
{
	msr_event_update(event);
}

static void msr_event_del(struct perf_event *event, int flags)
{
	msr_event_stop(event, PERF_EF_UPDATE);
}

static int msr_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		msr_event_start(event, flags);

	return 0;
}
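
/*
 * Counting-only PMU: PERF_PMU_CAP_NO_INTERRUPT advertises that no
 * sampling interrupt exists, matching the sample_period rejection in
 * msr_event_init(). Events run in the software PMU context.
 */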
static struct pmu pmu_msr = {
	.task_ctx_nr	= perf_sw_context,
	.attr_groups	= attr_groups,
	.event_init	= msr_event_init,
	.add		= msr_event_add,
	.del		= msr_event_del,
	.start		= msr_event_start,
	.stop		= msr_event_stop,
	.read		= msr_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
};

static int __init msr_init(void)
{
	int i, j = 0;

	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		pr_cont("no MSR PMU driver.\n");
		return 0;
	}

	/* Probe the MSRs. */
	for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
		u64 val;

		/*
		 * Virt sucks arse; you cannot tell if a R/O MSR is present :/
		 */
		if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
			msr[i].attr = NULL;
	}

	/* List remaining MSRs in the sysfs attrs. */
	for (i = 0; i < PERF_MSR_EVENT_MAX; i++) {
		if (msr[i].attr)
			events_attrs[j++] = &msr[i].attr->attr.attr;
	}
	events_attrs[j] = NULL;

	perf_pmu_register(&pmu_msr, "msr", -1);

	return 0;
}
device_initcall(msr_init);
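
/*
 * Example usage (illustrative): once registered, the PMU appears as "msr"
 * under /sys/bus/event_source/devices/ and its surviving events can be
 * counted with perf, e.g.:
 *
 *	perf stat -e msr/tsc/,msr/smi/ -- sleep 1
 *
 * Events whose probe failed are simply absent from .../msr/events/.
 */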