perf/x86: Add an MSR PMU driver
[linux-2.6-block.git] / arch/x86/kernel/cpu/perf_event_msr.c
#include <linux/perf_event.h>

enum perf_msr_id {
	PERF_MSR_TSC	= 0,
	PERF_MSR_APERF	= 1,
	PERF_MSR_MPERF	= 2,
	PERF_MSR_PPERF	= 3,
	PERF_MSR_SMI	= 4,

	PERF_MSR_EVENT_MAX,
};

struct perf_msr {
	int	id;
	u64	msr;
};

static struct perf_msr msr[] = {
	{ PERF_MSR_TSC,   0 },
	{ PERF_MSR_APERF, MSR_IA32_APERF },
	{ PERF_MSR_MPERF, MSR_IA32_MPERF },
	{ PERF_MSR_PPERF, MSR_PPERF },
	{ PERF_MSR_SMI,   MSR_SMI_COUNT },
};
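
/*
 * Note: the PERF_MSR_TSC slot keeps a zero MSR address on purpose. The
 * TSC is read with the RDTSC instruction rather than an MSR, and
 * msr_read_counter() below treats a zero event_base as "use RDTSC".
 */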

PMU_EVENT_ATTR_STRING(tsc,   evattr_tsc,   "event=0x00");
PMU_EVENT_ATTR_STRING(aperf, evattr_aperf, "event=0x01");
PMU_EVENT_ATTR_STRING(mperf, evattr_mperf, "event=0x02");
PMU_EVENT_ATTR_STRING(pperf, evattr_pperf, "event=0x03");
PMU_EVENT_ATTR_STRING(smi,   evattr_smi,   "event=0x04");

static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
	&evattr_tsc.attr.attr,
};

static struct attribute_group events_attr_group = {
	.name = "events",
	.attrs = events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-63");
static struct attribute *format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};
static struct attribute_group format_attr_group = {
	.name = "format",
	.attrs = format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&events_attr_group,
	&format_attr_group,
	NULL,
};
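
/*
 * Once the PMU is registered, these groups surface in sysfs (a sketch,
 * assuming the standard perf event_source layout):
 *
 *	/sys/bus/event_source/devices/msr/type
 *	/sys/bus/event_source/devices/msr/format/event
 *	/sys/bus/event_source/devices/msr/events/tsc	contains "event=0x00"
 *
 * which is what lets the perf tool resolve names like "msr/tsc/".
 */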

static int msr_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (cfg >= PERF_MSR_EVENT_MAX)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	event->hw.idx = -1;
	event->hw.event_base = msr[cfg].msr;
	event->hw.config = cfg;

	return 0;
}
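
/*
 * Userspace sketch (illustrative only, not part of this file): with the
 * PMU registered, one of these counters can be opened via
 * perf_event_open(). attr.type comes from the sysfs "type" file and
 * attr.config is one of the PERF_MSR_* indices validated above:
 *
 *	struct perf_event_attr attr = {
 *		.type	= msr_pmu_type,	// read from .../devices/msr/type
 *		.size	= sizeof(attr),
 *		.config	= 1,		// PERF_MSR_APERF
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	u64 count;
 *	read(fd, &count, sizeof(count));
 */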

static inline u64 msr_read_counter(struct perf_event *event)
{
	u64 now;

	if (event->hw.event_base)
		rdmsrl(event->hw.event_base, now);
	else
		now = rdtsc();

	return now;
}

static void msr_event_update(struct perf_event *event)
{
	u64 prev, now;
	s64 delta;

	/* Careful, an NMI might modify the previous event value. */
again:
	prev = local64_read(&event->hw.prev_count);
	now = msr_read_counter(event);

	if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;
	if (unlikely(event->hw.event_base == MSR_SMI_COUNT)) {
		delta <<= 32;
		delta >>= 32; /* sign extend */
	}
	local64_add(delta, &event->count);
}
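
/*
 * MSR_SMI_COUNT is a 32-bit counter, so a hardware wrap between two
 * reads would otherwise show up as a huge unsigned delta; truncating
 * and sign-extending the low 32 bits above keeps the accumulated count
 * sane. The cmpxchg loop is the usual lock-free idiom for counters
 * whose previous value may also be updated from NMI context.
 */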

static void msr_event_start(struct perf_event *event, int flags)
{
	u64 now;

	now = msr_read_counter(event);
	local64_set(&event->hw.prev_count, now);
}

static void msr_event_stop(struct perf_event *event, int flags)
{
	msr_event_update(event);
}

static void msr_event_del(struct perf_event *event, int flags)
{
	msr_event_stop(event, PERF_EF_UPDATE);
}

static int msr_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		msr_event_start(event, flags);

	return 0;
}

static struct pmu pmu_msr = {
	.task_ctx_nr	= perf_sw_context,
	.attr_groups	= attr_groups,
	.event_init	= msr_event_init,
	.add		= msr_event_add,
	.del		= msr_event_del,
	.start		= msr_event_start,
	.stop		= msr_event_stop,
	.read		= msr_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
};
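
/*
 * PERF_PMU_CAP_NO_INTERRUPT tells the perf core that these counters
 * cannot raise an overflow interrupt, so only counting (not sampling)
 * is possible; msr_event_init() rejects a non-zero sample_period for
 * the same reason.
 */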

static int __init intel_msr_init(int idx)
{
	if (boot_cpu_data.x86 != 6)
		return 0;

	switch (boot_cpu_data.x86_model) {
	case 30: /* 45nm Nehalem    */
	case 26: /* 45nm Nehalem-EP */
	case 46: /* 45nm Nehalem-EX */

	case 37: /* 32nm Westmere    */
	case 44: /* 32nm Westmere-EP */
	case 47: /* 32nm Westmere-EX */

	case 42: /* 32nm SandyBridge         */
	case 45: /* 32nm SandyBridge-E/EN/EP */

	case 58: /* 22nm IvyBridge       */
	case 62: /* 22nm IvyBridge-EP/EX */

	case 60: /* 22nm Haswell Core */
	case 63: /* 22nm Haswell Server */
	case 69: /* 22nm Haswell ULT */
	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */

	case 61: /* 14nm Broadwell Core-M */
	case 86: /* 14nm Broadwell Xeon D */
	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
	case 79: /* 14nm Broadwell Server */
		events_attrs[idx++] = &evattr_smi.attr.attr;
		break;

	case 78: /* 14nm Skylake Mobile  */
	case 94: /* 14nm Skylake Desktop */
		events_attrs[idx++] = &evattr_pperf.attr.attr;
		events_attrs[idx++] = &evattr_smi.attr.attr;
		break;

	case 55: /* 22nm Atom "Silvermont"                */
	case 76: /* 14nm Atom "Airmont"                   */
	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
		events_attrs[idx++] = &evattr_smi.attr.attr;
		break;
	}

	events_attrs[idx] = NULL;

	return 0;
}

static int __init amd_msr_init(int idx)
{
	return 0;
}

static int __init msr_init(void)
{
	int err;
	int idx = 1;

	if (boot_cpu_has(X86_FEATURE_APERFMPERF)) {
		events_attrs[idx++] = &evattr_aperf.attr.attr;
		events_attrs[idx++] = &evattr_mperf.attr.attr;
		events_attrs[idx] = NULL;
	}

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_msr_init(idx);
		break;

	case X86_VENDOR_AMD:
		err = amd_msr_init(idx);
		break;

	default:
		err = -ENOTSUPP;
	}

	if (err != 0) {
		pr_cont("no msr PMU driver.\n");
		return 0;
	}

	perf_pmu_register(&pmu_msr, "msr", -1);

	return 0;
}
device_initcall(msr_init);
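
/*
 * Example use from the perf tool once the driver is registered
 * (illustrative; "smi" and "pperf" only appear on supported models):
 *
 *	perf stat -e msr/tsc/,msr/smi/ -a sleep 1
 */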