Commit | Line | Data |
---|---|---|
eec688e1 RB |
1 | /* |
2 | * Copyright © 2015-2016 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |
21 | * IN THE SOFTWARE. | |
22 | * | |
23 | * Authors: | |
24 | * Robert Bragg <robert@sixbynine.org> | |
25 | */ | |
26 | ||
7abbd8d6 RB |
27 | |
28 | /** | |
16d98b31 | 29 | * DOC: i915 Perf Overview |
7abbd8d6 RB |
30 | * |
31 | * Gen graphics supports a large number of performance counters that can help | |
32 | * driver and application developers understand and optimize their use of the | |
33 | * GPU. | |
34 | * | |
35 | * This i915 perf interface enables userspace to configure and open a file | |
36 | * descriptor representing a stream of GPU metrics which can then be read() as | |
37 | * a stream of sample records. | |
38 | * | |
39 | * The interface is particularly suited to exposing buffered metrics that are | |
40 | * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU. | |
41 | * | |
42 | * Streams representing a single context are accessible to applications with a | |
43 | * corresponding drm file descriptor, such that OpenGL can use the interface | |
44 | * without special privileges. Access to system-wide metrics requires root | |
45 | * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
46 | * sysctl option. | |
47 | * | |
16d98b31 RB |
48 | */ |
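As a concrete illustration of the interface described above, the sketch below shows how userspace might open an OA metrics stream and obtain a readable file descriptor. It is a minimal sketch only: the property, flag and ioctl names come from the i915 uapi header, while the metrics set ID, OA report format and exponent values are placeholders that a real tool would choose after consulting sysfs.

```c
/* Hypothetical userspace sketch: open an i915 perf OA stream.
 * The metrics set ID, OA format and exponent are placeholders.
 */
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int open_oa_stream(int drm_fd)
{
	uint64_t properties[] = {
		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
		DRM_I915_PERF_PROP_OA_METRICS_SET, 1,	/* placeholder set ID from sysfs */
		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
		DRM_I915_PERF_PROP_OA_EXPONENT, 16,	/* period = 2^(n+1) timestamp ticks */
	};
	struct drm_i915_perf_open_param param = {
		.flags = I915_PERF_FLAG_FD_CLOEXEC,
		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
		.properties_ptr = (uintptr_t)properties,
	};

	/* Returns a new stream fd that can be read()/poll()ed. */
	return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
}
```

The returned fd can then be read() or poll()ed for the sample records discussed in the comparison with core Perf below.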
49 | ||
50 | /** | |
51 | * DOC: i915 Perf History and Comparison with Core Perf | |
7abbd8d6 RB |
52 | * |
53 | * The interface was initially inspired by the core Perf infrastructure but | |
54 | * some notable differences are: | |
55 | * | |
56 | * i915 perf file descriptors represent a "stream" instead of an "event": a
57 | * perf event primarily corresponds to a single 64bit value, while a stream
58 | * might sample sets of tightly-coupled counters, depending on the | |
59 | * configuration. For example the Gen OA unit isn't designed to support | |
60 | * orthogonal configurations of individual counters; it's configured for a set | |
61 | * of related counters. Samples for an i915 perf stream capturing OA metrics | |
62 | * will include a set of counter values packed in a compact HW specific format. | |
63 | * The OA unit supports a number of different packing formats which can be | |
64 | * selected by the user opening the stream. Perf has support for grouping | |
65 | * events, but each event in the group is configured, validated and | |
66 | * authenticated individually with separate system calls. | |
67 | * | |
68 | * i915 perf stream configurations are provided as an array of u64 (key,value) | |
69 | * pairs, instead of a fixed struct with multiple miscellaneous config members, | |
70 | * interleaved with event-type specific members. | |
71 | * | |
72 | * i915 perf doesn't support exposing metrics via an mmap'd circular buffer. | |
73 | * The supported metrics are being written to memory by the GPU unsynchronized | |
74 | * with the CPU, using HW specific packing formats for counter sets. Sometimes | |
75 | * the constraints on HW configuration require reports to be filtered before it | |
76 | * would be acceptable to expose them to unprivileged applications - to hide | |
77 | * the metrics of other processes/contexts. For these use cases a read() based | |
78 | * interface is a good fit, and provides an opportunity to filter data as it | |
79 | * gets copied from the GPU mapped buffers to userspace buffers. | |
80 | * | |
81 | * | |
16d98b31 RB |
82 | * Issues hit with first prototype based on Core Perf |
83 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |
7abbd8d6 RB |
84 | * |
85 | * The first prototype of this driver was based on the core perf | |
86 | * infrastructure, and while we did make that mostly work, with some changes to | |
87 | * perf, we found we were breaking or working around too many assumptions baked | |
88 | * into perf's currently cpu-centric design.
89 | * | |
90 | * In the end we didn't see a clear benefit to making perf's implementation and | |
91 | * interface more complex by changing design assumptions while we knew we still | |
92 | * wouldn't be able to use any existing perf based userspace tools. | |
93 | * | |
94 | * Also considering the Gen-specific nature of the Observability hardware and
95 | * how userspace will sometimes need to combine i915 perf OA metrics with
96 | * side-band OA data captured via MI_REPORT_PERF_COUNT commands, we're
97 | * expecting the interface to be used by a platform-specific userspace such as
98 | * OpenGL or tools. This is to say: we aren't inherently missing out on having
99 | * a standard vendor/architecture agnostic interface by not using perf.
100 | * | |
101 | * | |
102 | * For posterity, in case we might re-visit trying to adapt core perf to be | |
103 | * better suited to exposing i915 metrics, these were the main pain points we
104 | * hit: | |
105 | * | |
106 | * - The perf based OA PMU driver broke some significant design assumptions: | |
107 | * | |
108 | * Existing perf pmus are used for profiling work on a cpu and we were | |
109 | * introducing the idea of _IS_DEVICE pmus with different security | |
110 | * implications, the need to fake cpu-related data (such as user/kernel | |
111 | * registers) to fit with perf's current design, and adding _DEVICE records | |
112 | * as a way to forward device-specific status records. | |
113 | * | |
114 | * The OA unit writes reports of counters into a circular buffer, without | |
115 | * involvement from the CPU, making our PMU driver the first of its kind.
116 | * | |
117 | * Given the way we were periodically forwarding data from the GPU-mapped OA
118 | * buffer to perf's buffer, those bursts of sample writes looked to perf like | |
119 | * we were sampling too fast and so we had to subvert its throttling checks. | |
120 | * | |
121 | * Perf supports groups of counters and allows those to be read via | |
122 | * transactions internally but transactions currently seem designed to be | |
123 | * explicitly initiated from the cpu (say in response to a userspace read()) | |
124 | * and while we could pull a report out of the OA buffer we can't | |
125 | * trigger a report from the cpu on demand. | |
126 | * | |
127 | * Related to being report based: the OA counters are configured in HW as a
128 | * set while perf generally expects counter configurations to be orthogonal. | |
129 | * Although counters can be associated with a group leader as they are | |
130 | * opened, there's no clear precedent for being able to provide group-wide | |
131 | * configuration attributes (for example we want to let userspace choose the | |
132 | * OA unit report format used to capture all counters in a set, or specify a | |
133 | * GPU context to filter metrics on). We avoided using perf's grouping | |
134 | * feature and forwarded OA reports to userspace via perf's 'raw' sample | |
135 | * field. This suited our userspace well considering how coupled the counters | |
136 | * are when it comes to normalization. It would be inconvenient to split
137 | * counters up into separate events, only to require userspace to recombine | |
138 | * them. For Mesa it's also convenient to be forwarded raw, periodic reports | |
139 | * for combining with the side-band raw reports it captures using | |
140 | * MI_REPORT_PERF_COUNT commands. | |
141 | * | |
16d98b31 | 142 | * - As a side note on perf's grouping feature: there was also some concern
7abbd8d6 RB |
143 | * that using PERF_FORMAT_GROUP as a way to pack together counter values |
144 | * would quite drastically inflate our sample sizes, which would likely | |
145 | * lower the effective sampling resolutions we could use when the available | |
146 | * memory bandwidth is limited. | |
147 | * | |
148 | * With the OA unit's report formats, counters are packed together as 32 | |
149 | * or 40bit values, with the largest report size being 256 bytes. | |
150 | * | |
151 | * PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a | |
152 | * documented ordering to the values, implying PERF_FORMAT_ID must also be | |
153 | * used to add a 64bit ID before each value; giving 16 bytes per counter. | |
154 | * | |
155 | * Related to counter orthogonality: we can't time-share the OA unit, while
156 | * event scheduling is a central design idea within perf for allowing | |
157 | * userspace to open + enable more events than can be configured in HW at any | |
158 | * one time. The OA unit is not designed to allow re-configuration while in | |
159 | * use. We can't reconfigure the OA unit without losing internal OA unit | |
160 | * state which we can't access explicitly to save and restore. Reconfiguring | |
161 | * the OA unit is also relatively slow, involving ~100 register writes. From | |
162 | * userspace Mesa also depends on a stable OA configuration when emitting | |
163 | * MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be | |
164 | * disabled while there are outstanding MI_RPC commands lest we hang the | |
165 | * command streamer. | |
166 | * | |
167 | * The contents of sample records aren't extensible by device drivers (i.e. | |
168 | * the sample_type bits). As an example, Sourab Gupta had been looking to
169 | * attach GPU timestamps to our OA samples. We were shoehorning OA reports | |
170 | * into sample records by using the 'raw' field, but it's tricky to pack more | |
171 | * than one thing into this field because events/core.c currently only lets a | |
172 | * pmu give a single raw data pointer plus len which will be copied into the | |
173 | * ring buffer. To include more than the OA report we'd have to copy the | |
174 | * report into an intermediate larger buffer. I'd been considering allowing a | |
175 | * vector of data+len values to be specified for copying the raw data, but | |
176 | * it felt like a kludge to be using the raw field for this purpose.
177 | * | |
178 | * - It felt like our perf based PMU was making some technical compromises | |
179 | * just for the sake of using perf: | |
180 | * | |
181 | * perf_event_open() requires events to either relate to a pid or a specific | |
182 | * cpu core, while our device pmu related to neither. Events opened with a | |
183 | * pid will be automatically enabled/disabled according to the scheduling of | |
184 | * that process - so not appropriate for us. When an event is related to a | |
185 | * cpu id, perf ensures pmu methods will be invoked via an inter-processor
186 | * interrupt on that core. To avoid invasive changes our userspace opened OA | |
187 | * perf events for a specific cpu. This was workable but it meant the | |
188 | * majority of the OA driver ran in atomic context, including all OA report | |
189 | * forwarding, which wasn't really necessary in our case and seemed to make
190 | * our locking requirements somewhat complex as we handled the interaction | |
191 | * with the rest of the i915 driver. | |
192 | */ | |
193 | ||
eec688e1 | 194 | #include <linux/anon_inodes.h> |
d7965152 | 195 | #include <linux/sizes.h> |
f89823c2 | 196 | #include <linux/uuid.h> |
eec688e1 RB |
197 | |
198 | #include "i915_drv.h" | |
d7965152 | 199 | #include "i915_oa_hsw.h" |
19f81df2 RB |
200 | #include "i915_oa_bdw.h" |
201 | #include "i915_oa_chv.h" | |
202 | #include "i915_oa_sklgt2.h" | |
203 | #include "i915_oa_sklgt3.h" | |
204 | #include "i915_oa_sklgt4.h" | |
205 | #include "i915_oa_bxt.h" | |
6c5c1d89 LL |
206 | #include "i915_oa_kblgt2.h" |
207 | #include "i915_oa_kblgt3.h" | |
28c7ef9e | 208 | #include "i915_oa_glk.h" |
22ea4f35 | 209 | #include "i915_oa_cflgt2.h" |
4407eaa9 | 210 | #include "i915_oa_cflgt3.h" |
d7965152 RB |
211 | |
212 | /* HW requires this to be a power of two, between 128k and 16M, though the
213 | * driver is currently generally designed assuming the largest 16M size is
214 | * used such that the overflow cases are unlikely in normal operation.
215 | */ | |
216 | #define OA_BUFFER_SIZE SZ_16M | |
217 | ||
218 | #define OA_TAKEN(tail, head) ((tail - head) & (OA_BUFFER_SIZE - 1)) | |
219 | ||
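Because OA_BUFFER_SIZE is a power of two, OA_TAKEN() gives the amount of buffered data even when the tail has wrapped behind the head: the unsigned subtraction underflows and the mask folds the result back into the ring. A small worked example with arbitrary values:

```c
/* Illustration of OA_TAKEN() wraparound behaviour (values are arbitrary). */
static u32 oa_taken_example(void)
{
	u32 head = OA_BUFFER_SIZE - 64;	/* 64 bytes before the end of the ring */
	u32 tail = 192;			/* tail has wrapped back to the start  */

	/* (tail - head) underflows, but masking with (OA_BUFFER_SIZE - 1)
	 * recovers the true amount of buffered data: 64 + 192 == 256 bytes.
	 */
	return OA_TAKEN(tail, head);
}
```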
0dd860cf RB |
220 | /** |
221 | * DOC: OA Tail Pointer Race | |
222 | * | |
223 | * There's a HW race condition between OA unit tail pointer register updates and | |
d7965152 | 224 | * writes to memory whereby the tail pointer can sometimes get ahead of what's |
0dd860cf RB |
225 | * been written out to the OA buffer so far (in terms of what's visible to the |
226 | * CPU). | |
227 | * | |
228 | * Although this can be observed explicitly while copying reports to userspace | |
229 | * by checking for a zeroed report-id field in tail reports, we want to account | |
19f81df2 | 230 | * for this earlier, as part of oa_buffer_check_unlocked() to avoid lots of redundant
0dd860cf RB |
231 | * read() attempts. |
232 | * | |
233 | * In effect we define a tail pointer for reading that lags the real tail | |
234 | * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough | |
235 | * time for the corresponding reports to become visible to the CPU. | |
236 | * | |
237 | * To manage this we actually track two tail pointers: | |
238 | * 1) An 'aging' tail with an associated timestamp that is tracked until we | |
239 | * can trust the corresponding data is visible to the CPU; at which point | |
240 | * it is considered 'aged'. | |
241 | * 2) An 'aged' tail that can be used for read()ing. | |
d7965152 | 242 | * |
0dd860cf | 243 | * The two separate pointers let us decouple read()s from tail pointer aging. |
d7965152 | 244 | * |
0dd860cf RB |
245 | * The tail pointers are checked and updated at a limited rate within a hrtimer |
246 | * callback (the same callback that is used for delivering POLLIN events).
d7965152 | 247 | * |
0dd860cf RB |
248 | * Initially the tails are marked invalid with %INVALID_TAIL_PTR which |
249 | * indicates that an updated tail pointer is needed. | |
250 | * | |
251 | * Most of the implementation details for this workaround are in | |
19f81df2 | 252 | * oa_buffer_check_unlocked() and _append_oa_reports() |
0dd860cf RB |
253 | * |
254 | * Note for posterity: previously the driver used to define an effective tail | |
255 | * pointer that lagged the real pointer by a 'tail margin' measured in bytes | |
256 | * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency. | |
257 | * This was flawed considering that the OA unit may also automatically generate | |
258 | * non-periodic reports (such as on context switch) or the OA unit may be | |
259 | * enabled without any periodic sampling. | |
d7965152 RB |
260 | */ |
261 | #define OA_TAIL_MARGIN_NSEC 100000ULL | |
0dd860cf | 262 | #define INVALID_TAIL_PTR 0xffffffff |
d7965152 RB |
263 | |
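A distilled sketch of the scheme described above follows. The real implementation lives in oa_buffer_check_unlocked() further down, keeps this state in dev_priv->perf.oa.oa_buffer under ptr_lock, and additionally bounds-checks the hardware tail; the struct and function names here are illustrative only.

```c
/* Illustrative sketch of the aging/aged tail scheme (not the driver's code). */
struct oa_tail_state {
	u32 aged_tail;		/* safe for read()s, or INVALID_TAIL_PTR */
	u32 aging_tail;		/* waiting to be trusted, or INVALID_TAIL_PTR */
	u64 aging_timestamp;	/* when aging_tail was sampled from HW */
};

static bool oa_tails_update(struct oa_tail_state *s, u32 hw_tail, u32 head,
			    u64 now, int report_size)
{
	/* Promote the aging tail once its reports have had time to land. */
	if (s->aging_tail != INVALID_TAIL_PTR &&
	    (now - s->aging_timestamp) > OA_TAIL_MARGIN_NSEC) {
		s->aged_tail = s->aging_tail;
		s->aging_tail = INVALID_TAIL_PTR;
	}

	/* Start aging a new HW tail once it covers at least one more report. */
	if (s->aging_tail == INVALID_TAIL_PTR &&
	    (s->aged_tail == INVALID_TAIL_PTR ||
	     OA_TAKEN(hw_tail, s->aged_tail) >= report_size)) {
		s->aging_tail = hw_tail;
		s->aging_timestamp = now;
	}

	/* Data is available once an aged tail is ahead of the read head. */
	return s->aged_tail != INVALID_TAIL_PTR &&
	       OA_TAKEN(s->aged_tail, head) >= report_size;
}
```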
264 | /* frequency for checking whether the OA unit has written new reports to the | |
265 | * circular OA buffer... | |
266 | */ | |
267 | #define POLL_FREQUENCY 200 | |
268 | #define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY) | |
269 | ||
ccdf6341 RB |
270 | /* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */ |
271 | static int zero; | |
272 | static int one = 1; | |
273 | static u32 i915_perf_stream_paranoid = true; | |
274 | ||
d7965152 RB |
275 | /* The maximum exponent the hardware accepts is 63 (essentially it selects one |
276 | * of the 64bit timestamp bits to trigger reports from) but there's currently | |
277 | * no known use case for sampling as infrequently as once per 47 thousand years. | |
278 | * | |
279 | * Since the timestamps included in OA reports are only 32bits it seems | |
280 | * reasonable to limit the OA exponent where it's still possible to account for | |
281 | * overflow in OA report timestamps. | |
282 | */ | |
283 | #define OA_EXPONENT_MAX 31 | |
284 | ||
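The exponent selects which timestamp bit triggers a report, so the sampling period doubles with each step: period = 2^(exponent + 1) timestamp ticks. A simplified sketch of that conversion is shown below; the driver's own helper takes dev_priv and reads the device-specific timestamp frequency, so the standalone form here is illustrative only.

```c
/* Sketch: convert an OA sampling exponent to a period in nanoseconds.
 * timestamp_frequency is device specific, e.g. 12500000 (12.5MHz) on Haswell,
 * giving a minimum period of 160ns at exponent 0. div_u64() is from
 * <linux/math64.h>.
 */
static u64 oa_exponent_to_period_ns(u64 timestamp_frequency, int exponent)
{
	/* Period = 2^(exponent + 1) timestamp ticks. */
	return div_u64(1000000000ULL * (2ULL << exponent), timestamp_frequency);
}
```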
285 | #define INVALID_CTX_ID 0xffffffff | |
286 | ||
19f81df2 RB |
287 | /* On Gen8+ automatically triggered OA reports include a 'reason' field... */ |
288 | #define OAREPORT_REASON_MASK 0x3f | |
289 | #define OAREPORT_REASON_SHIFT 19 | |
290 | #define OAREPORT_REASON_TIMER (1<<0) | |
291 | #define OAREPORT_REASON_CTX_SWITCH (1<<3) | |
292 | #define OAREPORT_REASON_CLK_RATIO (1<<5) | |
293 | ||
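For reference, the reason flags occupy a 6-bit field starting at bit 19 of the first dword of a Gen8+ report, so a report's trigger can be decoded roughly as below; oa_report_reason() is an illustrative helper, not part of the driver, which open-codes the same expression in its read path.

```c
/* Decode the 'reason' field from the first dword of a Gen8+ OA report.
 * A zero value is never produced by the hardware, so it identifies a slot
 * that hasn't been written yet (or was already consumed and cleared).
 */
static inline u32 oa_report_reason(const u32 *report32)
{
	return (report32[0] >> OAREPORT_REASON_SHIFT) & OAREPORT_REASON_MASK;
}
```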
d7965152 | 294 | |
00319ba0 RB |
295 | /* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate |
296 | * | |
155e941f RB |
297 | * The highest sampling frequency we can theoretically program the OA unit |
298 | * with is always half the timestamp frequency: e.g. 6.25MHz for Haswell.
299 | * | |
300 | * Initialized just before we register the sysctl parameter. | |
00319ba0 | 301 | */ |
155e941f | 302 | static int oa_sample_rate_hard_limit; |
00319ba0 RB |
303 | |
304 | /* Theoretically we can program the OA unit to sample every 160ns but don't | |
305 | * allow that by default unless root... | |
306 | * | |
307 | * The default threshold of 100000Hz is based on perf's similar | |
308 | * kernel.perf_event_max_sample_rate sysctl parameter. | |
309 | */ | |
310 | static u32 i915_oa_max_sample_rate = 100000; | |
311 | ||
d7965152 RB |
312 | /* XXX: beware if future OA HW adds new report formats that the current |
313 | * code assumes all reports have a power-of-two size and ~(size - 1) can | |
314 | * be used as a mask to align the OA tail pointer. | |
315 | */ | |
316 | static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = { | |
317 | [I915_OA_FORMAT_A13] = { 0, 64 }, | |
318 | [I915_OA_FORMAT_A29] = { 1, 128 }, | |
319 | [I915_OA_FORMAT_A13_B8_C8] = { 2, 128 }, | |
320 | /* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */ | |
321 | [I915_OA_FORMAT_B4_C8] = { 4, 64 }, | |
322 | [I915_OA_FORMAT_A45_B8_C8] = { 5, 256 }, | |
323 | [I915_OA_FORMAT_B4_C8_A16] = { 6, 128 }, | |
324 | [I915_OA_FORMAT_C4_B8] = { 7, 64 }, | |
325 | }; | |
326 | ||
19f81df2 RB |
327 | static struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = { |
328 | [I915_OA_FORMAT_A12] = { 0, 64 }, | |
329 | [I915_OA_FORMAT_A12_B8_C8] = { 2, 128 }, | |
330 | [I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 }, | |
331 | [I915_OA_FORMAT_C4_B8] = { 7, 64 }, | |
332 | }; | |
333 | ||
d7965152 | 334 | #define SAMPLE_OA_REPORT (1<<0) |
eec688e1 | 335 | |
16d98b31 RB |
336 | /** |
337 | * struct perf_open_properties - for validated properties given to open a stream | |
338 | * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags | |
339 | * @single_context: Whether a single or all gpu contexts should be monitored | |
340 | * @ctx_handle: A gem ctx handle for use with @single_context | |
341 | * @metrics_set: An ID for an OA unit metric set advertised via sysfs | |
342 | * @oa_format: An OA unit HW report format | |
343 | * @oa_periodic: Whether to enable periodic OA unit sampling | |
344 | * @oa_period_exponent: The OA unit sampling period is derived from this | |
345 | * | |
346 | * As read_properties_unlocked() enumerates and validates the properties given | |
347 | * to open a stream of metrics the configuration is built up in the structure | |
348 | * which starts out zero initialized. | |
349 | */ | |
eec688e1 RB |
350 | struct perf_open_properties { |
351 | u32 sample_flags; | |
352 | ||
353 | u64 single_context:1; | |
354 | u64 ctx_handle; | |
d7965152 RB |
355 | |
356 | /* OA sampling state */ | |
357 | int metrics_set; | |
358 | int oa_format; | |
359 | bool oa_periodic; | |
360 | int oa_period_exponent; | |
361 | }; | |
362 | ||
f89823c2 LL |
363 | static void free_oa_config(struct drm_i915_private *dev_priv, |
364 | struct i915_oa_config *oa_config) | |
365 | { | |
366 | if (!PTR_ERR(oa_config->flex_regs)) | |
367 | kfree(oa_config->flex_regs); | |
368 | if (!PTR_ERR(oa_config->b_counter_regs)) | |
369 | kfree(oa_config->b_counter_regs); | |
370 | if (!PTR_ERR(oa_config->mux_regs)) | |
371 | kfree(oa_config->mux_regs); | |
372 | kfree(oa_config); | |
373 | } | |
374 | ||
375 | static void put_oa_config(struct drm_i915_private *dev_priv, | |
376 | struct i915_oa_config *oa_config) | |
377 | { | |
378 | if (!atomic_dec_and_test(&oa_config->ref_count)) | |
379 | return; | |
380 | ||
381 | free_oa_config(dev_priv, oa_config); | |
382 | } | |
383 | ||
384 | static int get_oa_config(struct drm_i915_private *dev_priv, | |
385 | int metrics_set, | |
386 | struct i915_oa_config **out_config) | |
387 | { | |
388 | int ret; | |
389 | ||
390 | if (metrics_set == 1) { | |
391 | *out_config = &dev_priv->perf.oa.test_config; | |
392 | atomic_inc(&dev_priv->perf.oa.test_config.ref_count); | |
393 | return 0; | |
394 | } | |
395 | ||
396 | ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock); | |
397 | if (ret) | |
398 | return ret; | |
399 | ||
400 | *out_config = idr_find(&dev_priv->perf.metrics_idr, metrics_set); | |
401 | if (!*out_config) | |
402 | ret = -EINVAL; | |
403 | else | |
404 | atomic_inc(&(*out_config)->ref_count); | |
405 | ||
406 | mutex_unlock(&dev_priv->perf.metrics_lock); | |
407 | ||
408 | return ret; | |
409 | } | |
410 | ||
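Note that get_oa_config() returns the config with an extra reference held, so every successful lookup must eventually be balanced by put_oa_config(). A hypothetical caller (use_oa_config() is illustrative, not a function in this file) would follow this pattern:

```c
/* Hypothetical caller: look up a metric set and drop the reference when done. */
static int use_oa_config(struct drm_i915_private *dev_priv, int metrics_set)
{
	struct i915_oa_config *oa_config;
	int ret;

	ret = get_oa_config(dev_priv, metrics_set, &oa_config);
	if (ret)
		return ret;

	/* ... program the MUX / boolean counter registers from oa_config ... */

	put_oa_config(dev_priv, oa_config);
	return 0;
}
```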
19f81df2 RB |
411 | static u32 gen8_oa_hw_tail_read(struct drm_i915_private *dev_priv) |
412 | { | |
413 | return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK; | |
414 | } | |
415 | ||
416 | static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv) | |
417 | { | |
418 | u32 oastatus1 = I915_READ(GEN7_OASTATUS1); | |
419 | ||
420 | return oastatus1 & GEN7_OASTATUS1_TAIL_MASK; | |
421 | } | |
422 | ||
0dd860cf | 423 | /** |
19f81df2 | 424 | * oa_buffer_check_unlocked - check for data and update tail ptr state |
0dd860cf | 425 | * @dev_priv: i915 device instance |
d7965152 | 426 | * |
0dd860cf RB |
427 | * This is either called via fops (for blocking reads in user ctx) or the poll |
428 | * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check | |
429 | * if there is data available for userspace to read. | |
d7965152 | 430 | * |
0dd860cf RB |
431 | * This function is central to providing a workaround for the OA unit tail |
432 | * pointer having a race with respect to what data is visible to the CPU. | |
433 | * It is responsible for reading tail pointers from the hardware and giving | |
434 | * the pointers time to 'age' before they are made available for reading. | |
435 | * (See description of OA_TAIL_MARGIN_NSEC above for further details.) | |
436 | * | |
437 | * Besides returning true when there is data available to read() this function | |
438 | * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp | |
439 | * and .aged_tail_idx state used for reading. | |
440 | * | |
441 | * Note: It's safe to read OA config state here unlocked, assuming that this is | |
442 | * only called while the stream is enabled, while the global OA configuration | |
443 | * can't be modified. | |
444 | * | |
445 | * Returns: %true if the OA buffer contains data, else %false | |
d7965152 | 446 | */ |
19f81df2 | 447 | static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv) |
d7965152 RB |
448 | { |
449 | int report_size = dev_priv->perf.oa.oa_buffer.format_size; | |
0dd860cf RB |
450 | unsigned long flags; |
451 | unsigned int aged_idx; | |
0dd860cf RB |
452 | u32 head, hw_tail, aged_tail, aging_tail; |
453 | u64 now; | |
454 | ||
455 | /* We have to consider the (unlikely) possibility that read() errors | |
456 | * could result in an OA buffer reset which might reset the head, | |
457 | * tails[] and aged_tail state. | |
458 | */ | |
459 | spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); | |
460 | ||
461 | /* NB: The head we observe here might effectively be a little out of | |
462 | * date (between head and tails[aged_idx].offset if there is currently | |
463 | * a read() in progress).
464 | */ | |
465 | head = dev_priv->perf.oa.oa_buffer.head; | |
466 | ||
467 | aged_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx; | |
468 | aged_tail = dev_priv->perf.oa.oa_buffer.tails[aged_idx].offset; | |
469 | aging_tail = dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset; | |
470 | ||
19f81df2 | 471 | hw_tail = dev_priv->perf.oa.ops.oa_hw_tail_read(dev_priv); |
0dd860cf RB |
472 | |
473 | /* The tail pointer increases in 64 byte increments, | |
474 | * not in report_size steps... | |
475 | */ | |
476 | hw_tail &= ~(report_size - 1); | |
477 | ||
478 | now = ktime_get_mono_fast_ns(); | |
479 | ||
4117ebc7 RB |
480 | /* Update the aged tail |
481 | * | |
482 | * Flip the tail pointer available for read()s once the aging tail is | |
483 | * old enough to trust that the corresponding data will be visible to | |
484 | * the CPU... | |
485 | * | |
486 | * Do this before updating the aging pointer in case we may be able to | |
487 | * immediately start aging a new pointer too (if new data has become | |
488 | * available) without needing to wait for a later hrtimer callback. | |
489 | */ | |
490 | if (aging_tail != INVALID_TAIL_PTR && | |
491 | ((now - dev_priv->perf.oa.oa_buffer.aging_timestamp) > | |
492 | OA_TAIL_MARGIN_NSEC)) { | |
19f81df2 | 493 | |
4117ebc7 RB |
494 | aged_idx ^= 1; |
495 | dev_priv->perf.oa.oa_buffer.aged_tail_idx = aged_idx; | |
496 | ||
497 | aged_tail = aging_tail; | |
498 | ||
499 | /* Mark that we need a new pointer to start aging... */ | |
500 | dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR; | |
501 | aging_tail = INVALID_TAIL_PTR; | |
502 | } | |
503 | ||
0dd860cf RB |
504 | /* Update the aging tail |
505 | * | |
506 | * We throttle aging tail updates until we have a new tail that | |
507 | * represents >= one report more data than is already available for | |
508 | * reading. This ensures there will be enough data for a successful | |
509 | * read once this new pointer has aged and ensures we will give the new | |
510 | * pointer time to age. | |
511 | */ | |
512 | if (aging_tail == INVALID_TAIL_PTR && | |
513 | (aged_tail == INVALID_TAIL_PTR || | |
514 | OA_TAKEN(hw_tail, aged_tail) >= report_size)) { | |
515 | struct i915_vma *vma = dev_priv->perf.oa.oa_buffer.vma; | |
516 | u32 gtt_offset = i915_ggtt_offset(vma); | |
517 | ||
518 | /* Be paranoid and do a bounds check on the pointer read back | |
519 | * from hardware, just in case some spurious hardware condition | |
520 | * could put the tail out of bounds... | |
521 | */ | |
522 | if (hw_tail >= gtt_offset && | |
523 | hw_tail < (gtt_offset + OA_BUFFER_SIZE)) { | |
524 | dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset = | |
525 | aging_tail = hw_tail; | |
526 | dev_priv->perf.oa.oa_buffer.aging_timestamp = now; | |
527 | } else { | |
528 | DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n", | |
529 | hw_tail); | |
530 | } | |
531 | } | |
532 | ||
0dd860cf RB |
533 | spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); |
534 | ||
535 | return aged_tail == INVALID_TAIL_PTR ? | |
536 | false : OA_TAKEN(aged_tail, head) >= report_size; | |
d7965152 RB |
537 | } |
538 | ||
539 | /** | |
16d98b31 RB |
540 | * append_oa_status - Appends a status record to a userspace read() buffer. |
541 | * @stream: An i915-perf stream opened for OA metrics | |
542 | * @buf: destination buffer given by userspace | |
543 | * @count: the number of bytes userspace wants to read | |
544 | * @offset: (inout): the current position for writing into @buf | |
545 | * @type: The kind of status to report to userspace | |
546 | * | |
547 | * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`) | |
548 | * into the userspace read() buffer. | |
549 | * | |
550 | * The @buf @offset will only be updated on success. | |
551 | * | |
552 | * Returns: 0 on success, negative error code on failure. | |
d7965152 RB |
553 | */ |
554 | static int append_oa_status(struct i915_perf_stream *stream, | |
555 | char __user *buf, | |
556 | size_t count, | |
557 | size_t *offset, | |
558 | enum drm_i915_perf_record_type type) | |
559 | { | |
560 | struct drm_i915_perf_record_header header = { type, 0, sizeof(header) }; | |
561 | ||
562 | if ((count - *offset) < header.size) | |
563 | return -ENOSPC; | |
564 | ||
565 | if (copy_to_user(buf + *offset, &header, sizeof(header))) | |
566 | return -EFAULT; | |
567 | ||
568 | (*offset) += header.size; | |
569 | ||
570 | return 0; | |
571 | } | |
572 | ||
573 | /** | |
16d98b31 RB |
574 | * append_oa_sample - Copies single OA report into userspace read() buffer. |
575 | * @stream: An i915-perf stream opened for OA metrics | |
576 | * @buf: destination buffer given by userspace | |
577 | * @count: the number of bytes userspace wants to read | |
578 | * @offset: (inout): the current position for writing into @buf | |
579 | * @report: A single OA report to (optionally) include as part of the sample | |
580 | * | |
581 | * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*` | |
582 | * properties when opening a stream, tracked as `stream->sample_flags`. This | |
583 | * function copies the requested components of a single sample to the given | |
584 | * read() @buf. | |
585 | * | |
586 | * The @buf @offset will only be updated on success. | |
587 | * | |
588 | * Returns: 0 on success, negative error code on failure. | |
d7965152 RB |
589 | */ |
590 | static int append_oa_sample(struct i915_perf_stream *stream, | |
591 | char __user *buf, | |
592 | size_t count, | |
593 | size_t *offset, | |
594 | const u8 *report) | |
595 | { | |
596 | struct drm_i915_private *dev_priv = stream->dev_priv; | |
597 | int report_size = dev_priv->perf.oa.oa_buffer.format_size; | |
598 | struct drm_i915_perf_record_header header; | |
599 | u32 sample_flags = stream->sample_flags; | |
600 | ||
601 | header.type = DRM_I915_PERF_RECORD_SAMPLE; | |
602 | header.pad = 0; | |
603 | header.size = stream->sample_size; | |
604 | ||
605 | if ((count - *offset) < header.size) | |
606 | return -ENOSPC; | |
607 | ||
608 | buf += *offset; | |
609 | if (copy_to_user(buf, &header, sizeof(header))) | |
610 | return -EFAULT; | |
611 | buf += sizeof(header); | |
612 | ||
613 | if (sample_flags & SAMPLE_OA_REPORT) { | |
614 | if (copy_to_user(buf, report, report_size)) | |
615 | return -EFAULT; | |
616 | } | |
617 | ||
618 | (*offset) += header.size; | |
619 | ||
620 | return 0; | |
621 | } | |
622 | ||
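On the userspace side, each read() on the stream fd returns a sequence of records, each starting with a struct drm_i915_perf_record_header whose size field covers the header plus payload. A minimal, hypothetical parser might walk one read() buffer like this (parse_records and its arguments are illustrative, not part of the driver):

```c
/* Hypothetical userspace sketch: walk the records returned by one read(). */
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <drm/i915_drm.h>

static void parse_records(const uint8_t *buf, size_t len)
{
	size_t offset = 0;

	while (offset + sizeof(struct drm_i915_perf_record_header) <= len) {
		struct drm_i915_perf_record_header header;

		memcpy(&header, buf + offset, sizeof(header));
		if (header.size < sizeof(header) || offset + header.size > len)
			break;	/* malformed or truncated record */

		switch (header.type) {
		case DRM_I915_PERF_RECORD_SAMPLE:
			/* Payload is the raw OA report, in the format that was
			 * requested when the stream was opened.
			 */
			break;
		case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
		case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
			/* Status-only records: no payload to parse. */
			break;
		}

		offset += header.size;	/* size includes the header itself */
	}
}
```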
19f81df2 RB |
623 | /** |
624 | * gen8_append_oa_reports - Copies all buffered OA reports into userspace read() buffer.
625 | * @stream: An i915-perf stream opened for OA metrics | |
626 | * @buf: destination buffer given by userspace | |
627 | * @count: the number of bytes userspace wants to read | |
628 | * @offset: (inout): the current position for writing into @buf | |
629 | * | |
630 | * Notably any error condition resulting in a short read (-%ENOSPC or | |
631 | * -%EFAULT) will be returned even though one or more records may | |
632 | * have been successfully copied. In this case it's up to the caller | |
633 | * to decide if the error should be squashed before returning to | |
634 | * userspace. | |
635 | * | |
636 | * Note: reports are consumed from the head, and appended to the | |
637 | * tail, so the tail chases the head?... If you think that's mad | |
638 | * and back-to-front you're not alone, but this follows the | |
639 | * Gen PRM naming convention. | |
640 | * | |
641 | * Returns: 0 on success, negative error code on failure. | |
642 | */ | |
643 | static int gen8_append_oa_reports(struct i915_perf_stream *stream, | |
644 | char __user *buf, | |
645 | size_t count, | |
646 | size_t *offset) | |
647 | { | |
648 | struct drm_i915_private *dev_priv = stream->dev_priv; | |
649 | int report_size = dev_priv->perf.oa.oa_buffer.format_size; | |
650 | u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr; | |
651 | u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma); | |
652 | u32 mask = (OA_BUFFER_SIZE - 1); | |
653 | size_t start_offset = *offset; | |
654 | unsigned long flags; | |
655 | unsigned int aged_tail_idx; | |
656 | u32 head, tail; | |
657 | u32 taken; | |
658 | int ret = 0; | |
659 | ||
660 | if (WARN_ON(!stream->enabled)) | |
661 | return -EIO; | |
662 | ||
663 | spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); | |
664 | ||
665 | head = dev_priv->perf.oa.oa_buffer.head; | |
666 | aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx; | |
667 | tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset; | |
668 | ||
669 | spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); | |
670 | ||
671 | /* | |
672 | * An invalid tail pointer here means we're still waiting for the poll | |
673 | * hrtimer callback to give us a pointer | |
674 | */ | |
675 | if (tail == INVALID_TAIL_PTR) | |
676 | return -EAGAIN; | |
677 | ||
678 | /* | |
679 | * NB: oa_buffer.head/tail include the gtt_offset which we don't want | |
680 | * while indexing relative to oa_buf_base. | |
681 | */ | |
682 | head -= gtt_offset; | |
683 | tail -= gtt_offset; | |
684 | ||
685 | /* | |
686 | * An out of bounds or misaligned head or tail pointer implies a driver | |
687 | * bug since we validate + align the tail pointers we read from the | |
688 | * hardware and we are in full control of the head pointer which should | |
689 | * only be incremented by multiples of the report size (notably also | |
690 | * all a power of two). | |
691 | */ | |
692 | if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size || | |
693 | tail > OA_BUFFER_SIZE || tail % report_size, | |
694 | "Inconsistent OA buffer pointers: head = %u, tail = %u\n", | |
695 | head, tail)) | |
696 | return -EIO; | |
697 | ||
698 | ||
699 | for (/* none */; | |
700 | (taken = OA_TAKEN(tail, head)); | |
701 | head = (head + report_size) & mask) { | |
702 | u8 *report = oa_buf_base + head; | |
703 | u32 *report32 = (void *)report; | |
704 | u32 ctx_id; | |
705 | u32 reason; | |
706 | ||
707 | /* | |
708 | * All the report sizes factor neatly into the buffer | |
709 | * size so we never expect to see a report split | |
710 | * between the beginning and end of the buffer. | |
711 | * | |
712 | * Given the initial alignment check a misalignment | |
713 | * here would imply a driver bug that would result | |
714 | * in an overrun. | |
715 | */ | |
716 | if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) { | |
717 | DRM_ERROR("Spurious OA head ptr: non-integral report offset\n"); | |
718 | break; | |
719 | } | |
720 | ||
721 | /* | |
722 | * The reason field includes flags identifying what | |
723 | * triggered this specific report (mostly timer | |
724 | * triggered or e.g. due to a context switch). | |
725 | * | |
726 | * This field is never expected to be zero so we can | |
727 | * check that the report isn't invalid before copying | |
728 | * it to userspace... | |
729 | */ | |
730 | reason = ((report32[0] >> OAREPORT_REASON_SHIFT) & | |
731 | OAREPORT_REASON_MASK); | |
732 | if (reason == 0) { | |
733 | if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs)) | |
734 | DRM_NOTE("Skipping spurious, invalid OA report\n"); | |
735 | continue; | |
736 | } | |
737 | ||
738 | /* | |
739 | * XXX: Just keep the lower 21 bits for now since I'm not | |
740 | * entirely sure if the HW touches any of the higher bits in | |
741 | * this field | |
742 | */ | |
743 | ctx_id = report32[2] & 0x1fffff; | |
744 | ||
745 | /* | |
746 | * Squash whatever is in the CTX_ID field if it's marked as | |
747 | * invalid to be sure we avoid false-positive, single-context | |
748 | * filtering below... | |
749 | * | |
750 | * Note: that we don't clear the valid_ctx_bit so userspace can | |
751 | * understand that the ID has been squashed by the kernel. | |
752 | */ | |
753 | if (!(report32[0] & dev_priv->perf.oa.gen8_valid_ctx_bit)) | |
754 | ctx_id = report32[2] = INVALID_CTX_ID; | |
755 | ||
756 | /* | |
757 | * NB: For Gen 8 the OA unit no longer supports clock gating | |
758 | * off for a specific context and the kernel can't securely | |
759 | * stop the counters from updating as system-wide / global | |
760 | * values. | |
761 | * | |
762 | * Automatic reports now include a context ID so reports can be | |
763 | * filtered on the cpu but it's not worth trying to | |
764 | * automatically subtract/hide counter progress for other | |
765 | * contexts while filtering since we can't stop userspace | |
766 | * issuing MI_REPORT_PERF_COUNT commands which would still | |
767 | * provide a side-band view of the real values. | |
768 | * | |
769 | * To allow userspace (such as Mesa/GL_INTEL_performance_query) | |
770 | * to normalize counters for a single filtered context, it needs
771 | * to be forwarded bookend context-switch reports so that it
772 | * can track switches in between MI_REPORT_PERF_COUNT commands | |
773 | * and can itself subtract/ignore the progress of counters | |
774 | * associated with other contexts. Note that the hardware | |
775 | * automatically triggers reports when switching to a new | |
776 | * context which are tagged with the ID of the newly active | |
777 | * context. To avoid the complexity (and likely fragility) of | |
778 | * reading ahead while parsing reports to try and minimize | |
779 | * forwarding redundant context switch reports (i.e. between | |
780 | * other, unrelated contexts) we simply elect to forward them | |
781 | * all. | |
782 | * | |
783 | * We don't rely solely on the reason field to identify context | |
784 | * switches since it's not uncommon for periodic samples to
785 | * identify a switch before any 'context switch' report. | |
786 | */ | |
787 | if (!dev_priv->perf.oa.exclusive_stream->ctx || | |
788 | dev_priv->perf.oa.specific_ctx_id == ctx_id || | |
789 | (dev_priv->perf.oa.oa_buffer.last_ctx_id == | |
790 | dev_priv->perf.oa.specific_ctx_id) || | |
791 | reason & OAREPORT_REASON_CTX_SWITCH) { | |
792 | ||
793 | /* | |
794 | * While filtering for a single context we avoid | |
795 | * leaking the IDs of other contexts. | |
796 | */ | |
797 | if (dev_priv->perf.oa.exclusive_stream->ctx && | |
798 | dev_priv->perf.oa.specific_ctx_id != ctx_id) { | |
799 | report32[2] = INVALID_CTX_ID; | |
800 | } | |
801 | ||
802 | ret = append_oa_sample(stream, buf, count, offset, | |
803 | report); | |
804 | if (ret) | |
805 | break; | |
806 | ||
807 | dev_priv->perf.oa.oa_buffer.last_ctx_id = ctx_id; | |
808 | } | |
809 | ||
810 | /* | |
811 | * The above reason field sanity check is based on | |
812 | * the assumption that the OA buffer is initially | |
813 | * zeroed and we reset the field after copying so the | |
814 | * check is still meaningful once old reports start | |
815 | * being overwritten. | |
816 | */ | |
817 | report32[0] = 0; | |
818 | } | |
819 | ||
820 | if (start_offset != *offset) { | |
821 | spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); | |
822 | ||
823 | /* | |
824 | * We removed the gtt_offset for the copy loop above, indexing | |
825 | * relative to oa_buf_base so put back here... | |
826 | */ | |
827 | head += gtt_offset; | |
828 | ||
829 | I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK); | |
830 | dev_priv->perf.oa.oa_buffer.head = head; | |
831 | ||
832 | spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); | |
833 | } | |
834 | ||
835 | return ret; | |
836 | } | |
837 | ||
838 | /** | |
839 | * gen8_oa_read - copy status records then buffered OA reports | |
840 | * @stream: An i915-perf stream opened for OA metrics | |
841 | * @buf: destination buffer given by userspace | |
842 | * @count: the number of bytes userspace wants to read | |
843 | * @offset: (inout): the current position for writing into @buf | |
844 | * | |
845 | * Checks OA unit status registers and if necessary appends corresponding | |
846 | * status records for userspace (such as for a buffer full condition) and then | |
847 | * initiate appending any buffered OA reports. | |
848 | * | |
849 | * Updates @offset according to the number of bytes successfully copied into | |
850 | * the userspace buffer. | |
851 | * | |
852 | * NB: some data may be successfully copied to the userspace buffer | |
853 | * even if an error is returned, and this is reflected in the | |
854 | * updated @offset. | |
855 | * | |
856 | * Returns: zero on success or a negative error code | |
857 | */ | |
858 | static int gen8_oa_read(struct i915_perf_stream *stream, | |
859 | char __user *buf, | |
860 | size_t count, | |
861 | size_t *offset) | |
862 | { | |
863 | struct drm_i915_private *dev_priv = stream->dev_priv; | |
864 | u32 oastatus; | |
865 | int ret; | |
866 | ||
867 | if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr)) | |
868 | return -EIO; | |
869 | ||
870 | oastatus = I915_READ(GEN8_OASTATUS); | |
871 | ||
872 | /* | |
873 | * We treat OABUFFER_OVERFLOW as a significant error: | |
874 | * | |
875 | * Although theoretically we could handle this more gracefully | |
876 | * sometimes, some Gens don't correctly suppress certain | |
877 | * automatically triggered reports in this condition and so we | |
878 | * have to assume that old reports are now being trampled | |
879 | * over. | |
880 | * | |
881 | * Considering that we don't currently give userspace control
882 | * over the OA buffer size and always configure a large 16MB
883 | * buffer, a buffer overflow likely indicates that something
884 | * has gone quite badly wrong anyway.
885 | */ | |
886 | if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) { | |
887 | ret = append_oa_status(stream, buf, count, offset, | |
888 | DRM_I915_PERF_RECORD_OA_BUFFER_LOST); | |
889 | if (ret) | |
890 | return ret; | |
891 | ||
892 | DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n", | |
893 | dev_priv->perf.oa.period_exponent); | |
894 | ||
895 | dev_priv->perf.oa.ops.oa_disable(dev_priv); | |
896 | dev_priv->perf.oa.ops.oa_enable(dev_priv); | |
897 | ||
898 | /* | |
899 | * Note: .oa_enable() is expected to re-init the oabuffer and | |
900 | * reset GEN8_OASTATUS for us | |
901 | */ | |
902 | oastatus = I915_READ(GEN8_OASTATUS); | |
903 | } | |
904 | ||
905 | if (oastatus & GEN8_OASTATUS_REPORT_LOST) { | |
906 | ret = append_oa_status(stream, buf, count, offset, | |
907 | DRM_I915_PERF_RECORD_OA_REPORT_LOST); | |
908 | if (ret) | |
909 | return ret; | |
910 | I915_WRITE(GEN8_OASTATUS, | |
911 | oastatus & ~GEN8_OASTATUS_REPORT_LOST); | |
912 | } | |
913 | ||
914 | return gen8_append_oa_reports(stream, buf, count, offset); | |
915 | } | |
916 | ||
d7965152 RB |
917 | /** |
918 | * gen7_append_oa_reports - Copies all buffered OA reports into userspace read() buffer.
919 | * @stream: An i915-perf stream opened for OA metrics | |
920 | * @buf: destination buffer given by userspace | |
921 | * @count: the number of bytes userspace wants to read | |
922 | * @offset: (inout): the current position for writing into @buf | |
d7965152 | 923 | * |
16d98b31 RB |
924 | * Notably any error condition resulting in a short read (-%ENOSPC or |
925 | * -%EFAULT) will be returned even though one or more records may | |
d7965152 RB |
926 | * have been successfully copied. In this case it's up to the caller |
927 | * to decide if the error should be squashed before returning to | |
928 | * userspace. | |
929 | * | |
930 | * Note: reports are consumed from the head, and appended to the | |
e81b3a55 | 931 | * tail, so the tail chases the head?... If you think that's mad |
d7965152 RB |
932 | * and back-to-front you're not alone, but this follows the |
933 | * Gen PRM naming convention. | |
16d98b31 RB |
934 | * |
935 | * Returns: 0 on success, negative error code on failure. | |
d7965152 RB |
936 | */ |
937 | static int gen7_append_oa_reports(struct i915_perf_stream *stream, | |
938 | char __user *buf, | |
939 | size_t count, | |
3bb335c1 | 940 | size_t *offset) |
d7965152 RB |
941 | { |
942 | struct drm_i915_private *dev_priv = stream->dev_priv; | |
943 | int report_size = dev_priv->perf.oa.oa_buffer.format_size; | |
944 | u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr; | |
d7965152 RB |
945 | u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma); |
946 | u32 mask = (OA_BUFFER_SIZE - 1); | |
3bb335c1 | 947 | size_t start_offset = *offset; |
0dd860cf RB |
948 | unsigned long flags; |
949 | unsigned int aged_tail_idx; | |
950 | u32 head, tail; | |
d7965152 RB |
951 | u32 taken; |
952 | int ret = 0; | |
953 | ||
954 | if (WARN_ON(!stream->enabled)) | |
955 | return -EIO; | |
956 | ||
0dd860cf | 957 | spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); |
f279020a | 958 | |
0dd860cf RB |
959 | head = dev_priv->perf.oa.oa_buffer.head; |
960 | aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx; | |
961 | tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset; | |
f279020a | 962 | |
0dd860cf | 963 | spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); |
d7965152 | 964 | |
0dd860cf RB |
965 | /* An invalid tail pointer here means we're still waiting for the poll |
966 | * hrtimer callback to give us a pointer | |
d7965152 | 967 | */ |
0dd860cf RB |
968 | if (tail == INVALID_TAIL_PTR) |
969 | return -EAGAIN; | |
d7965152 | 970 | |
0dd860cf RB |
971 | /* NB: oa_buffer.head/tail include the gtt_offset which we don't want |
972 | * while indexing relative to oa_buf_base. | |
d7965152 | 973 | */ |
0dd860cf RB |
974 | head -= gtt_offset; |
975 | tail -= gtt_offset; | |
d7965152 | 976 | |
0dd860cf RB |
977 | /* An out of bounds or misaligned head or tail pointer implies a driver |
978 | * bug since we validate + align the tail pointers we read from the | |
979 | * hardware and we are in full control of the head pointer which should | |
980 | * only be incremented by multiples of the report size (notably also | |
981 | * all a power of two). | |
d7965152 | 982 | */ |
0dd860cf RB |
983 | if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size || |
984 | tail > OA_BUFFER_SIZE || tail % report_size, | |
985 | "Inconsistent OA buffer pointers: head = %u, tail = %u\n", | |
986 | head, tail)) | |
987 | return -EIO; | |
d7965152 | 988 | |
d7965152 RB |
989 | |
990 | for (/* none */; | |
991 | (taken = OA_TAKEN(tail, head)); | |
992 | head = (head + report_size) & mask) { | |
993 | u8 *report = oa_buf_base + head; | |
994 | u32 *report32 = (void *)report; | |
995 | ||
996 | /* All the report sizes factor neatly into the buffer | |
997 | * size so we never expect to see a report split | |
998 | * between the beginning and end of the buffer. | |
999 | * | |
1000 | * Given the initial alignment check a misalignment | |
1001 | * here would imply a driver bug that would result | |
1002 | * in an overrun. | |
1003 | */ | |
1004 | if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) { | |
1005 | DRM_ERROR("Spurious OA head ptr: non-integral report offset\n"); | |
1006 | break; | |
1007 | } | |
1008 | ||
1009 | /* The report-ID field for periodic samples includes | |
1010 | * some undocumented flags related to what triggered | |
1011 | * the report and is never expected to be zero so we | |
1012 | * can check that the report isn't invalid before | |
1013 | * copying it to userspace... | |
1014 | */ | |
1015 | if (report32[0] == 0) { | |
712122ea RB |
1016 | if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs)) |
1017 | DRM_NOTE("Skipping spurious, invalid OA report\n"); | |
d7965152 RB |
1018 | continue; |
1019 | } | |
1020 | ||
1021 | ret = append_oa_sample(stream, buf, count, offset, report); | |
1022 | if (ret) | |
1023 | break; | |
1024 | ||
1025 | /* The above report-id field sanity check is based on | |
1026 | * the assumption that the OA buffer is initially | |
1027 | * zeroed and we reset the field after copying so the | |
1028 | * check is still meaningful once old reports start | |
1029 | * being overwritten. | |
1030 | */ | |
1031 | report32[0] = 0; | |
1032 | } | |
1033 | ||
3bb335c1 | 1034 | if (start_offset != *offset) { |
0dd860cf RB |
1035 | spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); |
1036 | ||
3bb335c1 RB |
1037 | /* We removed the gtt_offset for the copy loop above, indexing |
1038 | * relative to oa_buf_base so put back here... | |
1039 | */ | |
1040 | head += gtt_offset; | |
1041 | ||
1042 | I915_WRITE(GEN7_OASTATUS2, | |
1043 | ((head & GEN7_OASTATUS2_HEAD_MASK) | | |
1044 | OA_MEM_SELECT_GGTT)); | |
1045 | dev_priv->perf.oa.oa_buffer.head = head; | |
0dd860cf RB |
1046 | |
1047 | spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); | |
3bb335c1 | 1048 | } |
d7965152 RB |
1049 | |
1050 | return ret; | |
1051 | } | |
1052 | ||
16d98b31 RB |
1053 | /** |
1054 | * gen7_oa_read - copy status records then buffered OA reports | |
1055 | * @stream: An i915-perf stream opened for OA metrics | |
1056 | * @buf: destination buffer given by userspace | |
1057 | * @count: the number of bytes userspace wants to read | |
1058 | * @offset: (inout): the current position for writing into @buf | |
1059 | * | |
1060 | * Checks Gen 7 specific OA unit status registers and if necessary appends | |
1061 | * corresponding status records for userspace (such as for a buffer full | |
1062 | * condition) and then initiate appending any buffered OA reports. | |
1063 | * | |
1064 | * Updates @offset according to the number of bytes successfully copied into | |
1065 | * the userspace buffer. | |
1066 | * | |
1067 | * Returns: zero on success or a negative error code | |
1068 | */ | |
d7965152 RB |
1069 | static int gen7_oa_read(struct i915_perf_stream *stream, |
1070 | char __user *buf, | |
1071 | size_t count, | |
1072 | size_t *offset) | |
1073 | { | |
1074 | struct drm_i915_private *dev_priv = stream->dev_priv; | |
d7965152 | 1075 | u32 oastatus1; |
d7965152 RB |
1076 | int ret; |
1077 | ||
1078 | if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr)) | |
1079 | return -EIO; | |
1080 | ||
d7965152 RB |
1081 | oastatus1 = I915_READ(GEN7_OASTATUS1); |
1082 | ||
d7965152 RB |
1083 | /* XXX: On Haswell we don't have a safe way to clear oastatus1 |
1084 | * bits while the OA unit is enabled (while the tail pointer | |
1085 | * may be updated asynchronously) so we ignore status bits | |
1086 | * that have already been reported to userspace. | |
1087 | */ | |
1088 | oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1; | |
1089 | ||
1090 | /* We treat OABUFFER_OVERFLOW as a significant error: | |
1091 | * | |
1092 | * - The status can be interpreted to mean that the buffer is | |
1093 | * currently full (with a higher precedence than OA_TAKEN() | |
1094 | * which will start to report a near-empty buffer after an | |
1095 | * overflow) but it's awkward that we can't clear the status | |
1096 | * on Haswell, so without a reset we won't be able to catch | |
1097 | * the state again. | |
1098 | * | |
1099 | * - Since it also implies the HW has started overwriting old | |
1100 | * reports it may also affect our sanity checks for invalid | |
1101 | * reports when copying to userspace that assume new reports | |
1102 | * are being written to cleared memory. | |
1103 | * | |
1104 | * - In the future we may want to introduce a flight recorder | |
1105 | * mode where the driver will automatically maintain a safe | |
1106 | * guard band between head/tail, avoiding this overflow | |
1107 | * condition, but we avoid the added driver complexity for | |
1108 | * now. | |
1109 | */ | |
1110 | if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) { | |
1111 | ret = append_oa_status(stream, buf, count, offset, | |
1112 | DRM_I915_PERF_RECORD_OA_BUFFER_LOST); | |
1113 | if (ret) | |
1114 | return ret; | |
1115 | ||
19f81df2 RB |
1116 | DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n", |
1117 | dev_priv->perf.oa.period_exponent); | |
d7965152 RB |
1118 | |
1119 | dev_priv->perf.oa.ops.oa_disable(dev_priv); | |
1120 | dev_priv->perf.oa.ops.oa_enable(dev_priv); | |
1121 | ||
d7965152 | 1122 | oastatus1 = I915_READ(GEN7_OASTATUS1); |
d7965152 RB |
1123 | } |
1124 | ||
1125 | if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) { | |
1126 | ret = append_oa_status(stream, buf, count, offset, | |
1127 | DRM_I915_PERF_RECORD_OA_REPORT_LOST); | |
1128 | if (ret) | |
1129 | return ret; | |
1130 | dev_priv->perf.oa.gen7_latched_oastatus1 |= | |
1131 | GEN7_OASTATUS1_REPORT_LOST; | |
1132 | } | |
1133 | ||
3bb335c1 | 1134 | return gen7_append_oa_reports(stream, buf, count, offset); |
d7965152 RB |
1135 | } |
1136 | ||
16d98b31 RB |
1137 | /** |
1138 | * i915_oa_wait_unlocked - handles blocking IO until OA data available | |
1139 | * @stream: An i915-perf stream opened for OA metrics | |
1140 | * | |
1141 | * Called when userspace tries to read() from a blocking stream FD opened | |
1142 | * for OA metrics. It waits until the hrtimer callback finds a non-empty | |
1143 | * OA buffer and wakes us. | |
1144 | * | |
1145 | * Note: it's acceptable to have this return with some false positives | |
1146 | * since any subsequent read handling will return -EAGAIN if there isn't | |
1147 | * really data ready for userspace yet. | |
1148 | * | |
1149 | * Returns: zero on success or a negative error code | |
1150 | */ | |
d7965152 RB |
1151 | static int i915_oa_wait_unlocked(struct i915_perf_stream *stream) |
1152 | { | |
1153 | struct drm_i915_private *dev_priv = stream->dev_priv; | |
1154 | ||
1155 | /* We would wait indefinitely if periodic sampling is not enabled */ | |
1156 | if (!dev_priv->perf.oa.periodic) | |
1157 | return -EIO; | |
1158 | ||
d7965152 | 1159 | return wait_event_interruptible(dev_priv->perf.oa.poll_wq, |
19f81df2 | 1160 | oa_buffer_check_unlocked(dev_priv)); |
d7965152 RB |
1161 | } |
1162 | ||
16d98b31 RB |
1163 | /** |
1164 | * i915_oa_poll_wait - call poll_wait() for an OA stream poll() | |
1165 | * @stream: An i915-perf stream opened for OA metrics | |
1166 | * @file: An i915 perf stream file | |
1167 | * @wait: poll() state table | |
1168 | * | |
1169 | * For handling userspace polling on an i915 perf stream opened for OA metrics, | |
1170 | * this starts a poll_wait with the wait queue that our hrtimer callback wakes | |
1171 | * when it sees data ready to read in the circular OA buffer. | |
1172 | */ | |
d7965152 RB |
1173 | static void i915_oa_poll_wait(struct i915_perf_stream *stream, |
1174 | struct file *file, | |
1175 | poll_table *wait) | |
1176 | { | |
1177 | struct drm_i915_private *dev_priv = stream->dev_priv; | |
1178 | ||
1179 | poll_wait(file, &dev_priv->perf.oa.poll_wq, wait); | |
1180 | } | |
1181 | ||
16d98b31 RB |
1182 | /** |
1183 | * i915_oa_read - just calls through to &i915_oa_ops->read | |
1184 | * @stream: An i915-perf stream opened for OA metrics | |
1185 | * @buf: destination buffer given by userspace | |
1186 | * @count: the number of bytes userspace wants to read | |
1187 | * @offset: (inout): the current position for writing into @buf | |
1188 | * | |
1189 | * Updates @offset according to the number of bytes successfully copied into | |
1190 | * the userspace buffer. | |
1191 | * | |
1192 | * Returns: zero on success or a negative error code | |
1193 | */ | |
d7965152 RB |
1194 | static int i915_oa_read(struct i915_perf_stream *stream, |
1195 | char __user *buf, | |
1196 | size_t count, | |
1197 | size_t *offset) | |
1198 | { | |
1199 | struct drm_i915_private *dev_priv = stream->dev_priv; | |
1200 | ||
1201 | return dev_priv->perf.oa.ops.read(stream, buf, count, offset); | |
1202 | } | |
1203 | ||
16d98b31 RB |
1204 | /** |
1205 | * oa_get_render_ctx_id - determine and hold ctx hw id | |
1206 | * @stream: An i915-perf stream opened for OA metrics | |
1207 | * | |
1208 | * Determine the render context hw id, and ensure it remains fixed for the | |
d7965152 RB |
1209 | * lifetime of the stream. This ensures that we don't have to worry about |
1210 | * updating the context ID in OACONTROL on the fly. | |
16d98b31 RB |
1211 | * |
1212 | * Returns: zero on success or a negative error code | |
d7965152 RB |
1213 | */ |
1214 | static int oa_get_render_ctx_id(struct i915_perf_stream *stream) | |
1215 | { | |
1216 | struct drm_i915_private *dev_priv = stream->dev_priv; | |
d7965152 | 1217 | |
4f044a88 | 1218 | if (i915_modparams.enable_execlists) |
19f81df2 RB |
1219 | dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id; |
1220 | else { | |
1221 | struct intel_engine_cs *engine = dev_priv->engine[RCS]; | |
1222 | struct intel_ring *ring; | |
1223 | int ret; | |
d7965152 | 1224 | |
19f81df2 RB |
1225 | ret = i915_mutex_lock_interruptible(&dev_priv->drm); |
1226 | if (ret) | |
1227 | return ret; | |
1228 | ||
1229 | /* | |
1230 | * As the ID is the gtt offset of the context's vma we | |
1231 | * pin the vma to ensure the ID remains fixed. | |
1232 | * | |
1233 | * NB: implied RCS engine... | |
1234 | */ | |
1235 | ring = engine->context_pin(engine, stream->ctx); | |
1236 | mutex_unlock(&dev_priv->drm.struct_mutex); | |
1237 | if (IS_ERR(ring)) | |
1238 | return PTR_ERR(ring); | |
d7965152 | 1239 | |
19f81df2 RB |
1240 | |
1241 | /* | |
1242 | * Explicitly track the ID (instead of calling | |
1243 | * i915_ggtt_offset() on the fly) considering the difference | |
1244 | * with gen8+ and execlists | |
1245 | */ | |
1246 | dev_priv->perf.oa.specific_ctx_id = | |
1247 | i915_ggtt_offset(stream->ctx->engine[engine->id].state); | |
1248 | } | |
d7965152 | 1249 | |
266a240b | 1250 | return 0; |
d7965152 RB |
1251 | } |
1252 | ||
16d98b31 RB |
1253 | /** |
1254 | * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id that releases the hold | |
1255 | * @stream: An i915-perf stream opened for OA metrics | |
1256 | * | |
1257 | * If anything was needed to keep the context HW ID valid for the lifetime | |
1258 | * of the stream, that can be undone here. | |
1259 | */ | |
d7965152 RB |
1260 | static void oa_put_render_ctx_id(struct i915_perf_stream *stream) |
1261 | { | |
1262 | struct drm_i915_private *dev_priv = stream->dev_priv; | |
1263 | ||
4f044a88 | 1264 | if (i915_modparams.enable_execlists) { |
19f81df2 RB |
1265 | dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID; |
1266 | } else { | |
1267 | struct intel_engine_cs *engine = dev_priv->engine[RCS]; | |
d7965152 | 1268 | |
19f81df2 | 1269 | mutex_lock(&dev_priv->drm.struct_mutex); |
d7965152 | 1270 | |
19f81df2 RB |
1271 | dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID; |
1272 | engine->context_unpin(engine, stream->ctx); | |
1273 | ||
1274 | mutex_unlock(&dev_priv->drm.struct_mutex); | |
1275 | } | |
d7965152 RB |
1276 | } |
1277 | ||
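| /* | |
|  * Counterpart to alloc_oa_buffer(): unmap and unpin the OA buffer vma and | |
|  * drop the object reference, all under struct_mutex. | |
|  */ | |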
1278 | static void | |
1279 | free_oa_buffer(struct drm_i915_private *i915) | |
1280 | { | |
1281 | mutex_lock(&i915->drm.struct_mutex); | |
1282 | ||
1283 | i915_gem_object_unpin_map(i915->perf.oa.oa_buffer.vma->obj); | |
1284 | i915_vma_unpin(i915->perf.oa.oa_buffer.vma); | |
1285 | i915_gem_object_put(i915->perf.oa.oa_buffer.vma->obj); | |
1286 | ||
1287 | i915->perf.oa.oa_buffer.vma = NULL; | |
1288 | i915->perf.oa.oa_buffer.vaddr = NULL; | |
1289 | ||
1290 | mutex_unlock(&i915->drm.struct_mutex); | |
1291 | } | |
1292 | ||
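| /* | |
|  * Tear down an OA stream: detach it as the exclusive stream, disable the | |
|  * metric set, release the OA buffer and the pm/forcewake references, and | |
|  * drop the context hold and OA config reference taken at stream init. | |
|  */ | |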
1293 | static void i915_oa_stream_destroy(struct i915_perf_stream *stream) | |
1294 | { | |
1295 | struct drm_i915_private *dev_priv = stream->dev_priv; | |
1296 | ||
1297 | BUG_ON(stream != dev_priv->perf.oa.exclusive_stream); | |
1298 | ||
19f81df2 | 1299 | /* |
f89823c2 LL |
1300 | * Unset exclusive_stream first, it will be checked while disabling |
1301 | * the metric set on gen8+. | |
19f81df2 | 1302 | */ |
701f8231 | 1303 | mutex_lock(&dev_priv->drm.struct_mutex); |
19f81df2 | 1304 | dev_priv->perf.oa.exclusive_stream = NULL; |
701f8231 | 1305 | mutex_unlock(&dev_priv->drm.struct_mutex); |
19f81df2 | 1306 | |
d7965152 RB |
1307 | dev_priv->perf.oa.ops.disable_metric_set(dev_priv); |
1308 | ||
1309 | free_oa_buffer(dev_priv); | |
1310 | ||
1311 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | |
1312 | intel_runtime_pm_put(dev_priv); | |
1313 | ||
1314 | if (stream->ctx) | |
1315 | oa_put_render_ctx_id(stream); | |
1316 | ||
f89823c2 LL |
1317 | put_oa_config(dev_priv, stream->oa_config); |
1318 | ||
712122ea RB |
1319 | if (dev_priv->perf.oa.spurious_report_rs.missed) { |
1320 | DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n", | |
1321 | dev_priv->perf.oa.spurious_report_rs.missed); | |
1322 | } | |
d7965152 RB |
1323 | } |
1324 | ||
1325 | static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv) | |
1326 | { | |
1327 | u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma); | |
0dd860cf RB |
1328 | unsigned long flags; |
1329 | ||
1330 | spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); | |
d7965152 RB |
1331 | |
1332 | /* Pre-DevBDW: OABUFFER must be set with counters off, | |
1333 | * before OASTATUS1, but after OASTATUS2 | |
1334 | */ | |
1335 | I915_WRITE(GEN7_OASTATUS2, gtt_offset | OA_MEM_SELECT_GGTT); /* head */ | |
f279020a RB |
1336 | dev_priv->perf.oa.oa_buffer.head = gtt_offset; |
1337 | ||
d7965152 | 1338 | I915_WRITE(GEN7_OABUFFER, gtt_offset); |
f279020a | 1339 | |
d7965152 RB |
1340 | I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */ |
1341 | ||
0dd860cf RB |
1342 | /* Mark that we need updated tail pointers to read from... */ |
1343 | dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR; | |
1344 | dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR; | |
1345 | ||
1346 | spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); | |
1347 | ||
d7965152 RB |
1348 | /* On Haswell we have to track which OASTATUS1 flags we've |
1349 | * already seen since they can't be cleared while periodic | |
1350 | * sampling is enabled. | |
1351 | */ | |
1352 | dev_priv->perf.oa.gen7_latched_oastatus1 = 0; | |
1353 | ||
1354 | /* NB: although the OA buffer will initially be allocated | |
1355 | * zeroed via shmfs (and so this memset is redundant when | |
1356 | * first allocating), we may re-init the OA buffer, either | |
1357 | * when re-enabling a stream or in error/reset paths. | |
1358 | * | |
1359 | * The reason we clear the buffer for each re-init is for the | |
1360 | * sanity check in gen7_append_oa_reports() that looks at the | |
1361 | * report-id field to make sure it's non-zero which relies on | |
1362 | * the assumption that new reports are being written to zeroed | |
1363 | * memory... | |
1364 | */ | |
1365 | memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE); | |
1366 | ||
1367 | /* Maybe make ->pollin per-stream state if we support multiple | |
1368 | * concurrent streams in the future. | |
1369 | */ | |
1370 | dev_priv->perf.oa.pollin = false; | |
1371 | } | |
1372 | ||
19f81df2 RB |
1373 | static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv) |
1374 | { | |
1375 | u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma); | |
1376 | unsigned long flags; | |
1377 | ||
1378 | spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); | |
1379 | ||
1380 | I915_WRITE(GEN8_OASTATUS, 0); | |
1381 | I915_WRITE(GEN8_OAHEADPTR, gtt_offset); | |
1382 | dev_priv->perf.oa.oa_buffer.head = gtt_offset; | |
1383 | ||
1384 | I915_WRITE(GEN8_OABUFFER_UDW, 0); | |
1385 | ||
1386 | /* | |
1387 | * PRM says: | |
1388 | * | |
1389 | * "This MMIO must be set before the OATAILPTR | |
1390 | * register and after the OAHEADPTR register. This is | |
1391 | * to enable proper functionality of the overflow | |
1392 | * bit." | |
1393 | */ | |
1394 | I915_WRITE(GEN8_OABUFFER, gtt_offset | | |
1395 | OABUFFER_SIZE_16M | OA_MEM_SELECT_GGTT); | |
1396 | I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK); | |
1397 | ||
1398 | /* Mark that we need updated tail pointers to read from... */ | |
1399 | dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR; | |
1400 | dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR; | |
1401 | ||
1402 | /* | |
1403 | * Reset state used to recognise context switches, affecting which | |
1404 | * reports we will forward to userspace while filtering for a single | |
1405 | * context. | |
1406 | */ | |
1407 | dev_priv->perf.oa.oa_buffer.last_ctx_id = INVALID_CTX_ID; | |
1408 | ||
1409 | spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); | |
1410 | ||
1411 | /* | |
1412 | * NB: although the OA buffer will initially be allocated | |
1413 | * zeroed via shmfs (and so this memset is redundant when | |
1414 | * first allocating), we may re-init the OA buffer, either | |
1415 | * when re-enabling a stream or in error/reset paths. | |
1416 | * | |
1417 | * The reason we clear the buffer for each re-init is for the | |
1418 | * sanity check in gen8_append_oa_reports() that looks at the | |
1419 | * reason field to make sure it's non-zero which relies on | |
1420 | * the assumption that new reports are being written to zeroed | |
1421 | * memory... | |
1422 | */ | |
1423 | memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE); | |
1424 | ||
1425 | /* | |
1426 | * Maybe make ->pollin per-stream state if we support multiple | |
1427 | * concurrent streams in the future. | |
1428 | */ | |
1429 | dev_priv->perf.oa.pollin = false; | |
1430 | } | |
1431 | ||
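| /* | |
|  * Allocate the buffer object the OA unit writes reports into, pin it into | |
|  * the GGTT with the 16M alignment HSW requires, and map it for CPU access | |
|  * before handing it to the gen specific init_oa_buffer() hook. | |
|  */ | |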
d7965152 RB |
1432 | static int alloc_oa_buffer(struct drm_i915_private *dev_priv) |
1433 | { | |
1434 | struct drm_i915_gem_object *bo; | |
1435 | struct i915_vma *vma; | |
1436 | int ret; | |
1437 | ||
1438 | if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma)) | |
1439 | return -ENODEV; | |
1440 | ||
1441 | ret = i915_mutex_lock_interruptible(&dev_priv->drm); | |
1442 | if (ret) | |
1443 | return ret; | |
1444 | ||
1445 | BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE); | |
1446 | BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M); | |
1447 | ||
12d79d78 | 1448 | bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE); |
d7965152 RB |
1449 | if (IS_ERR(bo)) { |
1450 | DRM_ERROR("Failed to allocate OA buffer\n"); | |
1451 | ret = PTR_ERR(bo); | |
1452 | goto unlock; | |
1453 | } | |
1454 | ||
1455 | ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC); | |
1456 | if (ret) | |
1457 | goto err_unref; | |
1458 | ||
1459 | /* PreHSW required 512K alignment, HSW requires 16M */ | |
1460 | vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0); | |
1461 | if (IS_ERR(vma)) { | |
1462 | ret = PTR_ERR(vma); | |
1463 | goto err_unref; | |
1464 | } | |
1465 | dev_priv->perf.oa.oa_buffer.vma = vma; | |
1466 | ||
1467 | dev_priv->perf.oa.oa_buffer.vaddr = | |
1468 | i915_gem_object_pin_map(bo, I915_MAP_WB); | |
1469 | if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) { | |
1470 | ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr); | |
1471 | goto err_unpin; | |
1472 | } | |
1473 | ||
1474 | dev_priv->perf.oa.ops.init_oa_buffer(dev_priv); | |
1475 | ||
1476 | DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n", | |
1477 | i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma), | |
1478 | dev_priv->perf.oa.oa_buffer.vaddr); | |
1479 | ||
1480 | goto unlock; | |
1481 | ||
1482 | err_unpin: | |
1483 | __i915_vma_unpin(vma); | |
1484 | ||
1485 | err_unref: | |
1486 | i915_gem_object_put(bo); | |
1487 | ||
1488 | dev_priv->perf.oa.oa_buffer.vaddr = NULL; | |
1489 | dev_priv->perf.oa.oa_buffer.vma = NULL; | |
1490 | ||
1491 | unlock: | |
1492 | mutex_unlock(&dev_priv->drm.struct_mutex); | |
1493 | return ret; | |
1494 | } | |
1495 | ||
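| /* Write an OA config's list of (register, value) pairs to the hardware. */ | |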
1496 | static void config_oa_regs(struct drm_i915_private *dev_priv, | |
1497 | const struct i915_oa_reg *regs, | |
701f8231 | 1498 | u32 n_regs) |
d7965152 | 1499 | { |
701f8231 | 1500 | u32 i; |
d7965152 RB |
1501 | |
1502 | for (i = 0; i < n_regs; i++) { | |
1503 | const struct i915_oa_reg *reg = regs + i; | |
1504 | ||
1505 | I915_WRITE(reg->addr, reg->value); | |
1506 | } | |
1507 | } | |
1508 | ||
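| /* | |
|  * Apply a metric set on Haswell: disable the clock gating that would gate | |
|  * the OA unit's clock, then write the config's MUX and boolean counter | |
|  * register lists (with a delay in between for the MUX config to land). | |
|  */ | |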
701f8231 LL |
1509 | static int hsw_enable_metric_set(struct drm_i915_private *dev_priv, |
1510 | const struct i915_oa_config *oa_config) | |
d7965152 | 1511 | { |
d7965152 RB |
1512 | /* PRM: |
1513 | * | |
1514 | * OA unit is using “crclk” for its functionality. When trunk | |
1515 | * level clock gating takes place, OA clock would be gated, | |
1516 | * unable to count the events from non-render clock domain. | |
1517 | * Render clock gating must be disabled when OA is enabled to | |
1518 | * count the events from non-render domain. Unit level clock | |
1519 | * gating for RCS should also be disabled. | |
1520 | */ | |
1521 | I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & | |
1522 | ~GEN7_DOP_CLOCK_GATE_ENABLE)); | |
1523 | I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) | | |
1524 | GEN6_CSUNIT_CLOCK_GATE_DISABLE)); | |
1525 | ||
701f8231 | 1526 | config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); |
d7965152 RB |
1527 | |
1528 | /* It apparently takes a fairly long time for a new MUX | |
1529 | * configuration to be applied after these register writes. | |
1530 | * This delay duration was derived empirically based on the | |
1531 | * render_basic config but hopefully it covers the maximum | |
1532 | * configuration latency. | |
1533 | * | |
1534 | * As a fallback, the checks in _append_oa_reports() to skip | |
1535 | * invalid OA reports do also seem to work to discard reports | |
1536 | * generated before this config has completed - albeit not | |
1537 | * silently. | |
1538 | * | |
1539 | * Unfortunately this is essentially a magic number, since we | |
1540 | * don't currently know of a reliable mechanism for predicting | |
1541 | * how long the MUX config will take to apply and besides | |
1542 | * seeing invalid reports we don't know of a reliable way to | |
1543 | * explicitly check that the MUX config has landed. | |
1544 | * | |
1545 | * It's even possible we've mischaracterized the underlying | |
1546 | * problem - it just seems like the simplest explanation why | |
1547 | * a delay at this location would mitigate any invalid reports. | |
1548 | */ | |
1549 | usleep_range(15000, 20000); | |
1550 | ||
701f8231 LL |
1551 | config_oa_regs(dev_priv, oa_config->b_counter_regs, |
1552 | oa_config->b_counter_regs_len); | |
d7965152 RB |
1553 | |
1554 | return 0; | |
1555 | } | |
1556 | ||
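| /* Undo hsw_enable_metric_set(): restore clock gating and disable NOA. */ | |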
1557 | static void hsw_disable_metric_set(struct drm_i915_private *dev_priv) | |
1558 | { | |
1559 | I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) & | |
1560 | ~GEN6_CSUNIT_CLOCK_GATE_DISABLE)); | |
1561 | I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) | | |
1562 | GEN7_DOP_CLOCK_GATE_ENABLE)); | |
1563 | ||
1564 | I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) & | |
1565 | ~GT_NOA_ENABLE)); | |
1566 | } | |
1567 | ||
19f81df2 RB |
1568 | /* |
1569 | * NB: It must always remain pointer safe to run this even if the OA unit | |
1570 | * has been disabled. | |
1571 | * | |
1572 | * It's fine to put out-of-date values into these per-context registers | |
1573 | * in the case that the OA unit has been disabled. | |
1574 | */ | |
1575 | static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx, | |
701f8231 LL |
1576 | u32 *reg_state, |
1577 | const struct i915_oa_config *oa_config) | |
19f81df2 RB |
1578 | { |
1579 | struct drm_i915_private *dev_priv = ctx->i915; | |
19f81df2 RB |
1580 | u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset; |
1581 | u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset; | |
1582 | /* The MMIO offsets for Flex EU registers aren't contiguous */ | |
1583 | u32 flex_mmio[] = { | |
1584 | i915_mmio_reg_offset(EU_PERF_CNTL0), | |
1585 | i915_mmio_reg_offset(EU_PERF_CNTL1), | |
1586 | i915_mmio_reg_offset(EU_PERF_CNTL2), | |
1587 | i915_mmio_reg_offset(EU_PERF_CNTL3), | |
1588 | i915_mmio_reg_offset(EU_PERF_CNTL4), | |
1589 | i915_mmio_reg_offset(EU_PERF_CNTL5), | |
1590 | i915_mmio_reg_offset(EU_PERF_CNTL6), | |
1591 | }; | |
1592 | int i; | |
1593 | ||
1594 | reg_state[ctx_oactxctrl] = i915_mmio_reg_offset(GEN8_OACTXCONTROL); | |
1595 | reg_state[ctx_oactxctrl+1] = (dev_priv->perf.oa.period_exponent << | |
1596 | GEN8_OA_TIMER_PERIOD_SHIFT) | | |
1597 | (dev_priv->perf.oa.periodic ? | |
1598 | GEN8_OA_TIMER_ENABLE : 0) | | |
1599 | GEN8_OA_COUNTER_RESUME; | |
1600 | ||
1601 | for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) { | |
1602 | u32 state_offset = ctx_flexeu0 + i * 2; | |
1603 | u32 mmio = flex_mmio[i]; | |
1604 | ||
1605 | /* | |
1606 | * This arbitrary default will select the 'EU FPU0 Pipeline | |
1607 | * Active' event. In the future it's anticipated that there | |
1608 | * will be an explicit 'No Event' we can select, but not yet... | |
1609 | */ | |
1610 | u32 value = 0; | |
19f81df2 | 1611 | |
701f8231 LL |
1612 | if (oa_config) { |
1613 | u32 j; | |
1614 | ||
1615 | for (j = 0; j < oa_config->flex_regs_len; j++) { | |
1616 | if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) { | |
1617 | value = oa_config->flex_regs[j].value; | |
1618 | break; | |
1619 | } | |
19f81df2 RB |
1620 | } |
1621 | } | |
1622 | ||
1623 | reg_state[state_offset] = mmio; | |
1624 | reg_state[state_offset+1] = value; | |
1625 | } | |
1626 | } | |
1627 | ||
1628 | /* | |
1629 | * Same as gen8_update_reg_state_unlocked only through the batchbuffer. This | |
1630 | * is only used by the kernel context. | |
1631 | */ | |
701f8231 LL |
1632 | static int gen8_emit_oa_config(struct drm_i915_gem_request *req, |
1633 | const struct i915_oa_config *oa_config) | |
19f81df2 RB |
1634 | { |
1635 | struct drm_i915_private *dev_priv = req->i915; | |
19f81df2 RB |
1636 | /* The MMIO offsets for Flex EU registers aren't contiguous */ |
1637 | u32 flex_mmio[] = { | |
1638 | i915_mmio_reg_offset(EU_PERF_CNTL0), | |
1639 | i915_mmio_reg_offset(EU_PERF_CNTL1), | |
1640 | i915_mmio_reg_offset(EU_PERF_CNTL2), | |
1641 | i915_mmio_reg_offset(EU_PERF_CNTL3), | |
1642 | i915_mmio_reg_offset(EU_PERF_CNTL4), | |
1643 | i915_mmio_reg_offset(EU_PERF_CNTL5), | |
1644 | i915_mmio_reg_offset(EU_PERF_CNTL6), | |
1645 | }; | |
1646 | u32 *cs; | |
1647 | int i; | |
1648 | ||
01d928e9 | 1649 | cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4); |
19f81df2 RB |
1650 | if (IS_ERR(cs)) |
1651 | return PTR_ERR(cs); | |
1652 | ||
01d928e9 | 1653 | *cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1); |
19f81df2 RB |
1654 | |
1655 | *cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL); | |
1656 | *cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | | |
1657 | (dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) | | |
1658 | GEN8_OA_COUNTER_RESUME; | |
1659 | ||
1660 | for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) { | |
1661 | u32 mmio = flex_mmio[i]; | |
1662 | ||
1663 | /* | |
1664 | * This arbitrary default will select the 'EU FPU0 Pipeline | |
1665 | * Active' event. In the future it's anticipated that there | |
1666 | * will be an explicit 'No Event' we can select, but not | |
1667 | * yet... | |
1668 | */ | |
1669 | u32 value = 0; | |
19f81df2 | 1670 | |
701f8231 LL |
1671 | if (oa_config) { |
1672 | u32 j; | |
1673 | ||
1674 | for (j = 0; j < oa_config->flex_regs_len; j++) { | |
1675 | if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) { | |
1676 | value = oa_config->flex_regs[j].value; | |
1677 | break; | |
1678 | } | |
19f81df2 RB |
1679 | } |
1680 | } | |
1681 | ||
1682 | *cs++ = mmio; | |
1683 | *cs++ = value; | |
1684 | } | |
1685 | ||
1686 | *cs++ = MI_NOOP; | |
1687 | intel_ring_advance(req, cs); | |
1688 | ||
1689 | return 0; | |
1690 | } | |
1691 | ||
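| /* | |
|  * Emit the updated OA config into the kernel context's ring and queue a | |
|  * switch to that context behind all other outstanding work, so the new | |
|  * per-context OA state takes effect. | |
|  */ | |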
701f8231 LL |
1692 | static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_priv, |
1693 | const struct i915_oa_config *oa_config) | |
19f81df2 RB |
1694 | { |
1695 | struct intel_engine_cs *engine = dev_priv->engine[RCS]; | |
1696 | struct i915_gem_timeline *timeline; | |
1697 | struct drm_i915_gem_request *req; | |
1698 | int ret; | |
1699 | ||
1700 | lockdep_assert_held(&dev_priv->drm.struct_mutex); | |
1701 | ||
1702 | i915_gem_retire_requests(dev_priv); | |
1703 | ||
1704 | req = i915_gem_request_alloc(engine, dev_priv->kernel_context); | |
1705 | if (IS_ERR(req)) | |
1706 | return PTR_ERR(req); | |
1707 | ||
701f8231 | 1708 | ret = gen8_emit_oa_config(req, oa_config); |
19f81df2 RB |
1709 | if (ret) { |
1710 | i915_add_request(req); | |
1711 | return ret; | |
1712 | } | |
1713 | ||
1714 | /* Queue this switch after all other activity */ | |
1715 | list_for_each_entry(timeline, &dev_priv->gt.timelines, link) { | |
1716 | struct drm_i915_gem_request *prev; | |
1717 | struct intel_timeline *tl; | |
1718 | ||
1719 | tl = &timeline->engine[engine->id]; | |
1720 | prev = i915_gem_active_raw(&tl->last_request, | |
1721 | &dev_priv->drm.struct_mutex); | |
1722 | if (prev) | |
1723 | i915_sw_fence_await_sw_fence_gfp(&req->submit, | |
1724 | &prev->submit, | |
1725 | GFP_KERNEL); | |
1726 | } | |
1727 | ||
1728 | ret = i915_switch_context(req); | |
1729 | i915_add_request(req); | |
1730 | ||
1731 | return ret; | |
1732 | } | |
1733 | ||
1734 | /* | |
1735 | * Manages updating the per-context aspects of the OA stream | |
1736 | * configuration across all contexts. | |
1737 | * | |
1738 | * The awkward consideration here is that OACTXCONTROL controls the | |
1739 | * exponent for periodic sampling which is primarily used for system | |
1740 | * wide profiling where we'd like a consistent sampling period even in | |
1741 | * the face of context switches. | |
1742 | * | |
1743 | * Our approach of updating the register state context (as opposed to | |
1744 | * say using a workaround batch buffer) ensures that the hardware | |
1745 | * won't automatically reload an out-of-date timer exponent even | |
1746 | * transiently before a WA BB could be parsed. | |
1747 | * | |
1748 | * This function needs to: | |
1749 | * - Ensure the currently running context's per-context OA state is | |
1750 | * updated | |
1751 | * - Ensure that all existing contexts will have the correct per-context | |
1752 | * OA state if they are scheduled for use. | |
1753 | * - Ensure any new contexts will be initialized with the correct | |
1754 | * per-context OA state. | |
1755 | * | |
1756 | * Note: it's only the RCS/Render context that has any OA state. | |
1757 | */ | |
1758 | static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, | |
701f8231 | 1759 | const struct i915_oa_config *oa_config, |
19f81df2 RB |
1760 | bool interruptible) |
1761 | { | |
1762 | struct i915_gem_context *ctx; | |
1763 | int ret; | |
1764 | unsigned int wait_flags = I915_WAIT_LOCKED; | |
1765 | ||
1766 | if (interruptible) { | |
1767 | ret = i915_mutex_lock_interruptible(&dev_priv->drm); | |
1768 | if (ret) | |
1769 | return ret; | |
1770 | ||
1771 | wait_flags |= I915_WAIT_INTERRUPTIBLE; | |
1772 | } else { | |
1773 | mutex_lock(&dev_priv->drm.struct_mutex); | |
1774 | } | |
1775 | ||
1776 | /* Switch away from any user context. */ | |
701f8231 | 1777 | ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config); |
19f81df2 RB |
1778 | if (ret) |
1779 | goto out; | |
1780 | ||
1781 | /* | |
1782 | * The OA register config is set up through the context image. This image | |
1783 | * might be written to by the GPU on context switch (in particular on | |
1784 | * lite-restore). This means we can't safely update a context's image, | |
1785 | * if this context is scheduled/submitted to run on the GPU. | |
1786 | * | |
1787 | * We could emit the OA register config through the batch buffer but | |
1788 | * this might leave a small interval of time during which the OA unit is | |
1789 | * configured at an invalid sampling period. | |
1790 | * | |
1791 | * So far the best way to work around this issue seems to be draining | |
1792 | * the GPU from any submitted work. | |
1793 | */ | |
1794 | ret = i915_gem_wait_for_idle(dev_priv, wait_flags); | |
1795 | if (ret) | |
1796 | goto out; | |
1797 | ||
1798 | /* Update all contexts now that we've stalled the submission. */ | |
829a0af2 | 1799 | list_for_each_entry(ctx, &dev_priv->contexts.list, link) { |
19f81df2 RB |
1800 | struct intel_context *ce = &ctx->engine[RCS]; |
1801 | u32 *regs; | |
1802 | ||
1803 | /* OA settings will be set upon first use */ | |
1804 | if (!ce->state) | |
1805 | continue; | |
1806 | ||
1807 | regs = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB); | |
1808 | if (IS_ERR(regs)) { | |
1809 | ret = PTR_ERR(regs); | |
1810 | goto out; | |
1811 | } | |
1812 | ||
1813 | ce->state->obj->mm.dirty = true; | |
1814 | regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs); | |
1815 | ||
701f8231 | 1816 | gen8_update_reg_state_unlocked(ctx, regs, oa_config); |
19f81df2 RB |
1817 | |
1818 | i915_gem_object_unpin_map(ce->state->obj); | |
1819 | } | |
1820 | ||
1821 | out: | |
1822 | mutex_unlock(&dev_priv->drm.struct_mutex); | |
1823 | ||
1824 | return ret; | |
1825 | } | |
1826 | ||
701f8231 LL |
1827 | static int gen8_enable_metric_set(struct drm_i915_private *dev_priv, |
1828 | const struct i915_oa_config *oa_config) | |
19f81df2 | 1829 | { |
701f8231 | 1830 | int ret; |
19f81df2 RB |
1831 | |
1832 | /* | |
1833 | * We disable slice/unslice clock ratio change reports on SKL since | |
1834 | * they are too noisy. The HW generates a lot of redundant reports | |
1835 | * where the ratio hasn't really changed, causing a lot of redundant | |
1836 | * work for userspace to process and increasing the chances we'll hit buffer | |
1837 | * overruns. | |
1838 | * | |
1839 | * Although we don't currently use the 'disable overrun' OABUFFER | |
1840 | * feature it's worth noting that clock ratio reports have to be | |
1841 | * disabled before considering to use that feature since the HW doesn't | |
1842 | * correctly block these reports. | |
1843 | * | |
1844 | * Currently none of the high-level metrics we have depend on knowing | |
1845 | * this ratio to normalize. | |
1846 | * | |
1847 | * Note: This register is not power context saved and restored, but | |
1848 | * that's OK considering that we disable RC6 while the OA unit is | |
1849 | * enabled. | |
1850 | * | |
1851 | * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to | |
1852 | * be read back from automatically triggered reports, as part of the | |
1853 | * RPT_ID field. | |
1854 | */ | |
342a2c84 | 1855 | if (IS_GEN9(dev_priv)) { |
19f81df2 RB |
1856 | I915_WRITE(GEN8_OA_DEBUG, |
1857 | _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS | | |
1858 | GEN9_OA_DEBUG_INCLUDE_CLK_RATIO)); | |
1859 | } | |
1860 | ||
1861 | /* | |
1862 | * Update all contexts prior writing the mux configurations as we need | |
1863 | * to make sure all slices/subslices are ON before writing to NOA | |
1864 | * registers. | |
1865 | */ | |
701f8231 | 1866 | ret = gen8_configure_all_contexts(dev_priv, oa_config, true); |
19f81df2 RB |
1867 | if (ret) |
1868 | return ret; | |
1869 | ||
701f8231 LL |
1870 | config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); |
1871 | ||
701f8231 LL |
1872 | config_oa_regs(dev_priv, oa_config->b_counter_regs, |
1873 | oa_config->b_counter_regs_len); | |
19f81df2 RB |
1874 | |
1875 | return 0; | |
1876 | } | |
1877 | ||
1878 | static void gen8_disable_metric_set(struct drm_i915_private *dev_priv) | |
1879 | { | |
1880 | /* Reset all contexts' slices/subslices configurations. */ | |
701f8231 | 1881 | gen8_configure_all_contexts(dev_priv, NULL, false); |
28964cf2 LL |
1882 | |
1883 | I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) & | |
1884 | ~GT_NOA_ENABLE)); | |
1885 | ||
19f81df2 RB |
1886 | } |
1887 | ||
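| /* | |
|  * After re-initializing the OA buffer, program OACONTROL to (re)start | |
|  * capture, restricting reports to the stream's context if one was given | |
|  * at open time. | |
|  */ | |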
1bef3409 | 1888 | static void gen7_oa_enable(struct drm_i915_private *dev_priv) |
d7965152 | 1889 | { |
1bef3409 RB |
1890 | /* |
1891 | * Reset buf pointers so we don't forward reports from before now. | |
1892 | * | |
1893 | * Think carefully if considering trying to avoid this, since it | |
1894 | * also ensures status flags and the buffer itself are cleared | |
1895 | * in error paths, and we have checks for invalid reports based | |
1896 | * on the assumption that certain fields are written to zeroed | |
1897 | * memory, which this helps maintain. | |
1898 | */ | |
1899 | gen7_init_oa_buffer(dev_priv); | |
d7965152 RB |
1900 | |
1901 | if (dev_priv->perf.oa.exclusive_stream->enabled) { | |
1902 | struct i915_gem_context *ctx = | |
1903 | dev_priv->perf.oa.exclusive_stream->ctx; | |
1904 | u32 ctx_id = dev_priv->perf.oa.specific_ctx_id; | |
1905 | ||
1906 | bool periodic = dev_priv->perf.oa.periodic; | |
1907 | u32 period_exponent = dev_priv->perf.oa.period_exponent; | |
1908 | u32 report_format = dev_priv->perf.oa.oa_buffer.format; | |
1909 | ||
1910 | I915_WRITE(GEN7_OACONTROL, | |
1911 | (ctx_id & GEN7_OACONTROL_CTX_MASK) | | |
1912 | (period_exponent << | |
1913 | GEN7_OACONTROL_TIMER_PERIOD_SHIFT) | | |
1914 | (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) | | |
1915 | (report_format << GEN7_OACONTROL_FORMAT_SHIFT) | | |
1916 | (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) | | |
1917 | GEN7_OACONTROL_ENABLE); | |
1918 | } else | |
1919 | I915_WRITE(GEN7_OACONTROL, 0); | |
1920 | } | |
1921 | ||
19f81df2 RB |
1922 | static void gen8_oa_enable(struct drm_i915_private *dev_priv) |
1923 | { | |
1924 | u32 report_format = dev_priv->perf.oa.oa_buffer.format; | |
1925 | ||
1926 | /* | |
1927 | * Reset buf pointers so we don't forward reports from before now. | |
1928 | * | |
1929 | * Think carefully if considering trying to avoid this, since it | |
1930 | * also ensures status flags and the buffer itself are cleared | |
1931 | * in error paths, and we have checks for invalid reports based | |
1932 | * on the assumption that certain fields are written to zeroed | |
1933 | * memory, which this helps maintain. | |
1934 | */ | |
1935 | gen8_init_oa_buffer(dev_priv); | |
1936 | ||
1937 | /* | |
1938 | * Note: we don't rely on the hardware to perform single context | |
1939 | * filtering and instead filter on the cpu based on the context-id | |
1940 | * field of reports | |
1941 | */ | |
1942 | I915_WRITE(GEN8_OACONTROL, (report_format << | |
1943 | GEN8_OA_REPORT_FORMAT_SHIFT) | | |
1944 | GEN8_OA_COUNTER_ENABLE); | |
1945 | } | |
1946 | ||
16d98b31 RB |
1947 | /** |
1948 | * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream | |
1949 | * @stream: An i915 perf stream opened for OA metrics | |
1950 | * | |
1951 | * [Re]enables hardware periodic sampling according to the period configured | |
1952 | * when opening the stream. This also starts a hrtimer that will periodically | |
1953 | * check for data in the circular OA buffer for notifying userspace (e.g. | |
1954 | * during a read() or poll()). | |
1955 | */ | |
d7965152 RB |
1956 | static void i915_oa_stream_enable(struct i915_perf_stream *stream) |
1957 | { | |
1958 | struct drm_i915_private *dev_priv = stream->dev_priv; | |
1959 | ||
1960 | dev_priv->perf.oa.ops.oa_enable(dev_priv); | |
1961 | ||
1962 | if (dev_priv->perf.oa.periodic) | |
1963 | hrtimer_start(&dev_priv->perf.oa.poll_check_timer, | |
1964 | ns_to_ktime(POLL_PERIOD), | |
1965 | HRTIMER_MODE_REL_PINNED); | |
1966 | } | |
1967 | ||
1968 | static void gen7_oa_disable(struct drm_i915_private *dev_priv) | |
1969 | { | |
1970 | I915_WRITE(GEN7_OACONTROL, 0); | |
1971 | } | |
1972 | ||
19f81df2 RB |
1973 | static void gen8_oa_disable(struct drm_i915_private *dev_priv) |
1974 | { | |
1975 | I915_WRITE(GEN8_OACONTROL, 0); | |
1976 | } | |
1977 | ||
16d98b31 RB |
1978 | /** |
1979 | * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream | |
1980 | * @stream: An i915 perf stream opened for OA metrics | |
1981 | * | |
1982 | * Stops the OA unit from periodically writing counter reports into the | |
1983 | * circular OA buffer. This also stops the hrtimer that periodically checks for | |
1984 | * data in the circular OA buffer, for notifying userspace. | |
1985 | */ | |
d7965152 RB |
1986 | static void i915_oa_stream_disable(struct i915_perf_stream *stream) |
1987 | { | |
1988 | struct drm_i915_private *dev_priv = stream->dev_priv; | |
1989 | ||
1990 | dev_priv->perf.oa.ops.oa_disable(dev_priv); | |
1991 | ||
1992 | if (dev_priv->perf.oa.periodic) | |
1993 | hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer); | |
1994 | } | |
1995 | ||
d7965152 RB |
1996 | static const struct i915_perf_stream_ops i915_oa_stream_ops = { |
1997 | .destroy = i915_oa_stream_destroy, | |
1998 | .enable = i915_oa_stream_enable, | |
1999 | .disable = i915_oa_stream_disable, | |
2000 | .wait_unlocked = i915_oa_wait_unlocked, | |
2001 | .poll_wait = i915_oa_poll_wait, | |
2002 | .read = i915_oa_read, | |
eec688e1 RB |
2003 | }; |
2004 | ||
16d98b31 RB |
2005 | /** |
2006 | * i915_oa_stream_init - validate combined props for OA stream and init | |
2007 | * @stream: An i915 perf stream | |
2008 | * @param: The open parameters passed to `DRM_I915_PERF_OPEN` | |
2009 | * @props: The property state that configures stream (individually validated) | |
2010 | * | |
2011 | * While read_properties_unlocked() validates properties in isolation it | |
2012 | * doesn't ensure that the combination necessarily makes sense. | |
2013 | * | |
2014 | * At this point it has been determined that userspace wants a stream of | |
2015 | * OA metrics, but we still need to validate that the combined | |
2016 | * properties make sense together. | |
2017 | * | |
2018 | * If the configuration makes sense then we can allocate memory for | |
2019 | * a circular OA buffer and apply the requested metric set configuration. | |
2020 | * | |
2021 | * Returns: zero on success or a negative error code. | |
2022 | */ | |
d7965152 RB |
2023 | static int i915_oa_stream_init(struct i915_perf_stream *stream, |
2024 | struct drm_i915_perf_open_param *param, | |
2025 | struct perf_open_properties *props) | |
2026 | { | |
2027 | struct drm_i915_private *dev_priv = stream->dev_priv; | |
2028 | int format_size; | |
2029 | int ret; | |
2030 | ||
442b8c06 RB |
2031 | /* If the sysfs metrics/ directory wasn't registered for some |
2032 | * reason then don't let userspace try their luck with config | |
2033 | * IDs | |
2034 | */ | |
2035 | if (!dev_priv->perf.metrics_kobj) { | |
7708550c | 2036 | DRM_DEBUG("OA metrics weren't advertised via sysfs\n"); |
442b8c06 RB |
2037 | return -EINVAL; |
2038 | } | |
2039 | ||
d7965152 | 2040 | if (!(props->sample_flags & SAMPLE_OA_REPORT)) { |
7708550c | 2041 | DRM_DEBUG("Only OA report sampling supported\n"); |
d7965152 RB |
2042 | return -EINVAL; |
2043 | } | |
2044 | ||
2045 | if (!dev_priv->perf.oa.ops.init_oa_buffer) { | |
7708550c | 2046 | DRM_DEBUG("OA unit not supported\n"); |
d7965152 RB |
2047 | return -ENODEV; |
2048 | } | |
2049 | ||
2050 | /* To avoid the complexity of having to accurately filter | |
2051 | * counter reports and marshal to the appropriate client | |
2052 | * we currently only allow exclusive access | |
2053 | */ | |
2054 | if (dev_priv->perf.oa.exclusive_stream) { | |
7708550c | 2055 | DRM_DEBUG("OA unit already in use\n"); |
d7965152 RB |
2056 | return -EBUSY; |
2057 | } | |
2058 | ||
d7965152 | 2059 | if (!props->oa_format) { |
7708550c | 2060 | DRM_DEBUG("OA report format not specified\n"); |
d7965152 RB |
2061 | return -EINVAL; |
2062 | } | |
2063 | ||
712122ea RB |
2064 | /* We set up some ratelimit state to potentially throttle any _NOTES |
2065 | * about spurious, invalid OA reports which we don't forward to | |
2066 | * userspace. | |
2067 | * | |
2068 | * The initialization is associated with opening the stream (not driver | |
2069 | * init) considering we print a _NOTE about any throttling when closing | |
2070 | * the stream instead of waiting until driver _fini which no one would | |
2071 | * ever see. | |
2072 | * | |
2073 | * Using the same limiting factors as printk_ratelimit() | |
2074 | */ | |
2075 | ratelimit_state_init(&dev_priv->perf.oa.spurious_report_rs, | |
2076 | 5 * HZ, 10); | |
2077 | /* Since we use a DRM_NOTE for spurious reports it would be | |
2078 | * inconsistent to let __ratelimit() automatically print a warning for | |
2079 | * throttling. | |
2080 | */ | |
2081 | ratelimit_set_flags(&dev_priv->perf.oa.spurious_report_rs, | |
2082 | RATELIMIT_MSG_ON_RELEASE); | |
2083 | ||
d7965152 RB |
2084 | stream->sample_size = sizeof(struct drm_i915_perf_record_header); |
2085 | ||
2086 | format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size; | |
2087 | ||
2088 | stream->sample_flags |= SAMPLE_OA_REPORT; | |
2089 | stream->sample_size += format_size; | |
2090 | ||
2091 | dev_priv->perf.oa.oa_buffer.format_size = format_size; | |
2092 | if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0)) | |
2093 | return -EINVAL; | |
2094 | ||
2095 | dev_priv->perf.oa.oa_buffer.format = | |
2096 | dev_priv->perf.oa.oa_formats[props->oa_format].format; | |
2097 | ||
d7965152 | 2098 | dev_priv->perf.oa.periodic = props->oa_periodic; |
0dd860cf | 2099 | if (dev_priv->perf.oa.periodic) |
d7965152 RB |
2100 | dev_priv->perf.oa.period_exponent = props->oa_period_exponent; |
2101 | ||
d7965152 RB |
2102 | if (stream->ctx) { |
2103 | ret = oa_get_render_ctx_id(stream); | |
2104 | if (ret) | |
2105 | return ret; | |
2106 | } | |
2107 | ||
f89823c2 LL |
2108 | ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config); |
2109 | if (ret) | |
2110 | goto err_config; | |
701f8231 | 2111 | |
d7965152 RB |
2112 | /* PRM - observability performance counters: |
2113 | * | |
2114 | * OACONTROL, performance counter enable, note: | |
2115 | * | |
2116 | * "When this bit is set, in order to have coherent counts, | |
2117 | * RC6 power state and trunk clock gating must be disabled. | |
2118 | * This can be achieved by programming MMIO registers as | |
2119 | * 0xA094=0 and 0xA090[31]=1" | |
2120 | * | |
2121 | * In our case we are expecting that taking pm + FORCEWAKE | |
2122 | * references will effectively disable RC6. | |
2123 | */ | |
2124 | intel_runtime_pm_get(dev_priv); | |
2125 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | |
2126 | ||
987f8c44 | 2127 | ret = alloc_oa_buffer(dev_priv); |
2128 | if (ret) | |
2129 | goto err_oa_buf_alloc; | |
2130 | ||
701f8231 LL |
2131 | ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv, |
2132 | stream->oa_config); | |
d7965152 RB |
2133 | if (ret) |
2134 | goto err_enable; | |
2135 | ||
2136 | stream->ops = &i915_oa_stream_ops; | |
2137 | ||
701f8231 LL |
2138 | /* Lock device for exclusive_stream access late because |
2139 | * enable_metric_set() might lock as well on gen8+. | |
2140 | */ | |
2141 | ret = i915_mutex_lock_interruptible(&dev_priv->drm); | |
2142 | if (ret) | |
2143 | goto err_lock; | |
2144 | ||
d7965152 RB |
2145 | dev_priv->perf.oa.exclusive_stream = stream; |
2146 | ||
701f8231 LL |
2147 | mutex_unlock(&dev_priv->drm.struct_mutex); |
2148 | ||
d7965152 RB |
2149 | return 0; |
2150 | ||
701f8231 LL |
2151 | err_lock: |
2152 | dev_priv->perf.oa.ops.disable_metric_set(dev_priv); | |
2153 | ||
d7965152 | 2154 | err_enable: |
d7965152 RB |
2155 | free_oa_buffer(dev_priv); |
2156 | ||
2157 | err_oa_buf_alloc: | |
f89823c2 LL |
2158 | put_oa_config(dev_priv, stream->oa_config); |
2159 | ||
987f8c44 | 2160 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); |
2161 | intel_runtime_pm_put(dev_priv); | |
f89823c2 LL |
2162 | |
2163 | err_config: | |
d7965152 RB |
2164 | if (stream->ctx) |
2165 | oa_put_render_ctx_id(stream); | |
2166 | ||
2167 | return ret; | |
2168 | } | |
2169 | ||
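| /* | |
|  * Seed a context's register state image with per-context OA state matching | |
|  * the currently exclusive stream's config (if any); only the render engine | |
|  * carries OA state. | |
|  */ | |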
19f81df2 RB |
2170 | void i915_oa_init_reg_state(struct intel_engine_cs *engine, |
2171 | struct i915_gem_context *ctx, | |
2172 | u32 *reg_state) | |
2173 | { | |
28b6cb08 | 2174 | struct i915_perf_stream *stream; |
19f81df2 RB |
2175 | |
2176 | if (engine->id != RCS) | |
2177 | return; | |
2178 | ||
28b6cb08 | 2179 | stream = engine->i915->perf.oa.exclusive_stream; |
701f8231 LL |
2180 | if (stream) |
2181 | gen8_update_reg_state_unlocked(ctx, reg_state, stream->oa_config); | |
19f81df2 RB |
2182 | } |
2183 | ||
16d98b31 RB |
2184 | /** |
2185 | * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation | |
2186 | * @stream: An i915 perf stream | |
2187 | * @file: An i915 perf stream file | |
2188 | * @buf: destination buffer given by userspace | |
2189 | * @count: the number of bytes userspace wants to read | |
2190 | * @ppos: (inout) file seek position (unused) | |
2191 | * | |
2192 | * Besides wrapping &i915_perf_stream_ops->read this provides a common place to | |
2193 | * ensure that if we've successfully copied any data then reporting that takes | |
2194 | * precedence over any internal error status, so the data isn't lost. | |
2195 | * | |
2196 | * For example ret will be -ENOSPC whenever there is more buffered data than | |
2197 | * can be copied to userspace, but that's only interesting if we weren't able | |
2198 | * to copy some data because it implies the userspace buffer is too small to | |
2199 | * receive a single record (and we never split records). | |
2200 | * | |
2201 | * Another case with ret == -EFAULT is more of a grey area since it would seem | |
2202 | * like bad form for userspace to ask us to overrun its buffer, but the user | |
2203 | * knows best: | |
2204 | * | |
2205 | * http://yarchive.net/comp/linux/partial_reads_writes.html | |
2206 | * | |
2207 | * Returns: The number of bytes copied or a negative error code on failure. | |
2208 | */ | |
eec688e1 RB |
2209 | static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream, |
2210 | struct file *file, | |
2211 | char __user *buf, | |
2212 | size_t count, | |
2213 | loff_t *ppos) | |
2214 | { | |
2215 | /* Note we keep the offset (aka bytes read) separate from any | |
2216 | * error status so that the final check for whether we return | |
2217 | * the bytes read with a higher precedence than any error (see | |
2218 | * comment below) doesn't need to be handled/duplicated in | |
2219 | * stream->ops->read() implementations. | |
2220 | */ | |
2221 | size_t offset = 0; | |
2222 | int ret = stream->ops->read(stream, buf, count, &offset); | |
2223 | ||
eec688e1 RB |
2224 | return offset ?: (ret ?: -EAGAIN); |
2225 | } | |
2226 | ||
16d98b31 RB |
2227 | /** |
2228 | * i915_perf_read - handles read() FOP for i915 perf stream FDs | |
2229 | * @file: An i915 perf stream file | |
2230 | * @buf: destination buffer given by userspace | |
2231 | * @count: the number of bytes userspace wants to read | |
2232 | * @ppos: (inout) file seek position (unused) | |
2233 | * | |
2234 | * The entry point for handling a read() on a stream file descriptor from | |
2235 | * userspace. Most of the work is left to i915_perf_read_locked() and | |
2236 | * &i915_perf_stream_ops->read, but to spare stream implementations (of | |
2237 | * which we might have multiple later) the duplication, we handle blocking reads here. | |
2238 | * | |
2239 | * We can also consistently treat trying to read from a disabled stream | |
2240 | * as an IO error so implementations can assume the stream is enabled | |
2241 | * while reading. | |
2242 | * | |
2243 | * Returns: The number of bytes copied or a negative error code on failure. | |
2244 | */ | |
eec688e1 RB |
2245 | static ssize_t i915_perf_read(struct file *file, |
2246 | char __user *buf, | |
2247 | size_t count, | |
2248 | loff_t *ppos) | |
2249 | { | |
2250 | struct i915_perf_stream *stream = file->private_data; | |
2251 | struct drm_i915_private *dev_priv = stream->dev_priv; | |
2252 | ssize_t ret; | |
2253 | ||
d7965152 RB |
2254 | /* To ensure it's handled consistently we simply treat all reads of a |
2255 | * disabled stream as an error. In particular it might otherwise lead | |
2256 | * to a deadlock for blocking file descriptors... | |
2257 | */ | |
2258 | if (!stream->enabled) | |
2259 | return -EIO; | |
2260 | ||
eec688e1 | 2261 | if (!(file->f_flags & O_NONBLOCK)) { |
d7965152 RB |
2262 | /* There's the small chance of false positives from |
2263 | * stream->ops->wait_unlocked. | |
2264 | * | |
2265 | * E.g. with single context filtering since we only wait until | |
2266 | * oabuffer has >= 1 report we don't immediately know whether | |
2267 | * any reports really belong to the current context | |
eec688e1 RB |
2268 | */ |
2269 | do { | |
2270 | ret = stream->ops->wait_unlocked(stream); | |
2271 | if (ret) | |
2272 | return ret; | |
2273 | ||
2274 | mutex_lock(&dev_priv->perf.lock); | |
2275 | ret = i915_perf_read_locked(stream, file, | |
2276 | buf, count, ppos); | |
2277 | mutex_unlock(&dev_priv->perf.lock); | |
2278 | } while (ret == -EAGAIN); | |
2279 | } else { | |
2280 | mutex_lock(&dev_priv->perf.lock); | |
2281 | ret = i915_perf_read_locked(stream, file, buf, count, ppos); | |
2282 | mutex_unlock(&dev_priv->perf.lock); | |
2283 | } | |
2284 | ||
26ebd9c7 RB |
2285 | /* We allow the poll checking to sometimes report false positive POLLIN |
2286 | * events where we might actually report EAGAIN on read() if there's | |
2287 | * not really any data available. In this situation though we don't | |
2288 | * want to enter a busy loop between poll() reporting a POLLIN event | |
2289 | * and read() returning -EAGAIN. Clearing the oa.pollin state here | |
2290 | * effectively ensures we back off until the next hrtimer callback | |
2291 | * before reporting another POLLIN event. | |
2292 | */ | |
2293 | if (ret >= 0 || ret == -EAGAIN) { | |
d7965152 RB |
2294 | /* Maybe make ->pollin per-stream state if we support multiple |
2295 | * concurrent streams in the future. | |
2296 | */ | |
2297 | dev_priv->perf.oa.pollin = false; | |
2298 | } | |
2299 | ||
eec688e1 RB |
2300 | return ret; |
2301 | } | |
2302 | ||
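| /* | |
|  * hrtimer callback armed while a stream is enabled: checks the OA buffer | |
|  * for new reports, flags pollin and wakes any poll()/blocking readers, and | |
|  * re-arms itself for the next POLL_PERIOD. | |
|  */ | |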
d7965152 RB |
2303 | static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer) |
2304 | { | |
2305 | struct drm_i915_private *dev_priv = | |
2306 | container_of(hrtimer, typeof(*dev_priv), | |
2307 | perf.oa.poll_check_timer); | |
2308 | ||
19f81df2 | 2309 | if (oa_buffer_check_unlocked(dev_priv)) { |
d7965152 RB |
2310 | dev_priv->perf.oa.pollin = true; |
2311 | wake_up(&dev_priv->perf.oa.poll_wq); | |
2312 | } | |
2313 | ||
2314 | hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD)); | |
2315 | ||
2316 | return HRTIMER_RESTART; | |
2317 | } | |
2318 | ||
16d98b31 RB |
2319 | /** |
2320 | * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream | |
2321 | * @dev_priv: i915 device instance | |
2322 | * @stream: An i915 perf stream | |
2323 | * @file: An i915 perf stream file | |
2324 | * @wait: poll() state table | |
2325 | * | |
2326 | * For handling userspace polling on an i915 perf stream, this calls through to | |
2327 | * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that | |
2328 | * will be woken for new stream data. | |
2329 | * | |
2330 | * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize | |
2331 | * with any non-file-operation driver hooks. | |
2332 | * | |
2333 | * Returns: any poll events that are ready without sleeping | |
2334 | */ | |
d7965152 RB |
2335 | static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv, |
2336 | struct i915_perf_stream *stream, | |
eec688e1 RB |
2337 | struct file *file, |
2338 | poll_table *wait) | |
2339 | { | |
d7965152 | 2340 | unsigned int events = 0; |
eec688e1 RB |
2341 | |
2342 | stream->ops->poll_wait(stream, file, wait); | |
2343 | ||
d7965152 RB |
2344 | /* Note: we don't explicitly check whether there's something to read |
2345 | * here since this path may be very hot depending on what else | |
2346 | * userspace is polling, or on the timeout in use. We rely solely on | |
2347 | * the hrtimer/oa_poll_check_timer_cb to notify us when there are | |
2348 | * samples to read. | |
2349 | */ | |
2350 | if (dev_priv->perf.oa.pollin) | |
2351 | events |= POLLIN; | |
eec688e1 | 2352 | |
d7965152 | 2353 | return events; |
eec688e1 RB |
2354 | } |
2355 | ||
16d98b31 RB |
2356 | /** |
2357 | * i915_perf_poll - call poll_wait() with a suitable wait queue for stream | |
2358 | * @file: An i915 perf stream file | |
2359 | * @wait: poll() state table | |
2360 | * | |
2361 | * For handling userspace polling on an i915 perf stream, this ensures | |
2362 | * poll_wait() gets called with a wait queue that will be woken for new stream | |
2363 | * data. | |
2364 | * | |
2365 | * Note: Implementation deferred to i915_perf_poll_locked() | |
2366 | * | |
2367 | * Returns: any poll events that are ready without sleeping | |
2368 | */ | |
eec688e1 RB |
2369 | static unsigned int i915_perf_poll(struct file *file, poll_table *wait) |
2370 | { | |
2371 | struct i915_perf_stream *stream = file->private_data; | |
2372 | struct drm_i915_private *dev_priv = stream->dev_priv; | |
2373 | int ret; | |
2374 | ||
2375 | mutex_lock(&dev_priv->perf.lock); | |
d7965152 | 2376 | ret = i915_perf_poll_locked(dev_priv, stream, file, wait); |
eec688e1 RB |
2377 | mutex_unlock(&dev_priv->perf.lock); |
2378 | ||
2379 | return ret; | |
2380 | } | |
2381 | ||
16d98b31 RB |
2382 | /** |
2383 | * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl | |
2384 | * @stream: A disabled i915 perf stream | |
2385 | * | |
2386 | * [Re]enables the associated capture of data for this stream. | |
2387 | * | |
2388 | * If a stream was previously enabled then there's currently no intention | |
2389 | * to provide userspace any guarantee about the preservation of previously | |
2390 | * buffered data. | |
2391 | */ | |
eec688e1 RB |
2392 | static void i915_perf_enable_locked(struct i915_perf_stream *stream) |
2393 | { | |
2394 | if (stream->enabled) | |
2395 | return; | |
2396 | ||
2397 | /* Allow stream->ops->enable() to refer to this */ | |
2398 | stream->enabled = true; | |
2399 | ||
2400 | if (stream->ops->enable) | |
2401 | stream->ops->enable(stream); | |
2402 | } | |
2403 | ||
16d98b31 RB |
2404 | /** |
2405 | * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl | |
2406 | * @stream: An enabled i915 perf stream | |
2407 | * | |
2408 | * Disables the associated capture of data for this stream. | |
2409 | * | |
2410 | * The intention is that disabling and re-enabling a stream will ideally be | |
2411 | * cheaper than destroying and re-opening a stream with the same configuration, | |
2412 | * though there are no formal guarantees about what state or buffered data | |
2413 | * must be retained between disabling and re-enabling a stream. | |
2414 | * | |
2415 | * Note: while a stream is disabled it's considered an error for userspace | |
2416 | * to attempt to read from the stream (-EIO). | |
2417 | */ | |
eec688e1 RB |
2418 | static void i915_perf_disable_locked(struct i915_perf_stream *stream) |
2419 | { | |
2420 | if (!stream->enabled) | |
2421 | return; | |
2422 | ||
2423 | /* Allow stream->ops->disable() to refer to this */ | |
2424 | stream->enabled = false; | |
2425 | ||
2426 | if (stream->ops->disable) | |
2427 | stream->ops->disable(stream); | |
2428 | } | |
2429 | ||
16d98b31 RB |
2430 | /** |
2431 | * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs | |
2432 | * @stream: An i915 perf stream | |
2433 | * @cmd: the ioctl request | |
2434 | * @arg: the ioctl data | |
2435 | * | |
2436 | * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize | |
2437 | * with any non-file-operation driver hooks. | |
2438 | * | |
2439 | * Returns: zero on success or a negative error code. Returns -EINVAL for | |
2440 | * an unknown ioctl request. | |
2441 | */ | |
eec688e1 RB |
2442 | static long i915_perf_ioctl_locked(struct i915_perf_stream *stream, |
2443 | unsigned int cmd, | |
2444 | unsigned long arg) | |
2445 | { | |
2446 | switch (cmd) { | |
2447 | case I915_PERF_IOCTL_ENABLE: | |
2448 | i915_perf_enable_locked(stream); | |
2449 | return 0; | |
2450 | case I915_PERF_IOCTL_DISABLE: | |
2451 | i915_perf_disable_locked(stream); | |
2452 | return 0; | |
2453 | } | |
2454 | ||
2455 | return -EINVAL; | |
2456 | } | |
2457 | ||
16d98b31 RB |
2458 | /** |
2459 | * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs | |
2460 | * @file: An i915 perf stream file | |
2461 | * @cmd: the ioctl request | |
2462 | * @arg: the ioctl data | |
2463 | * | |
2464 | * Implementation deferred to i915_perf_ioctl_locked(). | |
2465 | * | |
2466 | * Returns: zero on success or a negative error code. Returns -EINVAL for | |
2467 | * an unknown ioctl request. | |
2468 | */ | |
eec688e1 RB |
2469 | static long i915_perf_ioctl(struct file *file, |
2470 | unsigned int cmd, | |
2471 | unsigned long arg) | |
2472 | { | |
2473 | struct i915_perf_stream *stream = file->private_data; | |
2474 | struct drm_i915_private *dev_priv = stream->dev_priv; | |
2475 | long ret; | |
2476 | ||
2477 | mutex_lock(&dev_priv->perf.lock); | |
2478 | ret = i915_perf_ioctl_locked(stream, cmd, arg); | |
2479 | mutex_unlock(&dev_priv->perf.lock); | |
2480 | ||
2481 | return ret; | |
2482 | } | |
2483 | ||
16d98b31 RB |
2484 | /** |
2485 | * i915_perf_destroy_locked - destroy an i915 perf stream | |
2486 | * @stream: An i915 perf stream | |
2487 | * | |
2488 | * Frees all resources associated with the given i915 perf @stream, disabling | |
2489 | * any associated data capture in the process. | |
2490 | * | |
2491 | * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize | |
2492 | * with any non-file-operation driver hooks. | |
2493 | */ | |
eec688e1 RB |
2494 | static void i915_perf_destroy_locked(struct i915_perf_stream *stream) |
2495 | { | |
eec688e1 RB |
2496 | if (stream->enabled) |
2497 | i915_perf_disable_locked(stream); | |
2498 | ||
2499 | if (stream->ops->destroy) | |
2500 | stream->ops->destroy(stream); | |
2501 | ||
2502 | list_del(&stream->link); | |
2503 | ||
69df05e1 | 2504 | if (stream->ctx) |
5f09a9c8 | 2505 | i915_gem_context_put(stream->ctx); |
eec688e1 RB |
2506 | |
2507 | kfree(stream); | |
2508 | } | |
2509 | ||
16d98b31 RB |
2510 | /** |
2511 | * i915_perf_release - handles userspace close() of a stream file | |
2512 | * @inode: anonymous inode associated with file | |
2513 | * @file: An i915 perf stream file | |
2514 | * | |
2515 | * Cleans up any resources associated with an open i915 perf stream file. | |
2516 | * | |
2517 | * NB: close() can't really fail from the userspace point of view. | |
2518 | * | |
2519 | * Returns: zero on success or a negative error code. | |
2520 | */ | |
eec688e1 RB |
2521 | static int i915_perf_release(struct inode *inode, struct file *file) |
2522 | { | |
2523 | struct i915_perf_stream *stream = file->private_data; | |
2524 | struct drm_i915_private *dev_priv = stream->dev_priv; | |
2525 | ||
2526 | mutex_lock(&dev_priv->perf.lock); | |
2527 | i915_perf_destroy_locked(stream); | |
2528 | mutex_unlock(&dev_priv->perf.lock); | |
2529 | ||
2530 | return 0; | |
2531 | } | |
2532 | ||
2533 | ||
2534 | static const struct file_operations fops = { | |
2535 | .owner = THIS_MODULE, | |
2536 | .llseek = no_llseek, | |
2537 | .release = i915_perf_release, | |
2538 | .poll = i915_perf_poll, | |
2539 | .read = i915_perf_read, | |
2540 | .unlocked_ioctl = i915_perf_ioctl, | |
191f8960 LL |
2541 | /* Our ioctls have no arguments, so it's safe to use the same function |
2542 | * to handle 32bits compatibility. | |
2543 | */ | |
2544 | .compat_ioctl = i915_perf_ioctl, | |
eec688e1 RB |
2545 | }; |
2546 | ||
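| /* | |
|  * Userspace sketch (illustrative only, not part of this file; error | |
|  * handling elided and handle_record() is a stand-in for the caller's own | |
|  * record handling): a stream fd returned by DRM_IOCTL_I915_PERF_OPEN is | |
|  * typically driven through the fops above roughly as follows: | |
|  * | |
|  *   ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0); | |
|  *   while ((len = read(stream_fd, buf, sizeof(buf))) > 0) { | |
|  *           const struct drm_i915_perf_record_header *hdr = (void *)buf; | |
|  * | |
|  *           while ((const char *)hdr < buf + len) { | |
|  *                   handle_record(hdr);   // e.g. DRM_I915_PERF_RECORD_SAMPLE | |
|  *                   hdr = (void *)hdr + hdr->size; | |
|  *           } | |
|  *   } | |
|  */ | |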
2547 | ||
16d98b31 RB |
2548 | /** |
2549 | * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD | |
2550 | * @dev_priv: i915 device instance | |
2551 | * @param: The open parameters passed to `DRM_I915_PERF_OPEN` | |
2552 | * @props: individually validated u64 property value pairs | |
2553 | * @file: drm file | |
2554 | * | |
2555 | * See i915_perf_ioctl_open() for interface details. | |
2556 | * | |
2557 | * Implements further stream config validation and stream initialization on | |
2558 | * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex | |
2559 | * taken to serialize with any non-file-operation driver hooks. | |
2560 | * | |
2561 | * Note: at this point the @props have only been validated in isolation and | |
2562 | * it's still necessary to validate that the combination of properties makes | |
2563 | * sense. | |
2564 | * | |
2565 | * In the case where userspace is interested in OA unit metrics then further | |
2566 | * config validation and stream initialization details will be handled by | |
2567 | * i915_oa_stream_init(). The code here should only validate config state that | |
2568 | * will be relevant to all stream types / backends. | |
2569 | * | |
2570 | * Returns: zero on success or a negative error code. | |
2571 | */ | |
eec688e1 RB |
2572 | static int |
2573 | i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, | |
2574 | struct drm_i915_perf_open_param *param, | |
2575 | struct perf_open_properties *props, | |
2576 | struct drm_file *file) | |
2577 | { | |
2578 | struct i915_gem_context *specific_ctx = NULL; | |
2579 | struct i915_perf_stream *stream = NULL; | |
2580 | unsigned long f_flags = 0; | |
19f81df2 | 2581 | bool privileged_op = true; |
eec688e1 RB |
2582 | int stream_fd; |
2583 | int ret; | |
2584 | ||
2585 | if (props->single_context) { | |
2586 | u32 ctx_handle = props->ctx_handle; | |
2587 | struct drm_i915_file_private *file_priv = file->driver_priv; | |
2588 | ||
635f56c3 ID |
2589 | specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle); |
2590 | if (!specific_ctx) { | |
2591 | DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n", | |
2592 | ctx_handle); | |
2593 | ret = -ENOENT; | |
eec688e1 RB |
2594 | goto err; |
2595 | } | |
2596 | } | |
2597 | ||
19f81df2 RB |
2598 | /* |
2599 | * On Haswell the OA unit supports clock gating off for a specific | |
2600 | * context and in this mode there's no visibility of metrics for the | |
2601 | * rest of the system, which we consider acceptable for a | |
2602 | * non-privileged client. | |
2603 | * | |
2604 | * For Gen8+ the OA unit no longer supports clock gating off for a | |
2605 | * specific context and the kernel can't securely stop the counters | |
2606 | * from updating as system-wide / global values. Even though we can | |
2607 | * filter reports based on the included context ID we can't block | |
2608 | * clients from seeing the raw / global counter values via | |
2609 | * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to | |
2610 | * enable the OA unit by default. | |
2611 | */ | |
2612 | if (IS_HASWELL(dev_priv) && specific_ctx) | |
2613 | privileged_op = false; | |
2614 | ||
ccdf6341 RB |
2615 | /* Similar to perf's kernel.perf_event_paranoid sysctl option
2616 | * we check a dev.i915.perf_stream_paranoid sysctl option
2617 | * to determine if it's ok to access system-wide OA counters
2618 | * without CAP_SYS_ADMIN privileges.
2619 | */ | |
19f81df2 | 2620 | if (privileged_op && |
ccdf6341 | 2621 | i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) { |
7708550c | 2622 | DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n"); |
eec688e1 RB |
2623 | ret = -EACCES; |
2624 | goto err_ctx; | |
2625 | } | |
2626 | ||
2627 | stream = kzalloc(sizeof(*stream), GFP_KERNEL); | |
2628 | if (!stream) { | |
2629 | ret = -ENOMEM; | |
2630 | goto err_ctx; | |
2631 | } | |
2632 | ||
eec688e1 RB |
2633 | stream->dev_priv = dev_priv; |
2634 | stream->ctx = specific_ctx; | |
2635 | ||
d7965152 RB |
2636 | ret = i915_oa_stream_init(stream, param, props); |
2637 | if (ret) | |
2638 | goto err_alloc; | |
2639 | ||
2640 | /* We avoid simply assigning stream->sample_flags = props->sample_flags
2641 | * so that _stream_init can check the combination of sample flags more
2642 | * thoroughly, but this is still the expected result at this point.
eec688e1 | 2643 | */ |
d7965152 RB |
2644 | if (WARN_ON(stream->sample_flags != props->sample_flags)) { |
2645 | ret = -ENODEV; | |
22f880ca | 2646 | goto err_flags; |
d7965152 | 2647 | } |
eec688e1 RB |
2648 | |
2649 | list_add(&stream->link, &dev_priv->perf.streams); | |
2650 | ||
2651 | if (param->flags & I915_PERF_FLAG_FD_CLOEXEC) | |
2652 | f_flags |= O_CLOEXEC; | |
2653 | if (param->flags & I915_PERF_FLAG_FD_NONBLOCK) | |
2654 | f_flags |= O_NONBLOCK; | |
2655 | ||
2656 | stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags); | |
2657 | if (stream_fd < 0) { | |
2658 | ret = stream_fd; | |
2659 | goto err_open; | |
2660 | } | |
2661 | ||
2662 | if (!(param->flags & I915_PERF_FLAG_DISABLED)) | |
2663 | i915_perf_enable_locked(stream); | |
2664 | ||
2665 | return stream_fd; | |
2666 | ||
2667 | err_open: | |
2668 | list_del(&stream->link); | |
22f880ca | 2669 | err_flags: |
eec688e1 RB |
2670 | if (stream->ops->destroy) |
2671 | stream->ops->destroy(stream); | |
2672 | err_alloc: | |
2673 | kfree(stream); | |
2674 | err_ctx: | |
69df05e1 | 2675 | if (specific_ctx) |
5f09a9c8 | 2676 | i915_gem_context_put(specific_ctx); |
eec688e1 RB |
2677 | err: |
2678 | return ret; | |
2679 | } | |
2680 | ||
155e941f RB |
2681 | static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent) |
2682 | { | |
2683 | return div_u64(1000000000ULL * (2ULL << exponent), | |
2684 | dev_priv->perf.oa.timestamp_frequency); | |
2685 | } | |
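/*
 * For reference: the period selected by an exponent doubles with each
 * increment, i.e. period_ns = 2^(exponent + 1) * NSEC_PER_SEC /
 * timestamp_frequency. With the 12.5MHz timestamp frequency used for Haswell
 * below, exponent 0 gives 160ns, 1 gives 320ns and 16 gives ~10.5ms.
 *
 * A hypothetical userspace helper (illustrative only, not part of the
 * driver) picking the largest exponent whose period doesn't exceed a target
 * could look like:
 *
 *	static int oa_exponent_for_period(uint64_t timestamp_hz,
 *					  uint64_t target_ns)
 *	{
 *		int exponent = 0;
 *
 *		while (exponent < 31 &&
 *		       (2ULL << (exponent + 1)) * 1000000000ULL /
 *		       timestamp_hz <= target_ns)
 *			exponent++;
 *
 *		return exponent;
 *	}
 */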
2686 | ||
16d98b31 RB |
2687 | /** |
2688 | * read_properties_unlocked - validate + copy userspace stream open properties | |
2689 | * @dev_priv: i915 device instance | |
2690 | * @uprops: The array of u64 key value pairs given by userspace | |
2691 | * @n_props: The number of key value pairs expected in @uprops | |
2692 | * @props: The stream configuration built up while validating properties | |
eec688e1 RB |
2693 | * |
2694 | * Note this function only validates properties in isolation; it doesn't
2695 | * validate that the combination of properties makes sense or that all | |
2696 | * properties necessary for a particular kind of stream have been set. | |
16d98b31 RB |
2697 | * |
2698 | * Note that there currently aren't any ordering requirements for properties so | |
2699 | * we shouldn't validate or assume anything about ordering here. This doesn't | |
2700 | * rule out defining new properties with ordering requirements in the future. | |
eec688e1 RB |
2701 | */ |
2702 | static int read_properties_unlocked(struct drm_i915_private *dev_priv, | |
2703 | u64 __user *uprops, | |
2704 | u32 n_props, | |
2705 | struct perf_open_properties *props) | |
2706 | { | |
2707 | u64 __user *uprop = uprops; | |
701f8231 | 2708 | u32 i; |
eec688e1 RB |
2709 | |
2710 | memset(props, 0, sizeof(struct perf_open_properties)); | |
2711 | ||
2712 | if (!n_props) { | |
7708550c | 2713 | DRM_DEBUG("No i915 perf properties given\n"); |
eec688e1 RB |
2714 | return -EINVAL; |
2715 | } | |
2716 | ||
2717 | /* Considering that ID = 0 is reserved and assuming that we don't
2718 | * (currently) expect any configuration to ever specify duplicate
2719 | * values for a particular property ID, the last _PROP_MAX value is
2720 | * one greater than the maximum number of properties we expect to get
2721 | * from userspace.
2722 | */ | |
2723 | if (n_props >= DRM_I915_PERF_PROP_MAX) { | |
7708550c | 2724 | DRM_DEBUG("More i915 perf properties specified than exist\n"); |
eec688e1 RB |
2725 | return -EINVAL; |
2726 | } | |
2727 | ||
2728 | for (i = 0; i < n_props; i++) { | |
00319ba0 | 2729 | u64 oa_period, oa_freq_hz; |
eec688e1 RB |
2730 | u64 id, value; |
2731 | int ret; | |
2732 | ||
2733 | ret = get_user(id, uprop); | |
2734 | if (ret) | |
2735 | return ret; | |
2736 | ||
2737 | ret = get_user(value, uprop + 1); | |
2738 | if (ret) | |
2739 | return ret; | |
2740 | ||
0a309f9e MA |
2741 | if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) { |
2742 | DRM_DEBUG("Unknown i915 perf property ID\n"); | |
2743 | return -EINVAL; | |
2744 | } | |
2745 | ||
eec688e1 RB |
2746 | switch ((enum drm_i915_perf_property_id)id) { |
2747 | case DRM_I915_PERF_PROP_CTX_HANDLE: | |
2748 | props->single_context = 1; | |
2749 | props->ctx_handle = value; | |
2750 | break; | |
d7965152 RB |
2751 | case DRM_I915_PERF_PROP_SAMPLE_OA: |
2752 | props->sample_flags |= SAMPLE_OA_REPORT; | |
2753 | break; | |
2754 | case DRM_I915_PERF_PROP_OA_METRICS_SET: | |
701f8231 | 2755 | if (value == 0) { |
7708550c | 2756 | DRM_DEBUG("Unknown OA metric set ID\n"); |
d7965152 RB |
2757 | return -EINVAL; |
2758 | } | |
2759 | props->metrics_set = value; | |
2760 | break; | |
2761 | case DRM_I915_PERF_PROP_OA_FORMAT: | |
2762 | if (value == 0 || value >= I915_OA_FORMAT_MAX) { | |
52c57c26 RB |
2763 | DRM_DEBUG("Out-of-range OA report format %llu\n", |
2764 | value); | |
d7965152 RB |
2765 | return -EINVAL; |
2766 | } | |
2767 | if (!dev_priv->perf.oa.oa_formats[value].size) { | |
52c57c26 RB |
2768 | DRM_DEBUG("Unsupported OA report format %llu\n", |
2769 | value); | |
d7965152 RB |
2770 | return -EINVAL; |
2771 | } | |
2772 | props->oa_format = value; | |
2773 | break; | |
2774 | case DRM_I915_PERF_PROP_OA_EXPONENT: | |
2775 | if (value > OA_EXPONENT_MAX) { | |
7708550c RB |
2776 | DRM_DEBUG("OA timer exponent too high (> %u)\n", |
2777 | OA_EXPONENT_MAX); | |
d7965152 RB |
2778 | return -EINVAL; |
2779 | } | |
2780 | ||
00319ba0 | 2781 | /* Theoretically we can program the OA unit to sample |
155e941f RB |
2782 | * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns |
2783 | * for BXT. We don't allow such high sampling
2784 | * frequencies by default unless the caller has root privileges.
00319ba0 | 2785 | */ |
155e941f | 2786 | |
00319ba0 | 2787 | BUILD_BUG_ON(sizeof(oa_period) != 8); |
155e941f | 2788 | oa_period = oa_exponent_to_ns(dev_priv, value); |
00319ba0 RB |
2789 | |
2790 | /* This check is primarily to ensure that oa_period <= | |
2791 | * UINT32_MAX (before passing to do_div which only | |
2792 | * accepts a u32 denominator), but we can also skip | |
2793 | * checking anything < 1Hz which implicitly can't be | |
2794 | * limited via an integer oa_max_sample_rate. | |
d7965152 | 2795 | */ |
00319ba0 RB |
2796 | if (oa_period <= NSEC_PER_SEC) { |
2797 | u64 tmp = NSEC_PER_SEC; | |
2798 | do_div(tmp, oa_period); | |
2799 | oa_freq_hz = tmp; | |
2800 | } else | |
2801 | oa_freq_hz = 0; | |
2802 | ||
2803 | if (oa_freq_hz > i915_oa_max_sample_rate && | |
2804 | !capable(CAP_SYS_ADMIN)) { | |
7708550c | 2805 | DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n", |
00319ba0 | 2806 | i915_oa_max_sample_rate); |
d7965152 RB |
2807 | return -EACCES; |
2808 | } | |
2809 | ||
2810 | props->oa_periodic = true; | |
2811 | props->oa_period_exponent = value; | |
2812 | break; | |
0a309f9e | 2813 | case DRM_I915_PERF_PROP_MAX: |
eec688e1 | 2814 | MISSING_CASE(id); |
eec688e1 RB |
2815 | return -EINVAL; |
2816 | } | |
2817 | ||
2818 | uprop += 2; | |
2819 | } | |
2820 | ||
2821 | return 0; | |
2822 | } | |
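/*
 * For reference, the @uprops array parsed above is a flat list of (ID, value)
 * u64 pairs in userspace memory. An illustrative layout (not a recommended
 * configuration), using the uapi property IDs and a placeholder
 * metrics_set_id taken from sysfs:
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *
 * with n_props = sizeof(properties) / (2 * sizeof(uint64_t)).
 */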
2823 | ||
16d98b31 RB |
2824 | /** |
2825 | * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD | |
2826 | * @dev: drm device | |
2827 | * @data: ioctl data copied from userspace (unvalidated) | |
2828 | * @file: drm file | |
2829 | * | |
2830 | * Validates the stream open parameters given by userspace including flags | |
2831 | * and an array of u64 key, value pair properties. | |
2832 | * | |
2833 | * Very little is assumed up front about the nature of the stream being | |
2834 | * opened (for instance we don't assume it's for periodic OA unit metrics). An | |
2835 | * i915-perf stream is expected to be a suitable interface for other forms of | |
2836 | * buffered data written by the GPU besides periodic OA metrics. | |
2837 | * | |
2838 | * Note we copy the properties from userspace outside of the i915 perf | |
2839 | * mutex to avoid an awkward lockdep dependency with mmap_sem.
2840 | * | |
2841 | * Most of the implementation details are handled by | |
2842 | * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock | |
2843 | * mutex for serializing with any non-file-operation driver hooks. | |
2844 | * | |
2845 | * Return: A newly opened i915 Perf stream file descriptor or negative | |
2846 | * error code on failure. | |
2847 | */ | |
eec688e1 RB |
2848 | int i915_perf_open_ioctl(struct drm_device *dev, void *data, |
2849 | struct drm_file *file) | |
2850 | { | |
2851 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2852 | struct drm_i915_perf_open_param *param = data; | |
2853 | struct perf_open_properties props; | |
2854 | u32 known_open_flags; | |
2855 | int ret; | |
2856 | ||
2857 | if (!dev_priv->perf.initialized) { | |
7708550c | 2858 | DRM_DEBUG("i915 perf interface not available for this system\n"); |
eec688e1 RB |
2859 | return -ENOTSUPP; |
2860 | } | |
2861 | ||
2862 | known_open_flags = I915_PERF_FLAG_FD_CLOEXEC | | |
2863 | I915_PERF_FLAG_FD_NONBLOCK | | |
2864 | I915_PERF_FLAG_DISABLED; | |
2865 | if (param->flags & ~known_open_flags) { | |
7708550c | 2866 | DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n"); |
eec688e1 RB |
2867 | return -EINVAL; |
2868 | } | |
2869 | ||
2870 | ret = read_properties_unlocked(dev_priv, | |
2871 | u64_to_user_ptr(param->properties_ptr), | |
2872 | param->num_properties, | |
2873 | &props); | |
2874 | if (ret) | |
2875 | return ret; | |
2876 | ||
2877 | mutex_lock(&dev_priv->perf.lock); | |
2878 | ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file); | |
2879 | mutex_unlock(&dev_priv->perf.lock); | |
2880 | ||
2881 | return ret; | |
2882 | } | |
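/*
 * A hypothetical userspace open sequence (illustrative only), building on
 * the property layout shown after read_properties_unlocked() above:
 *
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC |
 *			 I915_PERF_FLAG_FD_NONBLOCK,
 *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * The returned fd can then be read()/poll()ed for records and toggled with
 * I915_PERF_IOCTL_ENABLE / I915_PERF_IOCTL_DISABLE.
 */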
2883 | ||
16d98b31 RB |
2884 | /** |
2885 | * i915_perf_register - exposes i915-perf to userspace | |
2886 | * @dev_priv: i915 device instance | |
2887 | * | |
2888 | * In particular OA metric sets are advertised under a sysfs metrics/ | |
2889 | * directory allowing userspace to enumerate valid IDs that can be | |
2890 | * used to open an i915-perf stream. | |
2891 | */ | |
442b8c06 RB |
2892 | void i915_perf_register(struct drm_i915_private *dev_priv) |
2893 | { | |
701f8231 LL |
2894 | int ret; |
2895 | ||
442b8c06 RB |
2896 | if (!dev_priv->perf.initialized) |
2897 | return; | |
2898 | ||
2899 | /* Take the perf lock to be sure we're synchronized with any attempted
2900 | * i915_perf_open_ioctl(), considering that we register after
2901 | * being exposed to userspace.
2902 | */ | |
2903 | mutex_lock(&dev_priv->perf.lock); | |
2904 | ||
2905 | dev_priv->perf.metrics_kobj = | |
2906 | kobject_create_and_add("metrics", | |
2907 | &dev_priv->drm.primary->kdev->kobj); | |
2908 | if (!dev_priv->perf.metrics_kobj) | |
2909 | goto exit; | |
2910 | ||
40f75ea4 | 2911 | sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr); |
701f8231 | 2912 | |
19f81df2 | 2913 | if (IS_HASWELL(dev_priv)) { |
701f8231 | 2914 | i915_perf_load_test_config_hsw(dev_priv); |
19f81df2 | 2915 | } else if (IS_BROADWELL(dev_priv)) { |
701f8231 | 2916 | i915_perf_load_test_config_bdw(dev_priv); |
19f81df2 | 2917 | } else if (IS_CHERRYVIEW(dev_priv)) { |
701f8231 | 2918 | i915_perf_load_test_config_chv(dev_priv); |
19f81df2 | 2919 | } else if (IS_SKYLAKE(dev_priv)) { |
701f8231 LL |
2920 | if (IS_SKL_GT2(dev_priv)) |
2921 | i915_perf_load_test_config_sklgt2(dev_priv); | |
2922 | else if (IS_SKL_GT3(dev_priv)) | |
2923 | i915_perf_load_test_config_sklgt3(dev_priv); | |
2924 | else if (IS_SKL_GT4(dev_priv)) | |
2925 | i915_perf_load_test_config_sklgt4(dev_priv); | |
19f81df2 | 2926 | } else if (IS_BROXTON(dev_priv)) { |
701f8231 | 2927 | i915_perf_load_test_config_bxt(dev_priv); |
6c5c1d89 | 2928 | } else if (IS_KABYLAKE(dev_priv)) { |
701f8231 LL |
2929 | if (IS_KBL_GT2(dev_priv)) |
2930 | i915_perf_load_test_config_kblgt2(dev_priv); | |
2931 | else if (IS_KBL_GT3(dev_priv)) | |
2932 | i915_perf_load_test_config_kblgt3(dev_priv); | |
28c7ef9e | 2933 | } else if (IS_GEMINILAKE(dev_priv)) { |
701f8231 | 2934 | i915_perf_load_test_config_glk(dev_priv); |
22ea4f35 LL |
2935 | } else if (IS_COFFEELAKE(dev_priv)) { |
2936 | if (IS_CFL_GT2(dev_priv)) | |
2937 | i915_perf_load_test_config_cflgt2(dev_priv); | |
4407eaa9 LL |
2938 | else if (IS_CFL_GT3(dev_priv))
2939 | i915_perf_load_test_config_cflgt3(dev_priv); | |
442b8c06 RB |
2940 | } |
2941 | ||
701f8231 LL |
2942 | if (dev_priv->perf.oa.test_config.id == 0) |
2943 | goto sysfs_error; | |
2944 | ||
2945 | ret = sysfs_create_group(dev_priv->perf.metrics_kobj, | |
2946 | &dev_priv->perf.oa.test_config.sysfs_metric); | |
2947 | if (ret) | |
2948 | goto sysfs_error; | |
f89823c2 LL |
2949 | |
2950 | atomic_set(&dev_priv->perf.oa.test_config.ref_count, 1); | |
2951 | ||
19f81df2 RB |
2952 | goto exit; |
2953 | ||
2954 | sysfs_error: | |
2955 | kobject_put(dev_priv->perf.metrics_kobj); | |
2956 | dev_priv->perf.metrics_kobj = NULL; | |
2957 | ||
442b8c06 RB |
2958 | exit: |
2959 | mutex_unlock(&dev_priv->perf.lock); | |
2960 | } | |
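/*
 * With the metrics group registered above, each advertised metric set shows
 * up to userspace as (path given for card0 as an example):
 *
 *	/sys/class/drm/card0/metrics/<uuid>/id
 *
 * and reading 'id' yields the value to pass as
 * DRM_I915_PERF_PROP_OA_METRICS_SET when opening a stream.
 */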
2961 | ||
16d98b31 RB |
2962 | /** |
2963 | * i915_perf_unregister - hide i915-perf from userspace | |
2964 | * @dev_priv: i915 device instance | |
2965 | * | |
2966 | * i915-perf state cleanup is split up into an 'unregister' and | |
2967 | * 'deinit' phase where the interface is first hidden from | |
2968 | * userspace by i915_perf_unregister() before cleaning up | |
2969 | * remaining state in i915_perf_fini(). | |
2970 | */ | |
442b8c06 RB |
2971 | void i915_perf_unregister(struct drm_i915_private *dev_priv) |
2972 | { | |
442b8c06 RB |
2973 | if (!dev_priv->perf.metrics_kobj) |
2974 | return; | |
2975 | ||
701f8231 LL |
2976 | sysfs_remove_group(dev_priv->perf.metrics_kobj, |
2977 | &dev_priv->perf.oa.test_config.sysfs_metric); | |
442b8c06 RB |
2978 | |
2979 | kobject_put(dev_priv->perf.metrics_kobj); | |
2980 | dev_priv->perf.metrics_kobj = NULL; | |
2981 | } | |
2982 | ||
f89823c2 LL |
2983 | static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr) |
2984 | { | |
2985 | static const i915_reg_t flex_eu_regs[] = { | |
2986 | EU_PERF_CNTL0, | |
2987 | EU_PERF_CNTL1, | |
2988 | EU_PERF_CNTL2, | |
2989 | EU_PERF_CNTL3, | |
2990 | EU_PERF_CNTL4, | |
2991 | EU_PERF_CNTL5, | |
2992 | EU_PERF_CNTL6, | |
2993 | }; | |
2994 | int i; | |
2995 | ||
2996 | for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) { | |
2997 | if (flex_eu_regs[i].reg == addr) | |
2998 | return true; | |
2999 | } | |
3000 | return false; | |
3001 | } | |
3002 | ||
3003 | static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr) | |
3004 | { | |
3005 | return (addr >= OASTARTTRIG1.reg && addr <= OASTARTTRIG8.reg) || | |
3006 | (addr >= OAREPORTTRIG1.reg && addr <= OAREPORTTRIG8.reg) || | |
3007 | (addr >= OACEC0_0.reg && addr <= OACEC7_1.reg); | |
3008 | } | |
3009 | ||
3010 | static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) | |
3011 | { | |
3012 | return addr == HALF_SLICE_CHICKEN2.reg || | |
3013 | (addr >= MICRO_BP0_0.reg && addr <= NOA_WRITE.reg) || | |
3014 | (addr >= OA_PERFCNT1_LO.reg && addr <= OA_PERFCNT2_HI.reg) || | |
3015 | (addr >= OA_PERFMATRIX_LO.reg && addr <= OA_PERFMATRIX_HI.reg); | |
3016 | } | |
3017 | ||
3018 | static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) | |
3019 | { | |
3020 | return gen7_is_valid_mux_addr(dev_priv, addr) || | |
3021 | addr == WAIT_FOR_RC6_EXIT.reg || | |
3022 | (addr >= RPM_CONFIG0.reg && addr <= NOA_CONFIG(8).reg); | |
3023 | } | |
3024 | ||
3025 | static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) | |
3026 | { | |
3027 | return gen7_is_valid_mux_addr(dev_priv, addr) || | |
3028 | (addr >= 0x25100 && addr <= 0x2FF90) || | |
a54b19f1 LL |
3029 | (addr >= HSW_MBVID2_NOA0.reg && addr <= HSW_MBVID2_NOA9.reg) || |
3030 | addr == HSW_MBVID2_MISR0.reg; | |
f89823c2 LL |
3031 | } |
3032 | ||
3033 | static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) | |
3034 | { | |
3035 | return gen7_is_valid_mux_addr(dev_priv, addr) || | |
3036 | (addr >= 0x182300 && addr <= 0x1823A4); | |
3037 | } | |
3038 | ||
3039 | static uint32_t mask_reg_value(u32 reg, u32 val) | |
3040 | { | |
3041 | /* HALF_SLICE_CHICKEN2 is programmed with the
3042 | * WaDisableSTUnitPowerOptimization workaround. Make sure the value | |
3043 | * programmed by userspace doesn't change this. | |
3044 | */ | |
3045 | if (HALF_SLICE_CHICKEN2.reg == reg) | |
3046 | val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE); | |
3047 | ||
3048 | /* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
3049 | * indicated by its name and a bunch of selection fields used by OA | |
3050 | * configs. | |
3051 | */ | |
3052 | if (WAIT_FOR_RC6_EXIT.reg == reg) | |
3053 | val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE); | |
3054 | ||
3055 | return val; | |
3056 | } | |
3057 | ||
3058 | static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv, | |
3059 | bool (*is_valid)(struct drm_i915_private *dev_priv, u32 addr), | |
3060 | u32 __user *regs, | |
3061 | u32 n_regs) | |
3062 | { | |
3063 | struct i915_oa_reg *oa_regs; | |
3064 | int err; | |
3065 | u32 i; | |
3066 | ||
3067 | if (!n_regs) | |
3068 | return NULL; | |
3069 | ||
3070 | if (!access_ok(VERIFY_READ, regs, n_regs * sizeof(u32) * 2)) | |
3071 | return ERR_PTR(-EFAULT); | |
3072 | ||
3073 | /* No is_valid function means we're not allowing any register to be programmed. */ | |
3074 | GEM_BUG_ON(!is_valid); | |
3075 | if (!is_valid) | |
3076 | return ERR_PTR(-EINVAL); | |
3077 | ||
3078 | oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL); | |
3079 | if (!oa_regs) | |
3080 | return ERR_PTR(-ENOMEM); | |
3081 | ||
3082 | for (i = 0; i < n_regs; i++) { | |
3083 | u32 addr, value; | |
3084 | ||
3085 | err = get_user(addr, regs); | |
3086 | if (err) | |
3087 | goto addr_err; | |
3088 | ||
3089 | if (!is_valid(dev_priv, addr)) { | |
3090 | DRM_DEBUG("Invalid oa_reg address: %X\n", addr); | |
3091 | err = -EINVAL; | |
3092 | goto addr_err; | |
3093 | } | |
3094 | ||
3095 | err = get_user(value, regs + 1); | |
3096 | if (err) | |
3097 | goto addr_err; | |
3098 | ||
3099 | oa_regs[i].addr = _MMIO(addr); | |
3100 | oa_regs[i].value = mask_reg_value(addr, value); | |
3101 | ||
3102 | regs += 2; | |
3103 | } | |
3104 | ||
3105 | return oa_regs; | |
3106 | ||
3107 | addr_err: | |
3108 | kfree(oa_regs); | |
3109 | return ERR_PTR(err); | |
3110 | } | |
3111 | ||
3112 | static ssize_t show_dynamic_id(struct device *dev, | |
3113 | struct device_attribute *attr, | |
3114 | char *buf) | |
3115 | { | |
3116 | struct i915_oa_config *oa_config = | |
3117 | container_of(attr, typeof(*oa_config), sysfs_metric_id); | |
3118 | ||
3119 | return sprintf(buf, "%d\n", oa_config->id); | |
3120 | } | |
3121 | ||
3122 | static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv, | |
3123 | struct i915_oa_config *oa_config) | |
3124 | { | |
28152a23 | 3125 | sysfs_attr_init(&oa_config->sysfs_metric_id.attr); |
f89823c2 LL |
3126 | oa_config->sysfs_metric_id.attr.name = "id"; |
3127 | oa_config->sysfs_metric_id.attr.mode = S_IRUGO; | |
3128 | oa_config->sysfs_metric_id.show = show_dynamic_id; | |
3129 | oa_config->sysfs_metric_id.store = NULL; | |
3130 | ||
3131 | oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr; | |
3132 | oa_config->attrs[1] = NULL; | |
3133 | ||
3134 | oa_config->sysfs_metric.name = oa_config->uuid; | |
3135 | oa_config->sysfs_metric.attrs = oa_config->attrs; | |
3136 | ||
3137 | return sysfs_create_group(dev_priv->perf.metrics_kobj, | |
3138 | &oa_config->sysfs_metric); | |
3139 | } | |
3140 | ||
3141 | /** | |
3142 | * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config | |
3143 | * @dev: drm device | |
3144 | * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from | |
3145 | * userspace (unvalidated) | |
3146 | * @file: drm file | |
3147 | * | |
3148 | * Validates the submitted OA registers to be saved into a new OA config that
3149 | * can then be used for programming the OA unit and its NOA network. | |
3150 | * | |
3151 | * Returns: A newly allocated config number to be used with the perf open ioctl
3152 | * or a negative error code on failure. | |
3153 | */ | |
3154 | int i915_perf_add_config_ioctl(struct drm_device *dev, void *data, | |
3155 | struct drm_file *file) | |
3156 | { | |
3157 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3158 | struct drm_i915_perf_oa_config *args = data; | |
3159 | struct i915_oa_config *oa_config, *tmp; | |
3160 | int err, id; | |
3161 | ||
3162 | if (!dev_priv->perf.initialized) { | |
3163 | DRM_DEBUG("i915 perf interface not available for this system\n"); | |
3164 | return -ENOTSUPP; | |
3165 | } | |
3166 | ||
3167 | if (!dev_priv->perf.metrics_kobj) { | |
3168 | DRM_DEBUG("OA metrics weren't advertised via sysfs\n"); | |
3169 | return -EINVAL; | |
3170 | } | |
3171 | ||
3172 | if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) { | |
3173 | DRM_DEBUG("Insufficient privileges to add i915 OA config\n"); | |
3174 | return -EACCES; | |
3175 | } | |
3176 | ||
3177 | if ((!args->mux_regs_ptr || !args->n_mux_regs) && | |
3178 | (!args->boolean_regs_ptr || !args->n_boolean_regs) && | |
3179 | (!args->flex_regs_ptr || !args->n_flex_regs)) { | |
3180 | DRM_DEBUG("No OA registers given\n"); | |
3181 | return -EINVAL; | |
3182 | } | |
3183 | ||
3184 | oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL); | |
3185 | if (!oa_config) { | |
3186 | DRM_DEBUG("Failed to allocate memory for the OA config\n"); | |
3187 | return -ENOMEM; | |
3188 | } | |
3189 | ||
3190 | atomic_set(&oa_config->ref_count, 1); | |
3191 | ||
3192 | if (!uuid_is_valid(args->uuid)) { | |
3193 | DRM_DEBUG("Invalid uuid format for OA config\n"); | |
3194 | err = -EINVAL; | |
3195 | goto reg_err; | |
3196 | } | |
3197 | ||
3198 | /* Last character in oa_config->uuid will be 0 because oa_config was
3199 | * allocated with kzalloc().
3200 | */ | |
3201 | memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid)); | |
3202 | ||
3203 | oa_config->mux_regs_len = args->n_mux_regs; | |
3204 | oa_config->mux_regs = | |
3205 | alloc_oa_regs(dev_priv, | |
3206 | dev_priv->perf.oa.ops.is_valid_mux_reg, | |
3207 | u64_to_user_ptr(args->mux_regs_ptr), | |
3208 | args->n_mux_regs); | |
3209 | ||
3210 | if (IS_ERR(oa_config->mux_regs)) { | |
3211 | DRM_DEBUG("Failed to create OA config for mux_regs\n"); | |
3212 | err = PTR_ERR(oa_config->mux_regs); | |
3213 | goto reg_err; | |
3214 | } | |
3215 | ||
3216 | oa_config->b_counter_regs_len = args->n_boolean_regs; | |
3217 | oa_config->b_counter_regs = | |
3218 | alloc_oa_regs(dev_priv, | |
3219 | dev_priv->perf.oa.ops.is_valid_b_counter_reg, | |
3220 | u64_to_user_ptr(args->boolean_regs_ptr), | |
3221 | args->n_boolean_regs); | |
3222 | ||
3223 | if (IS_ERR(oa_config->b_counter_regs)) { | |
3224 | DRM_DEBUG("Failed to create OA config for b_counter_regs\n"); | |
3225 | err = PTR_ERR(oa_config->b_counter_regs); | |
3226 | goto reg_err; | |
3227 | } | |
3228 | ||
3229 | if (INTEL_GEN(dev_priv) < 8) { | |
3230 | if (args->n_flex_regs != 0) { | |
3231 | err = -EINVAL; | |
3232 | goto reg_err; | |
3233 | } | |
3234 | } else { | |
3235 | oa_config->flex_regs_len = args->n_flex_regs; | |
3236 | oa_config->flex_regs = | |
3237 | alloc_oa_regs(dev_priv, | |
3238 | dev_priv->perf.oa.ops.is_valid_flex_reg, | |
3239 | u64_to_user_ptr(args->flex_regs_ptr), | |
3240 | args->n_flex_regs); | |
3241 | ||
3242 | if (IS_ERR(oa_config->flex_regs)) { | |
3243 | DRM_DEBUG("Failed to create OA config for flex_regs\n"); | |
3244 | err = PTR_ERR(oa_config->flex_regs); | |
3245 | goto reg_err; | |
3246 | } | |
3247 | } | |
3248 | ||
3249 | err = mutex_lock_interruptible(&dev_priv->perf.metrics_lock); | |
3250 | if (err) | |
3251 | goto reg_err; | |
3252 | ||
3253 | /* We shouldn't have too many configs, so this iteration shouldn't be | |
3254 | * too costly. | |
3255 | */ | |
3256 | idr_for_each_entry(&dev_priv->perf.metrics_idr, tmp, id) { | |
3257 | if (!strcmp(tmp->uuid, oa_config->uuid)) { | |
3258 | DRM_DEBUG("OA config already exists with this uuid\n"); | |
3259 | err = -EADDRINUSE; | |
3260 | goto sysfs_err; | |
3261 | } | |
3262 | } | |
3263 | ||
3264 | err = create_dynamic_oa_sysfs_entry(dev_priv, oa_config); | |
3265 | if (err) { | |
3266 | DRM_DEBUG("Failed to create sysfs entry for OA config\n"); | |
3267 | goto sysfs_err; | |
3268 | } | |
3269 | ||
3270 | /* Config id 0 is invalid, id 1 is reserved for the kernel's stored test config. */
3271 | oa_config->id = idr_alloc(&dev_priv->perf.metrics_idr, | |
3272 | oa_config, 2, | |
3273 | 0, GFP_KERNEL); | |
3274 | if (oa_config->id < 0) { | |
3275 | DRM_DEBUG("Failed to create sysfs entry for OA config\n"); | |
3276 | err = oa_config->id; | |
3277 | goto sysfs_err; | |
3278 | } | |
3279 | ||
3280 | mutex_unlock(&dev_priv->perf.metrics_lock); | |
3281 | ||
3282 | return oa_config->id; | |
3283 | ||
3284 | sysfs_err: | |
3285 | mutex_unlock(&dev_priv->perf.metrics_lock); | |
3286 | reg_err: | |
3287 | put_oa_config(dev_priv, oa_config); | |
3288 | DRM_DEBUG("Failed to add new OA config\n"); | |
3289 | return err; | |
3290 | } | |
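/*
 * A hypothetical userspace sketch of registering a dynamic OA config
 * (illustrative only: the uuid and the single mux register below are
 * placeholders rather than a meaningful metric set; mux_regs is a flat
 * array of (address, value) u32 pairs):
 *
 *	uint32_t mux_regs[] = { 0x9888, 0x00000000 };
 *	struct drm_i915_perf_oa_config config = {
 *		.n_mux_regs = sizeof(mux_regs) / (2 * sizeof(uint32_t)),
 *		.mux_regs_ptr = (uintptr_t)mux_regs,
 *	};
 *	int config_id;
 *
 *	memcpy(config.uuid, "01234567-0123-0123-0123-0123456789ab",
 *	       sizeof(config.uuid));
 *	config_id = ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 *
 * The returned id can then be used as DRM_I915_PERF_PROP_OA_METRICS_SET and
 * the config is advertised under the sysfs metrics/ directory by its uuid.
 */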
3291 | ||
3292 | /** | |
3293 | * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config | |
3294 | * @dev: drm device | |
3295 | * @data: ioctl data (pointer to u64 integer) copied from userspace | |
3296 | * @file: drm file | |
3297 | * | |
3298 | * Configs can be removed while being used; they will stop appearing in sysfs
3299 | * and their content will be freed when the stream using the config is closed. | |
3300 | * | |
3301 | * Returns: 0 on success or a negative error code on failure. | |
3302 | */ | |
3303 | int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data, | |
3304 | struct drm_file *file) | |
3305 | { | |
3306 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3307 | u64 *arg = data; | |
3308 | struct i915_oa_config *oa_config; | |
3309 | int ret; | |
3310 | ||
3311 | if (!dev_priv->perf.initialized) { | |
3312 | DRM_DEBUG("i915 perf interface not available for this system\n"); | |
3313 | return -ENOTSUPP; | |
3314 | } | |
3315 | ||
3316 | if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) { | |
3317 | DRM_DEBUG("Insufficient privileges to remove i915 OA config\n"); | |
3318 | return -EACCES; | |
3319 | } | |
3320 | ||
3321 | ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock); | |
3322 | if (ret) | |
3323 | goto lock_err; | |
3324 | ||
3325 | oa_config = idr_find(&dev_priv->perf.metrics_idr, *arg); | |
3326 | if (!oa_config) { | |
3327 | DRM_DEBUG("Failed to remove unknown OA config\n"); | |
3328 | ret = -ENOENT; | |
3329 | goto config_err; | |
3330 | } | |
3331 | ||
3332 | GEM_BUG_ON(*arg != oa_config->id); | |
3333 | ||
3334 | sysfs_remove_group(dev_priv->perf.metrics_kobj, | |
3335 | &oa_config->sysfs_metric); | |
3336 | ||
3337 | idr_remove(&dev_priv->perf.metrics_idr, *arg); | |
3338 | put_oa_config(dev_priv, oa_config); | |
3339 | ||
3340 | config_err: | |
3341 | mutex_unlock(&dev_priv->perf.metrics_lock); | |
3342 | lock_err: | |
3343 | return ret; | |
3344 | } | |
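/*
 * The corresponding removal from userspace is symmetric (illustrative only,
 * reusing the config_id returned by the add sketch above):
 *
 *	uint64_t id = (uint64_t)config_id;
 *
 *	ioctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &id);
 */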
3345 | ||
ccdf6341 RB |
3346 | static struct ctl_table oa_table[] = { |
3347 | { | |
3348 | .procname = "perf_stream_paranoid", | |
3349 | .data = &i915_perf_stream_paranoid, | |
3350 | .maxlen = sizeof(i915_perf_stream_paranoid), | |
3351 | .mode = 0644, | |
3352 | .proc_handler = proc_dointvec_minmax, | |
3353 | .extra1 = &zero, | |
3354 | .extra2 = &one, | |
3355 | }, | |
00319ba0 RB |
3356 | { |
3357 | .procname = "oa_max_sample_rate", | |
3358 | .data = &i915_oa_max_sample_rate, | |
3359 | .maxlen = sizeof(i915_oa_max_sample_rate), | |
3360 | .mode = 0644, | |
3361 | .proc_handler = proc_dointvec_minmax, | |
3362 | .extra1 = &zero, | |
3363 | .extra2 = &oa_sample_rate_hard_limit, | |
3364 | }, | |
ccdf6341 RB |
3365 | {} |
3366 | }; | |
3367 | ||
3368 | static struct ctl_table i915_root[] = { | |
3369 | { | |
3370 | .procname = "i915", | |
3371 | .maxlen = 0, | |
3372 | .mode = 0555, | |
3373 | .child = oa_table, | |
3374 | }, | |
3375 | {} | |
3376 | }; | |
3377 | ||
3378 | static struct ctl_table dev_root[] = { | |
3379 | { | |
3380 | .procname = "dev", | |
3381 | .maxlen = 0, | |
3382 | .mode = 0555, | |
3383 | .child = i915_root, | |
3384 | }, | |
3385 | {} | |
3386 | }; | |
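/*
 * Once registered in i915_perf_init() below, these tables surface as the
 * dev.i915.perf_stream_paranoid and dev.i915.oa_max_sample_rate sysctl
 * options, i.e. /proc/sys/dev/i915/perf_stream_paranoid and
 * /proc/sys/dev/i915/oa_max_sample_rate.
 */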
3387 | ||
16d98b31 RB |
3388 | /** |
3389 | * i915_perf_init - initialize i915-perf state on module load | |
3390 | * @dev_priv: i915 device instance | |
3391 | * | |
3392 | * Initializes i915-perf state without exposing anything to userspace. | |
3393 | * | |
3394 | * Note: i915-perf initialization is split into an 'init' and 'register' | |
3395 | * phase with the i915_perf_register() exposing state to userspace. | |
3396 | */ | |
eec688e1 RB |
3397 | void i915_perf_init(struct drm_i915_private *dev_priv) |
3398 | { | |
701f8231 | 3399 | dev_priv->perf.oa.timestamp_frequency = 0; |
19f81df2 RB |
3400 | |
3401 | if (IS_HASWELL(dev_priv)) { | |
f89823c2 LL |
3402 | dev_priv->perf.oa.ops.is_valid_b_counter_reg = |
3403 | gen7_is_valid_b_counter_addr; | |
3404 | dev_priv->perf.oa.ops.is_valid_mux_reg = | |
3405 | hsw_is_valid_mux_addr; | |
3406 | dev_priv->perf.oa.ops.is_valid_flex_reg = NULL; | |
19f81df2 RB |
3407 | dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer; |
3408 | dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set; | |
3409 | dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set; | |
3410 | dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable; | |
3411 | dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable; | |
3412 | dev_priv->perf.oa.ops.read = gen7_oa_read; | |
3413 | dev_priv->perf.oa.ops.oa_hw_tail_read = | |
3414 | gen7_oa_hw_tail_read; | |
3415 | ||
155e941f RB |
3416 | dev_priv->perf.oa.timestamp_frequency = 12500000; |
3417 | ||
19f81df2 | 3418 | dev_priv->perf.oa.oa_formats = hsw_oa_formats; |
4f044a88 | 3419 | } else if (i915_modparams.enable_execlists) { |
19f81df2 RB |
3420 | /* Note that although we could theoretically also support the
3421 | * legacy ringbuffer mode on BDW (and earlier iterations of | |
3422 | * this driver, before upstreaming did this) it didn't seem | |
3423 | * worth the complexity to maintain now that BDW+ enable | |
3424 | * execlist mode by default. | |
3425 | */ | |
f89823c2 LL |
3426 | dev_priv->perf.oa.ops.is_valid_b_counter_reg = |
3427 | gen7_is_valid_b_counter_addr; | |
3428 | dev_priv->perf.oa.ops.is_valid_mux_reg = | |
3429 | gen8_is_valid_mux_addr; | |
3430 | dev_priv->perf.oa.ops.is_valid_flex_reg = | |
3431 | gen8_is_valid_flex_addr; | |
d7965152 | 3432 | |
701f8231 LL |
3433 | dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer; |
3434 | dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set; | |
3435 | dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set; | |
3436 | dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable; | |
3437 | dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable; | |
3438 | dev_priv->perf.oa.ops.read = gen8_oa_read; | |
3439 | dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read; | |
3440 | ||
3441 | dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats; | |
3442 | ||
19f81df2 RB |
3443 | if (IS_GEN8(dev_priv)) { |
3444 | dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120; | |
3445 | dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce; | |
155e941f RB |
3446 | |
3447 | dev_priv->perf.oa.timestamp_frequency = 12500000; | |
3448 | ||
19f81df2 | 3449 | dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25); |
f89823c2 LL |
3450 | if (IS_CHERRYVIEW(dev_priv)) { |
3451 | dev_priv->perf.oa.ops.is_valid_mux_reg = | |
3452 | chv_is_valid_mux_addr; | |
3453 | } | |
19f81df2 RB |
3454 | } else if (IS_GEN9(dev_priv)) { |
3455 | dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128; | |
3456 | dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de; | |
155e941f | 3457 | |
19f81df2 RB |
3458 | dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16); |
3459 | ||
701f8231 LL |
3460 | switch (dev_priv->info.platform) { |
3461 | case INTEL_BROXTON: | |
3462 | case INTEL_GEMINILAKE: | |
155e941f | 3463 | dev_priv->perf.oa.timestamp_frequency = 19200000; |
701f8231 LL |
3464 | break; |
3465 | case INTEL_SKYLAKE: | |
3466 | case INTEL_KABYLAKE: | |
22ea4f35 | 3467 | case INTEL_COFFEELAKE: |
701f8231 LL |
3468 | dev_priv->perf.oa.timestamp_frequency = 12000000; |
3469 | break; | |
3470 | default: | |
3471 | /* Leave timestamp_frequency at 0 so we can
3472 | * detect unsupported platforms. | |
3473 | */ | |
3474 | break; | |
19f81df2 RB |
3475 | } |
3476 | } | |
19f81df2 | 3477 | } |
d7965152 | 3478 | |
701f8231 | 3479 | if (dev_priv->perf.oa.timestamp_frequency) { |
19f81df2 RB |
3480 | hrtimer_init(&dev_priv->perf.oa.poll_check_timer, |
3481 | CLOCK_MONOTONIC, HRTIMER_MODE_REL); | |
3482 | dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb; | |
3483 | init_waitqueue_head(&dev_priv->perf.oa.poll_wq); | |
d7965152 | 3484 | |
19f81df2 RB |
3485 | INIT_LIST_HEAD(&dev_priv->perf.streams); |
3486 | mutex_init(&dev_priv->perf.lock); | |
19f81df2 | 3487 | spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock); |
eec688e1 | 3488 | |
155e941f RB |
3489 | oa_sample_rate_hard_limit = |
3490 | dev_priv->perf.oa.timestamp_frequency / 2; | |
19f81df2 | 3491 | dev_priv->perf.sysctl_header = register_sysctl_table(dev_root); |
ccdf6341 | 3492 | |
f89823c2 LL |
3493 | mutex_init(&dev_priv->perf.metrics_lock); |
3494 | idr_init(&dev_priv->perf.metrics_idr); | |
3495 | ||
19f81df2 RB |
3496 | dev_priv->perf.initialized = true; |
3497 | } | |
eec688e1 RB |
3498 | } |
3499 | ||
f89823c2 LL |
3500 | static int destroy_config(int id, void *p, void *data) |
3501 | { | |
3502 | struct drm_i915_private *dev_priv = data; | |
3503 | struct i915_oa_config *oa_config = p; | |
3504 | ||
3505 | put_oa_config(dev_priv, oa_config); | |
3506 | ||
3507 | return 0; | |
3508 | } | |
3509 | ||
16d98b31 RB |
3510 | /** |
3511 | * i915_perf_fini - Counter part to i915_perf_init() | |
3512 | * @dev_priv: i915 device instance | |
3513 | */ | |
eec688e1 RB |
3514 | void i915_perf_fini(struct drm_i915_private *dev_priv) |
3515 | { | |
3516 | if (!dev_priv->perf.initialized) | |
3517 | return; | |
3518 | ||
f89823c2 LL |
3519 | idr_for_each(&dev_priv->perf.metrics_idr, destroy_config, dev_priv); |
3520 | idr_destroy(&dev_priv->perf.metrics_idr); | |
3521 | ||
ccdf6341 RB |
3522 | unregister_sysctl_table(dev_priv->perf.sysctl_header); |
3523 | ||
d7965152 | 3524 | memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops)); |
19f81df2 | 3525 | |
eec688e1 RB |
3526 | dev_priv->perf.initialized = false; |
3527 | } |