Commit | Line | Data |
---|---|---|
0136db58 BW |
1 | /* |
2 | * Copyright © 2012 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |
21 | * IN THE SOFTWARE. | |
22 | * | |
23 | * Authors: | |
24 | * Ben Widawsky <ben@bwidawsk.net> | |
25 | * | |
26 | */ | |
27 | ||
28 | #include <linux/device.h> | |
29 | #include <linux/module.h> | |
30 | #include <linux/stat.h> | |
31 | #include <linux/sysfs.h> | |
56c5098f | 32 | |
c1132367 AS |
33 | #include "gt/intel_rc6.h" |
34 | ||
ecbb5fb7 | 35 | #include "i915_drv.h" |
be68261d | 36 | #include "i915_sysfs.h" |
ecbb5fb7 | 37 | #include "intel_pm.h" |
56c5098f | 38 | #include "intel_sideband.h" |
0136db58 | 39 | |
694c2828 | 40 | static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev) |
c49d13ee | 41 | { |
694c2828 DW |
42 | struct drm_minor *minor = dev_get_drvdata(kdev); |
43 | return to_i915(minor->dev); | |
c49d13ee | 44 | } |
14c8d110 | 45 | |
5ab3633d | 46 | #ifdef CONFIG_PM |
/*
 * Sample an RC6 residency counter and return its value in milliseconds.
 * @reg selects which residency register to read (RC6/RC6p/RC6pp/media).
 */
static u32 calc_residency(struct drm_i915_private *dev_priv,
			  i915_reg_t reg)
{
	intel_wakeref_t wakeref;
	u64 res = 0;

	/* Read under a runtime-pm wakeref; res stays 0 if no wakeref taken. */
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		res = intel_rc6_residency_us(&dev_priv->gt.rc6, reg);

	/* Counter is in microseconds; sysfs files report milliseconds. */
	return DIV_ROUND_CLOSEST_ULL(res, 1000);
}
58 | ||
59 | static ssize_t | |
dbdfd8e9 | 60 | show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) |
0136db58 | 61 | { |
fb6db0f5 CW |
62 | struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); |
63 | unsigned int mask; | |
64 | ||
65 | mask = 0; | |
66 | if (HAS_RC6(dev_priv)) | |
67 | mask |= BIT(0); | |
68 | if (HAS_RC6p(dev_priv)) | |
69 | mask |= BIT(1); | |
70 | if (HAS_RC6pp(dev_priv)) | |
71 | mask |= BIT(2); | |
72 | ||
73 | return snprintf(buf, PAGE_SIZE, "%x\n", mask); | |
0136db58 BW |
74 | } |
75 | ||
76 | static ssize_t | |
dbdfd8e9 | 77 | show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf) |
0136db58 | 78 | { |
694c2828 DW |
79 | struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); |
80 | u32 rc6_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6); | |
3e2a1556 | 81 | return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency); |
0136db58 BW |
82 | } |
83 | ||
84 | static ssize_t | |
dbdfd8e9 | 85 | show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf) |
0136db58 | 86 | { |
694c2828 DW |
87 | struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); |
88 | u32 rc6p_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6p); | |
3e2a1556 | 89 | return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency); |
0136db58 BW |
90 | } |
91 | ||
92 | static ssize_t | |
dbdfd8e9 | 93 | show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf) |
0136db58 | 94 | { |
694c2828 DW |
95 | struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); |
96 | u32 rc6pp_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6pp); | |
3e2a1556 | 97 | return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency); |
0136db58 BW |
98 | } |
99 | ||
626ad6f3 VS |
100 | static ssize_t |
101 | show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf) | |
102 | { | |
694c2828 DW |
103 | struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); |
104 | u32 rc6_residency = calc_residency(dev_priv, VLV_GT_MEDIA_RC6); | |
626ad6f3 VS |
105 | return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency); |
106 | } | |
107 | ||
0136db58 BW |
/* Read-only RC6 sysfs attributes; all live under the "power" group. */
static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);

/* Base RC6 attributes, merged when HAS_RC6. */
static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	NULL
};

static const struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};

/* Deeper RC6p/RC6pp states, merged when HAS_RC6p. */
static struct attribute *rc6p_attrs[] = {
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static const struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs = rc6p_attrs
};

/* Media-well RC6 residency, merged on Valleyview/Cherryview only. */
static struct attribute *media_rc6_attrs[] = {
	&dev_attr_media_rc6_residency_ms.attr,
	NULL
};

static const struct attribute_group media_rc6_attr_group = {
	.name = power_group_name,
	.attrs = media_rc6_attrs
};
8c3f929b | 145 | #endif |
0136db58 | 146 | |
/*
 * Validate an offset into the L3 parity remap log.
 * Returns 0 if the access is permitted, or a negative errno:
 * -EPERM if the platform has no L3 dynamic parity feature,
 * -EINVAL if the offset is not u32-aligned,
 * -ENXIO if the offset is beyond the log.
 */
static int l3_access_valid(struct drm_i915_private *i915, loff_t offset)
{
	if (!HAS_L3_DPF(i915))
		return -EPERM;

	if (!IS_ALIGNED(offset, sizeof(u32)))
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}
160 | ||
/*
 * sysfs bin-file read for the per-slice L3 parity remap table.
 * The slice index is carried in attr->private. Unwritten tables read
 * back as zeroes.
 */
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	ret = l3_access_valid(i915, offset);
	if (ret)
		return ret;

	/* Only whole u32 entries, clamped to the end of the log. */
	count = round_down(count, sizeof(u32));
	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
	/* Zero-fill first so a missing remap table reads as all zeroes. */
	memset(buf, 0, count);

	/* contexts.lock guards the remap_info pointer against i915_l3_write. */
	spin_lock(&i915->gem.contexts.lock);
	if (i915->l3_parity.remap_info[slice])
		memcpy(buf,
		       i915->l3_parity.remap_info[slice] + offset / sizeof(u32),
		       count);
	spin_unlock(&i915->gem.contexts.lock);

	return count;
}
188 | ||
/*
 * sysfs bin-file write for the per-slice L3 parity remap table.
 * Allocates the remap table on first use and marks every context so the
 * new remapping is applied on its next context switch.
 */
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	int slice = (int)(uintptr_t)attr->private;
	u32 *remap_info, *freeme = NULL;
	struct i915_gem_context *ctx;
	int ret;

	ret = l3_access_valid(i915, offset);
	if (ret)
		return ret;

	if (count < sizeof(u32))
		return -EINVAL;

	/*
	 * Allocate speculatively outside the spinlock; if another writer
	 * already installed a table we free ours (as "freeme") afterwards.
	 */
	remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
	if (!remap_info)
		return -ENOMEM;

	spin_lock(&i915->gem.contexts.lock);

	if (i915->l3_parity.remap_info[slice]) {
		/* Lost the race: reuse the existing table, drop ours later. */
		freeme = remap_info;
		remap_info = i915->l3_parity.remap_info[slice];
	} else {
		i915->l3_parity.remap_info[slice] = remap_info;
	}

	count = round_down(count, sizeof(u32));
	memcpy(remap_info + offset / sizeof(u32), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &i915->gem.contexts.list, link)
		ctx->remap_slice |= BIT(slice);

	spin_unlock(&i915->gem.contexts.lock);
	kfree(freeme);

	/*
	 * TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */

	return count;
}
239 | ||
/*
 * Binary sysfs files exposing the L3 parity remap log, one per slice.
 * The slice index is smuggled through .private and decoded in the
 * read/write handlers.
 */
static const struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

/* Second L3 slice; only registered when NUM_L3_SLICES > 1. */
static const struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};
257 | ||
/*
 * Report the actual (measured) GPU frequency in MHz, read back from
 * hardware rather than from the last software request.
 */
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	intel_wakeref_t wakeref;
	u32 freq;

	/* Hardware must be awake to give a meaningful reading. */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* VLV/CHV report the current frequency via the punit. */
		vlv_punit_get(dev_priv);
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(dev_priv);

		/* Frequency field lives in bits 15:8 of the status reg. */
		freq = (freq >> 8) & 0xff;
	} else {
		freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1));
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(dev_priv, freq));
}
281 | ||
282 | static ssize_t gt_cur_freq_mhz_show(struct device *kdev, | |
283 | struct device_attribute *attr, char *buf) | |
284 | { | |
694c2828 | 285 | struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); |
df6eedc8 | 286 | |
62e1baa1 CW |
287 | return snprintf(buf, PAGE_SIZE, "%d\n", |
288 | intel_gpu_freq(dev_priv, | |
562d9bae | 289 | dev_priv->gt_pm.rps.cur_freq)); |
df6eedc8 BW |
290 | } |
291 | ||
29ecd78d CW |
292 | static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) |
293 | { | |
694c2828 | 294 | struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); |
29ecd78d CW |
295 | |
296 | return snprintf(buf, PAGE_SIZE, "%d\n", | |
62e1baa1 | 297 | intel_gpu_freq(dev_priv, |
562d9bae | 298 | dev_priv->gt_pm.rps.boost_freq)); |
29ecd78d CW |
299 | } |
300 | ||
/*
 * Set the RPS boost frequency from sysfs. The value (MHz) is converted to
 * a hardware opcode, validated against the static hardware limits, and the
 * RPS worker is kicked only if clients are currently waiting on a boost.
 */
static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	bool boost = false;
	ssize_t ret;
	u32 val;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	/* Validate against (static) hardware limits */
	val = intel_freq_opcode(dev_priv, val);
	if (val < rps->min_freq || val > rps->max_freq)
		return -EINVAL;

	mutex_lock(&rps->lock);
	if (val != rps->boost_freq) {
		rps->boost_freq = val;
		/* Only reschedule if someone is actually waiting for a boost. */
		boost = atomic_read(&rps->num_waiters);
	}
	mutex_unlock(&rps->lock);
	/* schedule_work() outside the lock to avoid lock nesting with rps->work. */
	if (boost)
		schedule_work(&rps->work);

	return count;
}
331 | ||
97e4eed7 CW |
332 | static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, |
333 | struct device_attribute *attr, char *buf) | |
334 | { | |
694c2828 | 335 | struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); |
97e4eed7 | 336 | |
62e1baa1 CW |
337 | return snprintf(buf, PAGE_SIZE, "%d\n", |
338 | intel_gpu_freq(dev_priv, | |
562d9bae | 339 | dev_priv->gt_pm.rps.efficient_freq)); |
97e4eed7 CW |
340 | } |
341 | ||
df6eedc8 BW |
342 | static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) |
343 | { | |
694c2828 | 344 | struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); |
5c9669ce | 345 | |
62e1baa1 CW |
346 | return snprintf(buf, PAGE_SIZE, "%d\n", |
347 | intel_gpu_freq(dev_priv, | |
562d9bae | 348 | dev_priv->gt_pm.rps.max_freq_softlimit)); |
df6eedc8 BW |
349 | } |
350 | ||
46ddf194 BW |
/*
 * Set the maximum frequency softlimit from sysfs. The new limit must fit
 * inside the hardware range and not fall below the current minimum
 * softlimit. The current frequency is re-clamped and re-requested so the
 * interrupt limits and PMINTRMSK are updated.
 */
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	intel_wakeref_t wakeref;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	/* Keep the device awake while reprogramming RPS. */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&rps->lock);

	/* Convert MHz from userspace into the hardware opcode space. */
	val = intel_freq_opcode(dev_priv, val);
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val < rps->min_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	if (val > rps->rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  intel_gpu_freq(dev_priv, val));

	rps->max_freq_softlimit = val;

	val = clamp_t(int, rps->cur_freq,
		      rps->min_freq_softlimit,
		      rps->max_freq_softlimit);

	/* We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	ret = intel_set_rps(dev_priv, val);

unlock:
	mutex_unlock(&rps->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return ret ?: count;
}
397 | ||
df6eedc8 BW |
398 | static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) |
399 | { | |
694c2828 | 400 | struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); |
df6eedc8 | 401 | |
62e1baa1 CW |
402 | return snprintf(buf, PAGE_SIZE, "%d\n", |
403 | intel_gpu_freq(dev_priv, | |
562d9bae | 404 | dev_priv->gt_pm.rps.min_freq_softlimit)); |
df6eedc8 BW |
405 | } |
406 | ||
46ddf194 BW |
/*
 * Set the minimum frequency softlimit from sysfs. Mirror image of
 * gt_max_freq_mhz_store(): the new limit must fit inside the hardware
 * range and not exceed the current maximum softlimit.
 */
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	intel_wakeref_t wakeref;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	/* Keep the device awake while reprogramming RPS. */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&rps->lock);

	/* Convert MHz from userspace into the hardware opcode space. */
	val = intel_freq_opcode(dev_priv, val);
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val > rps->max_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	rps->min_freq_softlimit = val;

	val = clamp_t(int, rps->cur_freq,
		      rps->min_freq_softlimit,
		      rps->max_freq_softlimit);

	/* We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	ret = intel_set_rps(dev_priv, val);

unlock:
	mutex_unlock(&rps->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return ret ?: count;
}
449 | ||
c828a892 JP |
/* Frequency control/readout sysfs attributes. */
static DEVICE_ATTR_RO(gt_act_freq_mhz);
static DEVICE_ATTR_RO(gt_cur_freq_mhz);
static DEVICE_ATTR_RW(gt_boost_freq_mhz);
static DEVICE_ATTR_RW(gt_max_freq_mhz);
static DEVICE_ATTR_RW(gt_min_freq_mhz);

static DEVICE_ATTR_RO(vlv_rpe_freq_mhz);

/* Forward declaration: RP0/RP1/RPn share a single show routine that
 * dispatches on which attribute was read. */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
462 | ||
463 | /* For now we have a static number of RP states */ | |
464 | static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) | |
465 | { | |
694c2828 | 466 | struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); |
562d9bae | 467 | struct intel_rps *rps = &dev_priv->gt_pm.rps; |
bc4d91f6 | 468 | u32 val; |
ac6ae347 | 469 | |
bc4d91f6 | 470 | if (attr == &dev_attr_gt_RP0_freq_mhz) |
562d9bae | 471 | val = intel_gpu_freq(dev_priv, rps->rp0_freq); |
bc4d91f6 | 472 | else if (attr == &dev_attr_gt_RP1_freq_mhz) |
562d9bae | 473 | val = intel_gpu_freq(dev_priv, rps->rp1_freq); |
bc4d91f6 | 474 | else if (attr == &dev_attr_gt_RPn_freq_mhz) |
562d9bae | 475 | val = intel_gpu_freq(dev_priv, rps->min_freq); |
bc4d91f6 | 476 | else |
ac6ae347 | 477 | BUG(); |
bc4d91f6 | 478 | |
3e2a1556 | 479 | return snprintf(buf, PAGE_SIZE, "%d\n", val); |
ac6ae347 BW |
480 | } |
481 | ||
/* Frequency attributes registered on gen6+ (non-VLV/CHV) platforms. */
static const struct attribute * const gen6_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_boost_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};
493 | ||
/* Valleyview/Cherryview variant: gen6 set plus the RPe (efficient) freq. */
static const struct attribute * const vlv_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_boost_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};
506 | ||
98a2f411 CW |
507 | #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) |
508 | ||
ef86ddce MK |
509 | static ssize_t error_state_read(struct file *filp, struct kobject *kobj, |
510 | struct bin_attribute *attr, char *buf, | |
511 | loff_t off, size_t count) | |
512 | { | |
513 | ||
657fb5fb | 514 | struct device *kdev = kobj_to_dev(kobj); |
0e39037b | 515 | struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); |
5a4c6f1b CW |
516 | struct i915_gpu_state *gpu; |
517 | ssize_t ret; | |
ef86ddce | 518 | |
0e39037b | 519 | gpu = i915_first_error_state(i915); |
e6154e4c CW |
520 | if (IS_ERR(gpu)) { |
521 | ret = PTR_ERR(gpu); | |
522 | } else if (gpu) { | |
0e39037b CW |
523 | ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count); |
524 | i915_gpu_state_put(gpu); | |
525 | } else { | |
526 | const char *str = "No error state collected\n"; | |
527 | size_t len = strlen(str); | |
ef86ddce | 528 | |
0e39037b CW |
529 | ret = min_t(size_t, count, len - off); |
530 | memcpy(buf, str + off, ret); | |
531 | } | |
ef86ddce | 532 | |
5a4c6f1b | 533 | return ret; |
ef86ddce MK |
534 | } |
535 | ||
536 | static ssize_t error_state_write(struct file *file, struct kobject *kobj, | |
537 | struct bin_attribute *attr, char *buf, | |
538 | loff_t off, size_t count) | |
539 | { | |
657fb5fb | 540 | struct device *kdev = kobj_to_dev(kobj); |
694c2828 | 541 | struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); |
ef86ddce MK |
542 | |
543 | DRM_DEBUG_DRIVER("Resetting error state\n"); | |
5a4c6f1b | 544 | i915_reset_error_state(dev_priv); |
ef86ddce MK |
545 | |
546 | return count; | |
547 | } | |
548 | ||
/*
 * Binary "error" file exposing the GPU error capture. .size = 0 because
 * the capture length is unknown until it is generated.
 */
static const struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};
556 | ||
98a2f411 CW |
/* Register the "error" bin file; failure is logged but not fatal. */
static void i915_setup_error_capture(struct device *kdev)
{
	if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
		DRM_ERROR("error_state sysfs setup failed\n");
}

static void i915_teardown_error_capture(struct device *kdev)
{
	sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
}
#else
/* Error capture compiled out: no-op stubs keep the callers unconditional. */
static void i915_setup_error_capture(struct device *kdev) {}
static void i915_teardown_error_capture(struct device *kdev) {}
#endif
571 | ||
/*
 * Register all i915 sysfs files on the primary DRM minor's device:
 * RC6 residency groups (CONFIG_PM), L3 parity bin files, frequency
 * attributes and the error-capture file. Individual failures are
 * logged but never abort driver load.
 */
void i915_setup_sysfs(struct drm_i915_private *dev_priv)
{
	struct device *kdev = dev_priv->drm.primary->kdev;
	int ret;

#ifdef CONFIG_PM
	if (HAS_RC6(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
	if (HAS_RC6p(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&rc6p_attr_group);
		if (ret)
			DRM_ERROR("RC6p residency sysfs setup failed\n");
	}
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&media_rc6_attr_group);
		if (ret)
			DRM_ERROR("Media RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev_priv)) {
		ret = device_create_bin_file(kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		/* A second bin file for the second L3 slice, when present. */
		if (NUM_L3_SLICES(dev_priv) > 1) {
			ret = device_create_bin_file(kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	/* Pre-gen6 platforms register no frequency attributes (ret stays 0). */
	ret = 0;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = sysfs_create_files(&kdev->kobj, vlv_attrs);
	else if (INTEL_GEN(dev_priv) >= 6)
		ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	i915_setup_error_capture(kdev);
}
620 | ||
694c2828 | 621 | void i915_teardown_sysfs(struct drm_i915_private *dev_priv) |
0136db58 | 622 | { |
694c2828 DW |
623 | struct device *kdev = dev_priv->drm.primary->kdev; |
624 | ||
98a2f411 CW |
625 | i915_teardown_error_capture(kdev); |
626 | ||
694c2828 DW |
627 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
628 | sysfs_remove_files(&kdev->kobj, vlv_attrs); | |
97e4eed7 | 629 | else |
694c2828 DW |
630 | sysfs_remove_files(&kdev->kobj, gen6_attrs); |
631 | device_remove_bin_file(kdev, &dpf_attrs_1); | |
632 | device_remove_bin_file(kdev, &dpf_attrs); | |
853c70e8 | 633 | #ifdef CONFIG_PM |
694c2828 DW |
634 | sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group); |
635 | sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group); | |
853c70e8 | 636 | #endif |
0136db58 | 637 | } |