Commit | Line | Data |
---|---|---|
75758255 AD |
1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. | |
3 | * Copyright 2008 Red Hat Inc. | |
4 | * Copyright 2009 Jerome Glisse. | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | |
7 | * copy of this software and associated documentation files (the "Software"), | |
8 | * to deal in the Software without restriction, including without limitation | |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
10 | * and/or sell copies of the Software, and to permit persons to whom the | |
11 | * Software is furnished to do so, subject to the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice shall be included in | |
14 | * all copies or substantial portions of the Software. | |
15 | * | |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
22 | * OTHER DEALINGS IN THE SOFTWARE. | |
23 | * | |
24 | */ | |
25 | ||
26 | #include <linux/kthread.h> | |
fdf2f6c5 SR |
27 | #include <linux/pci.h> |
28 | #include <linux/uaccess.h> | |
a9ffe2a9 | 29 | #include <linux/pm_runtime.h> |
fdf2f6c5 | 30 | |
75758255 | 31 | #include "amdgpu.h" |
a4c5b1bb | 32 | #include "amdgpu_pm.h" |
d090e7db | 33 | #include "amdgpu_dm_debugfs.h" |
17cb04f2 | 34 | #include "amdgpu_ras.h" |
a4322e18 | 35 | #include "amdgpu_rap.h" |
ecaafb7b | 36 | #include "amdgpu_securedisplay.h" |
19ae3330 | 37 | #include "amdgpu_fw_attestation.h" |
37df9560 | 38 | #include "amdgpu_umr.h" |
75758255 | 39 | |
d0fb18b5 | 40 | #include "amdgpu_reset.h" |
e50d9ba0 | 41 | #include "amdgpu_psp_ta.h" |
d0fb18b5 | 42 | |
728e7e0c | 43 | #if defined(CONFIG_DEBUG_FS) |
728e7e0c | 44 | |
7e4237db TSD |
/**
 * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
 *
 * @read: True if reading
 * @f: open file handle
 * @buf: User buffer to write/read to
 * @size: Number of bytes to write/read
 * @pos: Offset to seek to
 *
 * This debugfs entry has special meaning on the offset being sought.
 * Various bits have different meanings:
 *
 * Bit 62:  Indicates a GRBM bank switch is needed
 * Bit 61:  Indicates a SRBM bank switch is needed (implies bit 62 is
 *	    zero)
 * Bits 24..33: The SE or ME selector if needed
 * Bits 34..43: The SH (or SA) or PIPE selector if needed
 * Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed
 *
 * Bit 23:  Indicates that the PM power gating lock should be held
 *	    This is necessary to read registers that might be
 *	    unreliable during a power gating transition.
 *
 * The lower bits are the BYTE offset of the register to read.  This
 * allows reading multiple registers in a single call and having
 * the returned size reflect that.
 *
 * Return: number of bytes transferred on success, negative errno on failure.
 */
static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
		char __user *buf, size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank, use_ring;
	unsigned int instance_bank, sh_bank, se_bank, me, pipe, queue, vmid;

	pm_pg_lock = use_bank = use_ring = false;
	instance_bank = sh_bank = se_bank = me = pipe = queue = vmid = 0;

	/*
	 * Accesses must be dword sized/aligned, and the GRBM (bit 62) and
	 * SRBM (bit 61) bank-switch requests are mutually exclusive.
	 */
	if (size & 0x3 || *pos & 0x3 ||
			((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		/* GRBM bank switch: decode SE/SH/INSTANCE selectors. */
		se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
		sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
		instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;

		/* 0x3FF in a 10-bit selector field means "broadcast/all". */
		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = true;
	} else if (*pos & (1ULL << 61)) {

		/* SRBM switch: decode ME/PIPE/QUEUE/VMID selectors. */
		me = (*pos & GENMASK_ULL(33, 24)) >> 24;
		pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
		queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
		vmid = (*pos & GENMASK_ULL(58, 54)) >> 54;

		use_ring = true;
	} else {
		use_bank = use_ring = false;
	}

	/* Strip the selector/flag bits; keep only the 22-bit byte offset. */
	*pos &= (1UL << 22) - 1;

	/* Wake the device; drop the usage count again if wakeup failed. */
	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	if (use_bank) {
		/* Reject selectors beyond the hardware config (broadcast 0xFFFFFFFF is allowed). */
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return -EINVAL;
		}
		/* Hold grbm_idx_mutex for the whole banked access. */
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank, 0);
	} else if (use_ring) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid, 0);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (read) {
			/* *pos is a byte offset; RREG32 takes a dword index. */
			value = RREG32(*pos >> 2);
			r = put_user(value, (uint32_t *)buf);
		} else {
			r = get_user(value, (uint32_t *)buf);
			if (!r)
				amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0);
		}
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	/* Restore broadcast selection and release locks in reverse order. */
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
		mutex_unlock(&adev->grbm_idx_mutex);
	} else if (use_ring) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
188 | ||
/*
 * amdgpu_debugfs_regs_read - Callback for reading MMIO registers
 *
 * Thin wrapper: delegates to amdgpu_debugfs_process_reg_op() in read mode.
 */
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
}
197 | ||
/*
 * amdgpu_debugfs_regs_write - Callback for writing MMIO registers
 *
 * Thin wrapper: delegates to amdgpu_debugfs_process_reg_op() in write mode.
 * The const qualifier is cast away because the helper takes one buffer
 * pointer for both directions; the write path only reads @buf via get_user().
 */
static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
}
206 | ||
37df9560 TSD |
207 | static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file) |
208 | { | |
209 | struct amdgpu_debugfs_regs2_data *rd; | |
210 | ||
8fa76350 | 211 | rd = kzalloc(sizeof(*rd), GFP_KERNEL); |
37df9560 TSD |
212 | if (!rd) |
213 | return -ENOMEM; | |
214 | rd->adev = file_inode(file)->i_private; | |
215 | file->private_data = rd; | |
216 | mutex_init(&rd->lock); | |
217 | ||
218 | return 0; | |
219 | } | |
220 | ||
221 | static int amdgpu_debugfs_regs2_release(struct inode *inode, struct file *file) | |
222 | { | |
223 | struct amdgpu_debugfs_regs2_data *rd = file->private_data; | |
8fa76350 | 224 | |
37df9560 TSD |
225 | mutex_destroy(&rd->lock); |
226 | kfree(file->private_data); | |
227 | return 0; | |
228 | } | |
229 | ||
/*
 * amdgpu_debugfs_regs2_op - common worker for the regs2 read/write callbacks
 *
 * @f: open file handle (private_data holds the per-open selector state)
 * @buf: user buffer to copy to (read) or from (write)
 * @offset: BYTE offset of the first register (note: 32-bit, callers pass
 *	    the truncated file position)
 * @size: number of bytes to transfer, dword sized/aligned
 * @write_en: 0 = read registers, non-zero = write registers
 *
 * Unlike the legacy regs node, the bank/ring selectors are not encoded in
 * the file offset; they were set beforehand via the SET_STATE ioctl and
 * live in rd->id.  Lock ordering: rd->lock outermost, then
 * grbm_idx_mutex / srbm_mutex, then pm.mutex.
 *
 * Returns the number of bytes transferred, or a negative errno.
 */
static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 offset, size_t size, int write_en)
{
	struct amdgpu_debugfs_regs2_data *rd = f->private_data;
	struct amdgpu_device *adev = rd->adev;
	ssize_t result = 0;
	int r;
	uint32_t value;

	/* Register accesses are dword sized and dword aligned. */
	if (size & 0x3 || offset & 0x3)
		return -EINVAL;

	/* Wake the device; drop the usage count again on failure. */
	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* Serialize against concurrent SET_STATE ioctls on this fd. */
	mutex_lock(&rd->lock);

	if (rd->id.use_grbm) {
		/* Reject selectors beyond the hw config (0xFFFFFFFF = broadcast). */
		if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) ||
		    (rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			mutex_unlock(&rd->lock);
			return -EINVAL;
		}
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se,
					rd->id.grbm.sh,
					rd->id.grbm.instance, rd->id.xcc_id);
	}

	if (rd->id.use_srbm) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, rd->id.srbm.me, rd->id.srbm.pipe,
					rd->id.srbm.queue, rd->id.srbm.vmid, rd->id.xcc_id);
	}

	if (rd->id.pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		if (!write_en) {
			/* offset is a byte offset; RREG32 takes a dword index. */
			value = RREG32(offset >> 2);
			r = put_user(value, (uint32_t *)buf);
		} else {
			r = get_user(value, (uint32_t *)buf);
			if (!r)
				amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value, rd->id.xcc_id);
		}
		if (r) {
			result = r;
			goto end;
		}
		offset += 4;
		size -= 4;
		result += 4;
		buf += 4;
	}
end:
	/* Restore broadcast selection and drop locks in reverse order. */
	if (rd->id.use_grbm) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, rd->id.xcc_id);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (rd->id.use_srbm) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, rd->id.xcc_id);
		mutex_unlock(&adev->srbm_mutex);
	}

	if (rd->id.pg_lock)
		mutex_unlock(&adev->pm.mutex);

	mutex_unlock(&rd->lock);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
319 | ||
320 | static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigned long data) | |
321 | { | |
322 | struct amdgpu_debugfs_regs2_data *rd = f->private_data; | |
553f973a | 323 | struct amdgpu_debugfs_regs2_iocdata v1_data; |
37df9560 TSD |
324 | int r; |
325 | ||
553f973a TSD |
326 | mutex_lock(&rd->lock); |
327 | ||
37df9560 | 328 | switch (cmd) { |
553f973a TSD |
329 | case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE_V2: |
330 | r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata_v2 *)data, | |
8fa76350 | 331 | sizeof(rd->id)); |
553f973a TSD |
332 | if (r) |
333 | r = -EINVAL; | |
334 | goto done; | |
335 | case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE: | |
336 | r = copy_from_user(&v1_data, (struct amdgpu_debugfs_regs2_iocdata *)data, | |
337 | sizeof(v1_data)); | |
338 | if (r) { | |
339 | r = -EINVAL; | |
340 | goto done; | |
341 | } | |
342 | goto v1_copy; | |
37df9560 | 343 | default: |
553f973a TSD |
344 | r = -EINVAL; |
345 | goto done; | |
37df9560 | 346 | } |
553f973a TSD |
347 | |
348 | v1_copy: | |
349 | rd->id.use_srbm = v1_data.use_srbm; | |
350 | rd->id.use_grbm = v1_data.use_grbm; | |
351 | rd->id.pg_lock = v1_data.pg_lock; | |
352 | rd->id.grbm.se = v1_data.grbm.se; | |
353 | rd->id.grbm.sh = v1_data.grbm.sh; | |
354 | rd->id.grbm.instance = v1_data.grbm.instance; | |
355 | rd->id.srbm.me = v1_data.srbm.me; | |
356 | rd->id.srbm.pipe = v1_data.srbm.pipe; | |
357 | rd->id.srbm.queue = v1_data.srbm.queue; | |
358 | rd->id.xcc_id = 0; | |
359 | done: | |
360 | mutex_unlock(&rd->lock); | |
361 | return r; | |
37df9560 TSD |
362 | } |
363 | ||
/* Read callback for the regs2 node: delegate to the common worker (write_en = 0). */
static ssize_t amdgpu_debugfs_regs2_read(struct file *f, char __user *buf, size_t size, loff_t *pos)
{
	return amdgpu_debugfs_regs2_op(f, buf, *pos, size, 0);
}
368 | ||
/* Write callback for the regs2 node: delegate to the common worker (write_en = 1). */
static ssize_t amdgpu_debugfs_regs2_write(struct file *f, const char __user *buf, size_t size, loff_t *pos)
{
	return amdgpu_debugfs_regs2_op(f, (char __user *)buf, *pos, size, 1);
}
373 | ||
553f973a TSD |
374 | static int amdgpu_debugfs_gprwave_open(struct inode *inode, struct file *file) |
375 | { | |
376 | struct amdgpu_debugfs_gprwave_data *rd; | |
377 | ||
ad19c200 | 378 | rd = kzalloc(sizeof(*rd), GFP_KERNEL); |
553f973a TSD |
379 | if (!rd) |
380 | return -ENOMEM; | |
381 | rd->adev = file_inode(file)->i_private; | |
382 | file->private_data = rd; | |
383 | mutex_init(&rd->lock); | |
384 | ||
385 | return 0; | |
386 | } | |
387 | ||
388 | static int amdgpu_debugfs_gprwave_release(struct inode *inode, struct file *file) | |
389 | { | |
390 | struct amdgpu_debugfs_gprwave_data *rd = file->private_data; | |
ad19c200 | 391 | |
553f973a TSD |
392 | mutex_destroy(&rd->lock); |
393 | kfree(file->private_data); | |
394 | return 0; | |
395 | } | |
396 | ||
397 | static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, size_t size, loff_t *pos) | |
398 | { | |
399 | struct amdgpu_debugfs_gprwave_data *rd = f->private_data; | |
400 | struct amdgpu_device *adev = rd->adev; | |
401 | ssize_t result = 0; | |
402 | int r; | |
403 | uint32_t *data, x; | |
404 | ||
405 | if (size & 0x3 || *pos & 0x3) | |
406 | return -EINVAL; | |
407 | ||
408 | r = pm_runtime_get_sync(adev_to_drm(adev)->dev); | |
409 | if (r < 0) { | |
410 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); | |
411 | return r; | |
412 | } | |
413 | ||
414 | r = amdgpu_virt_enable_access_debugfs(adev); | |
415 | if (r < 0) { | |
416 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); | |
417 | return r; | |
418 | } | |
419 | ||
420 | data = kcalloc(1024, sizeof(*data), GFP_KERNEL); | |
421 | if (!data) { | |
422 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); | |
423 | amdgpu_virt_disable_access_debugfs(adev); | |
424 | return -ENOMEM; | |
425 | } | |
426 | ||
427 | /* switch to the specific se/sh/cu */ | |
428 | mutex_lock(&adev->grbm_idx_mutex); | |
429 | amdgpu_gfx_select_se_sh(adev, rd->id.se, rd->id.sh, rd->id.cu, rd->id.xcc_id); | |
430 | ||
431 | if (!rd->id.gpr_or_wave) { | |
432 | x = 0; | |
433 | if (adev->gfx.funcs->read_wave_data) | |
434 | adev->gfx.funcs->read_wave_data(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, data, &x); | |
435 | } else { | |
436 | x = size >> 2; | |
437 | if (rd->id.gpr.vpgr_or_sgpr) { | |
438 | if (adev->gfx.funcs->read_wave_vgprs) | |
439 | adev->gfx.funcs->read_wave_vgprs(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, rd->id.gpr.thread, *pos, size>>2, data); | |
440 | } else { | |
441 | if (adev->gfx.funcs->read_wave_sgprs) | |
442 | adev->gfx.funcs->read_wave_sgprs(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, *pos, size>>2, data); | |
443 | } | |
444 | } | |
445 | ||
446 | amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, rd->id.xcc_id); | |
447 | mutex_unlock(&adev->grbm_idx_mutex); | |
448 | ||
449 | pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); | |
450 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); | |
451 | ||
452 | if (!x) { | |
453 | result = -EINVAL; | |
454 | goto done; | |
455 | } | |
456 | ||
457 | while (size && (*pos < x * 4)) { | |
458 | uint32_t value; | |
459 | ||
460 | value = data[*pos >> 2]; | |
461 | r = put_user(value, (uint32_t *)buf); | |
462 | if (r) { | |
463 | result = r; | |
464 | goto done; | |
465 | } | |
466 | ||
467 | result += 4; | |
468 | buf += 4; | |
469 | *pos += 4; | |
470 | size -= 4; | |
471 | } | |
472 | ||
473 | done: | |
474 | amdgpu_virt_disable_access_debugfs(adev); | |
475 | kfree(data); | |
476 | return result; | |
477 | } | |
478 | ||
479 | static long amdgpu_debugfs_gprwave_ioctl(struct file *f, unsigned int cmd, unsigned long data) | |
480 | { | |
481 | struct amdgpu_debugfs_gprwave_data *rd = f->private_data; | |
9c9d501b | 482 | int r = 0; |
553f973a TSD |
483 | |
484 | mutex_lock(&rd->lock); | |
485 | ||
486 | switch (cmd) { | |
487 | case AMDGPU_DEBUGFS_GPRWAVE_IOC_SET_STATE: | |
9c9d501b DC |
488 | if (copy_from_user(&rd->id, |
489 | (struct amdgpu_debugfs_gprwave_iocdata *)data, | |
490 | sizeof(rd->id))) | |
491 | r = -EFAULT; | |
553f973a TSD |
492 | goto done; |
493 | default: | |
494 | r = -EINVAL; | |
495 | goto done; | |
496 | } | |
497 | ||
498 | done: | |
499 | mutex_unlock(&rd->lock); | |
500 | return r; | |
501 | } | |
502 | ||
503 | ||
504 | ||
7e4237db TSD |
505 | |
/**
 * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to read.  This
 * allows reading multiple registers in a single call and having
 * the returned size reflect that.
 */
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	/* Register accesses are dword sized and dword aligned. */
	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* Wake the device; drop the usage count again if wakeup failed. */
	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		/* Offsets beyond 32 bits use the extended indirect accessor. */
		if (upper_32_bits(*pos))
			value = RREG32_PCIE_EXT(*pos);
		else
			value = RREG32_PCIE(*pos);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			goto out;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	r = result;
out:
	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
	amdgpu_virt_disable_access_debugfs(adev);
	return r;
}
565 | ||
7e4237db TSD |
/**
 * amdgpu_debugfs_regs_pcie_write - Write to a PCIE register
 *
 * @f: open file handle
 * @buf: User buffer to write data from
 * @size: Number of bytes to write
 * @pos: Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to write.  This
 * allows writing multiple registers in a single call and having
 * the returned size reflect that.
 */
static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	/* Register accesses are dword sized and dword aligned. */
	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* Wake the device; drop the usage count again if wakeup failed. */
	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			goto out;

		/* Offsets beyond 32 bits use the extended indirect accessor. */
		if (upper_32_bits(*pos))
			WREG32_PCIE_EXT(*pos, value);
		else
			WREG32_PCIE(*pos, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	r = result;
out:
	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
	amdgpu_virt_disable_access_debugfs(adev);
	return r;
}
625 | ||
7e4237db TSD |
626 | /** |
627 | * amdgpu_debugfs_regs_didt_read - Read from a DIDT register | |
628 | * | |
629 | * @f: open file handle | |
630 | * @buf: User buffer to store read data in | |
631 | * @size: Number of bytes to read | |
632 | * @pos: Offset to seek to | |
633 | * | |
634 | * The lower bits are the BYTE offset of the register to read. This | |
635 | * allows reading multiple registers in a single call and having | |
636 | * the returned size reflect that. | |
637 | */ | |
75758255 AD |
638 | static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, |
639 | size_t size, loff_t *pos) | |
640 | { | |
641 | struct amdgpu_device *adev = file_inode(f)->i_private; | |
642 | ssize_t result = 0; | |
643 | int r; | |
644 | ||
645 | if (size & 0x3 || *pos & 0x3) | |
646 | return -EINVAL; | |
647 | ||
7b194fdc LY |
648 | if (!adev->didt_rreg) |
649 | return -EOPNOTSUPP; | |
650 | ||
4a580877 | 651 | r = pm_runtime_get_sync(adev_to_drm(adev)->dev); |
9eee152a | 652 | if (r < 0) { |
4a580877 | 653 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); |
a9ffe2a9 | 654 | return r; |
9eee152a | 655 | } |
a9ffe2a9 | 656 | |
95a2f917 | 657 | r = amdgpu_virt_enable_access_debugfs(adev); |
9eee152a | 658 | if (r < 0) { |
4a580877 | 659 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); |
95a2f917 | 660 | return r; |
9eee152a | 661 | } |
95a2f917 | 662 | |
75758255 AD |
663 | while (size) { |
664 | uint32_t value; | |
665 | ||
666 | value = RREG32_DIDT(*pos >> 2); | |
667 | r = put_user(value, (uint32_t *)buf); | |
edadd6fc AA |
668 | if (r) |
669 | goto out; | |
75758255 AD |
670 | |
671 | result += 4; | |
672 | buf += 4; | |
673 | *pos += 4; | |
674 | size -= 4; | |
675 | } | |
676 | ||
edadd6fc AA |
677 | r = result; |
678 | out: | |
4a580877 LT |
679 | pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); |
680 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); | |
95a2f917 | 681 | amdgpu_virt_disable_access_debugfs(adev); |
edadd6fc | 682 | return r; |
75758255 AD |
683 | } |
684 | ||
7e4237db TSD |
685 | /** |
686 | * amdgpu_debugfs_regs_didt_write - Write to a DIDT register | |
687 | * | |
688 | * @f: open file handle | |
689 | * @buf: User buffer to write data from | |
690 | * @size: Number of bytes to write | |
691 | * @pos: Offset to seek to | |
692 | * | |
693 | * The lower bits are the BYTE offset of the register to write. This | |
694 | * allows writing multiple registers in a single call and having | |
695 | * the returned size reflect that. | |
696 | */ | |
75758255 AD |
697 | static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf, |
698 | size_t size, loff_t *pos) | |
699 | { | |
700 | struct amdgpu_device *adev = file_inode(f)->i_private; | |
701 | ssize_t result = 0; | |
702 | int r; | |
703 | ||
704 | if (size & 0x3 || *pos & 0x3) | |
705 | return -EINVAL; | |
706 | ||
7b194fdc LY |
707 | if (!adev->didt_wreg) |
708 | return -EOPNOTSUPP; | |
709 | ||
4a580877 | 710 | r = pm_runtime_get_sync(adev_to_drm(adev)->dev); |
9eee152a | 711 | if (r < 0) { |
4a580877 | 712 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); |
a9ffe2a9 | 713 | return r; |
9eee152a | 714 | } |
a9ffe2a9 | 715 | |
95a2f917 | 716 | r = amdgpu_virt_enable_access_debugfs(adev); |
9eee152a | 717 | if (r < 0) { |
4a580877 | 718 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); |
95a2f917 | 719 | return r; |
9eee152a | 720 | } |
95a2f917 | 721 | |
75758255 AD |
722 | while (size) { |
723 | uint32_t value; | |
724 | ||
725 | r = get_user(value, (uint32_t *)buf); | |
edadd6fc AA |
726 | if (r) |
727 | goto out; | |
75758255 AD |
728 | |
729 | WREG32_DIDT(*pos >> 2, value); | |
730 | ||
731 | result += 4; | |
732 | buf += 4; | |
733 | *pos += 4; | |
734 | size -= 4; | |
735 | } | |
736 | ||
edadd6fc AA |
737 | r = result; |
738 | out: | |
4a580877 LT |
739 | pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); |
740 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); | |
95a2f917 | 741 | amdgpu_virt_disable_access_debugfs(adev); |
edadd6fc | 742 | return r; |
75758255 AD |
743 | } |
744 | ||
7e4237db TSD |
/**
 * amdgpu_debugfs_regs_smc_read - Read from a SMC register
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to read.  This
 * allows reading multiple registers in a single call and having
 * the returned size reflect that.
 */
static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	/* Not every ASIC provides an SMC read accessor. */
	if (!adev->smc_rreg)
		return -EOPNOTSUPP;

	/* Register accesses are dword sized and dword aligned. */
	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* Wake the device; drop the usage count again if wakeup failed. */
	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		/* Note: SMC takes the byte offset directly (no >> 2). */
		value = RREG32_SMC(*pos);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			goto out;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	r = result;
out:
	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
	amdgpu_virt_disable_access_debugfs(adev);
	return r;
}
803 | ||
7e4237db TSD |
804 | /** |
805 | * amdgpu_debugfs_regs_smc_write - Write to a SMC register | |
806 | * | |
807 | * @f: open file handle | |
808 | * @buf: User buffer to write data from | |
809 | * @size: Number of bytes to write | |
810 | * @pos: Offset to seek to | |
811 | * | |
812 | * The lower bits are the BYTE offset of the register to write. This | |
813 | * allows writing multiple registers in a single call and having | |
814 | * the returned size reflect that. | |
815 | */ | |
75758255 AD |
816 | static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf, |
817 | size_t size, loff_t *pos) | |
818 | { | |
819 | struct amdgpu_device *adev = file_inode(f)->i_private; | |
820 | ssize_t result = 0; | |
821 | int r; | |
822 | ||
5104fdf5 | 823 | if (!adev->smc_wreg) |
afe58346 | 824 | return -EOPNOTSUPP; |
5104fdf5 | 825 | |
75758255 AD |
826 | if (size & 0x3 || *pos & 0x3) |
827 | return -EINVAL; | |
828 | ||
4a580877 | 829 | r = pm_runtime_get_sync(adev_to_drm(adev)->dev); |
9eee152a | 830 | if (r < 0) { |
4a580877 | 831 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); |
a9ffe2a9 | 832 | return r; |
9eee152a | 833 | } |
a9ffe2a9 | 834 | |
95a2f917 | 835 | r = amdgpu_virt_enable_access_debugfs(adev); |
9eee152a | 836 | if (r < 0) { |
4a580877 | 837 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); |
95a2f917 | 838 | return r; |
9eee152a | 839 | } |
95a2f917 | 840 | |
75758255 AD |
841 | while (size) { |
842 | uint32_t value; | |
843 | ||
844 | r = get_user(value, (uint32_t *)buf); | |
edadd6fc AA |
845 | if (r) |
846 | goto out; | |
75758255 AD |
847 | |
848 | WREG32_SMC(*pos, value); | |
849 | ||
850 | result += 4; | |
851 | buf += 4; | |
852 | *pos += 4; | |
853 | size -= 4; | |
854 | } | |
855 | ||
edadd6fc AA |
856 | r = result; |
857 | out: | |
4a580877 LT |
858 | pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); |
859 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); | |
95a2f917 | 860 | amdgpu_virt_disable_access_debugfs(adev); |
edadd6fc | 861 | return r; |
75758255 AD |
862 | } |
863 | ||
7e4237db TSD |
864 | /** |
865 | * amdgpu_debugfs_gca_config_read - Read from gfx config data | |
866 | * | |
867 | * @f: open file handle | |
868 | * @buf: User buffer to store read data in | |
869 | * @size: Number of bytes to read | |
870 | * @pos: Offset to seek to | |
871 | * | |
872 | * This file is used to access configuration data in a somewhat | |
873 | * stable fashion. The format is a series of DWORDs with the first | |
874 | * indicating which revision it is. New content is appended to the | |
875 | * end so that older software can still read the data. | |
876 | */ | |
877 | ||
75758255 AD |
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	/* dword-sized, dword-aligned accesses only */
	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* scratch snapshot buffer; 256 dwords is comfortably larger than
	 * the number of entries filled in below
	 */
	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 5;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = lower_32_bits(adev->pg_flags);
	config[no_regs++] = lower_32_bits(adev->cg_flags);

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	/* rev==4 APU flag */
	config[no_regs++] = adev->flags & AMD_IS_APU ? 1 : 0;

	/* rev==5 PG/CG flag upper 32bit */
	config[no_regs++] = upper_32_bits(adev->pg_flags);
	config[no_regs++] = upper_32_bits(adev->cg_flags);

	/* copy dwords out until the caller's buffer or the snapshot is
	 * exhausted; *pos is a byte index into the snapshot, so older
	 * userspace that only knows earlier revisions simply stops short
	 */
	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}
960 | ||
7e4237db TSD |
961 | /** |
962 | * amdgpu_debugfs_sensor_read - Read from the powerplay sensors | |
963 | * | |
964 | * @f: open file handle | |
965 | * @buf: User buffer to store read data in | |
966 | * @size: Number of bytes to read | |
967 | * @pos: Offset to seek to | |
968 | * | |
969 | * The offset is treated as the BYTE address of one of the sensors | |
970 | * enumerated in amd/include/kgd_pp_interface.h under the | |
971 | * 'amd_pp_sensors' enumeration. For instance to read the UVD VCLK | |
972 | * you would use the offset 3 * 4 = 12. | |
973 | */ | |
75758255 AD |
974 | static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, |
975 | size_t size, loff_t *pos) | |
976 | { | |
977 | struct amdgpu_device *adev = file_inode(f)->i_private; | |
978 | int idx, x, outsize, r, valuesize; | |
979 | uint32_t values[16]; | |
980 | ||
981 | if (size & 3 || *pos & 0x3) | |
982 | return -EINVAL; | |
983 | ||
b13aa109 | 984 | if (!adev->pm.dpm_enabled) |
75758255 AD |
985 | return -EINVAL; |
986 | ||
987 | /* convert offset to sensor number */ | |
988 | idx = *pos >> 2; | |
989 | ||
990 | valuesize = sizeof(values); | |
a9ffe2a9 | 991 | |
4a580877 | 992 | r = pm_runtime_get_sync(adev_to_drm(adev)->dev); |
9eee152a | 993 | if (r < 0) { |
4a580877 | 994 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); |
a9ffe2a9 | 995 | return r; |
9eee152a | 996 | } |
a9ffe2a9 | 997 | |
95a2f917 | 998 | r = amdgpu_virt_enable_access_debugfs(adev); |
9eee152a | 999 | if (r < 0) { |
4a580877 | 1000 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); |
95a2f917 | 1001 | return r; |
9eee152a | 1002 | } |
95a2f917 | 1003 | |
4a5a2de6 | 1004 | r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize); |
a9ffe2a9 | 1005 | |
4a580877 LT |
1006 | pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); |
1007 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); | |
a9ffe2a9 | 1008 | |
95a2f917 YT |
1009 | if (r) { |
1010 | amdgpu_virt_disable_access_debugfs(adev); | |
4a5a2de6 | 1011 | return r; |
95a2f917 | 1012 | } |
75758255 | 1013 | |
95a2f917 YT |
1014 | if (size > valuesize) { |
1015 | amdgpu_virt_disable_access_debugfs(adev); | |
75758255 | 1016 | return -EINVAL; |
95a2f917 | 1017 | } |
75758255 AD |
1018 | |
1019 | outsize = 0; | |
1020 | x = 0; | |
1021 | if (!r) { | |
1022 | while (size) { | |
1023 | r = put_user(values[x++], (int32_t *)buf); | |
1024 | buf += 4; | |
1025 | size -= 4; | |
1026 | outsize += 4; | |
1027 | } | |
1028 | } | |
1029 | ||
95a2f917 | 1030 | amdgpu_virt_disable_access_debugfs(adev); |
75758255 AD |
1031 | return !r ? outsize : r; |
1032 | } | |
1033 | ||
7e4237db TSD |
1034 | /** amdgpu_debugfs_wave_read - Read WAVE STATUS data |
1035 | * | |
1036 | * @f: open file handle | |
1037 | * @buf: User buffer to store read data in | |
1038 | * @size: Number of bytes to read | |
1039 | * @pos: Offset to seek to | |
1040 | * | |
1041 | * The offset being sought changes which wave that the status data | |
1042 | * will be returned for. The bits are used as follows: | |
1043 | * | |
8fa76350 | 1044 | * Bits 0..6: Byte offset into data |
7e4237db TSD |
1045 | * Bits 7..14: SE selector |
1046 | * Bits 15..22: SH/SA selector | |
1047 | * Bits 23..30: CU/{WGP+SIMD} selector | |
1048 | * Bits 31..36: WAVE ID selector | |
1049 | * Bits 37..44: SIMD ID selector | |
1050 | * | |
1051 | * The returned data begins with one DWORD of version information | |
1052 | * Followed by WAVE STATUS registers relevant to the GFX IP version | |
1053 | * being used. See gfx_v8_0_read_wave_data() for an example output. | |
1054 | */ | |
75758255 AD |
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	/* dword-sized, dword-aligned accesses only */
	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset (see the kernel-doc above for the bit layout) */
	offset = (*pos & GENMASK_ULL(6, 0));
	se = (*pos & GENMASK_ULL(14, 7)) >> 7;
	sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
	cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
	wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
	simd = (*pos & GENMASK_ULL(44, 37)) >> 37;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		/* get_sync bumps the usage count even on failure */
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu, 0);

	/* x counts the dwords the IP-specific callback fills into data[] */
	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, 0, simd, wave, data, &x);

	/* restore broadcast GRBM addressing before releasing the mutex */
	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	/* no wave data available for this selection */
	if (!x) {
		amdgpu_virt_disable_access_debugfs(adev);
		return -EINVAL;
	}

	/* copy dwords out, bounded by both the request and the amount of
	 * data the callback produced
	 */
	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
1124 | ||
7e4237db TSD |
/** amdgpu_debugfs_gpr_read - Read wave gprs
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 *
 * The offset being sought changes which wave that the status data
 * will be returned for.  The bits are used as follows:
 *
 * Bits 0..11:	Byte offset into data
 * Bits 12..19:	SE selector
 * Bits 20..27:	SH/SA selector
 * Bits 28..35: CU/{WGP+SIMD} selector
 * Bits 36..43: WAVE ID selector
 * Bits 44..51: SIMD ID selector
 * Bits 52..59: Thread selector
 * Bits 60..61: Bank selector (VGPR=0,SGPR=1)
 *
 * The return data comes from the SGPR or VGPR register bank for
 * the selected operational unit.
 */
75758255 AD |
1147 | static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, |
1148 | size_t size, loff_t *pos) | |
1149 | { | |
1150 | struct amdgpu_device *adev = f->f_inode->i_private; | |
1151 | int r; | |
1152 | ssize_t result = 0; | |
1153 | uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data; | |
1154 | ||
6397ec58 | 1155 | if (size > 4096 || size & 3 || *pos & 3) |
75758255 AD |
1156 | return -EINVAL; |
1157 | ||
1158 | /* decode offset */ | |
6397ec58 | 1159 | offset = (*pos & GENMASK_ULL(11, 0)) >> 2; |
75758255 AD |
1160 | se = (*pos & GENMASK_ULL(19, 12)) >> 12; |
1161 | sh = (*pos & GENMASK_ULL(27, 20)) >> 20; | |
1162 | cu = (*pos & GENMASK_ULL(35, 28)) >> 28; | |
1163 | wave = (*pos & GENMASK_ULL(43, 36)) >> 36; | |
1164 | simd = (*pos & GENMASK_ULL(51, 44)) >> 44; | |
1165 | thread = (*pos & GENMASK_ULL(59, 52)) >> 52; | |
1166 | bank = (*pos & GENMASK_ULL(61, 60)) >> 60; | |
1167 | ||
929e571c | 1168 | data = kcalloc(1024, sizeof(*data), GFP_KERNEL); |
75758255 AD |
1169 | if (!data) |
1170 | return -ENOMEM; | |
1171 | ||
4a580877 | 1172 | r = pm_runtime_get_sync(adev_to_drm(adev)->dev); |
a9ffe2a9 | 1173 | if (r < 0) |
3e4aeff3 | 1174 | goto err; |
a9ffe2a9 | 1175 | |
95a2f917 YT |
1176 | r = amdgpu_virt_enable_access_debugfs(adev); |
1177 | if (r < 0) | |
888e32d7 | 1178 | goto err; |
95a2f917 | 1179 | |
75758255 AD |
1180 | /* switch to the specific se/sh/cu */ |
1181 | mutex_lock(&adev->grbm_idx_mutex); | |
d51ac6d0 | 1182 | amdgpu_gfx_select_se_sh(adev, se, sh, cu, 0); |
75758255 AD |
1183 | |
1184 | if (bank == 0) { | |
1185 | if (adev->gfx.funcs->read_wave_vgprs) | |
553f973a | 1186 | adev->gfx.funcs->read_wave_vgprs(adev, 0, simd, wave, thread, offset, size>>2, data); |
75758255 AD |
1187 | } else { |
1188 | if (adev->gfx.funcs->read_wave_sgprs) | |
553f973a | 1189 | adev->gfx.funcs->read_wave_sgprs(adev, 0, simd, wave, offset, size>>2, data); |
75758255 AD |
1190 | } |
1191 | ||
d51ac6d0 | 1192 | amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0); |
75758255 AD |
1193 | mutex_unlock(&adev->grbm_idx_mutex); |
1194 | ||
4a580877 LT |
1195 | pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); |
1196 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); | |
a9ffe2a9 | 1197 | |
75758255 AD |
1198 | while (size) { |
1199 | uint32_t value; | |
1200 | ||
6397ec58 | 1201 | value = data[result >> 2]; |
75758255 AD |
1202 | r = put_user(value, (uint32_t *)buf); |
1203 | if (r) { | |
3e4aeff3 | 1204 | amdgpu_virt_disable_access_debugfs(adev); |
75758255 AD |
1205 | goto err; |
1206 | } | |
1207 | ||
1208 | result += 4; | |
1209 | buf += 4; | |
1210 | size -= 4; | |
1211 | } | |
1212 | ||
75758255 | 1213 | kfree(data); |
95a2f917 | 1214 | amdgpu_virt_disable_access_debugfs(adev); |
75758255 | 1215 | return result; |
3e4aeff3 CT |
1216 | |
1217 | err: | |
4a580877 | 1218 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); |
3e4aeff3 CT |
1219 | kfree(data); |
1220 | return r; | |
75758255 AD |
1221 | } |
1222 | ||
0ad7347a AA |
1223 | /** |
1224 | * amdgpu_debugfs_gfxoff_residency_read - Read GFXOFF residency | |
1225 | * | |
1226 | * @f: open file handle | |
1227 | * @buf: User buffer to store read data in | |
1228 | * @size: Number of bytes to read | |
1229 | * @pos: Offset to seek to | |
1230 | * | |
1231 | * Read the last residency value logged. It doesn't auto update, one needs to | |
1232 | * stop logging before getting the current value. | |
1233 | */ | |
1234 | static ssize_t amdgpu_debugfs_gfxoff_residency_read(struct file *f, char __user *buf, | |
1235 | size_t size, loff_t *pos) | |
1236 | { | |
1237 | struct amdgpu_device *adev = file_inode(f)->i_private; | |
1238 | ssize_t result = 0; | |
1239 | int r; | |
1240 | ||
1241 | if (size & 0x3 || *pos & 0x3) | |
1242 | return -EINVAL; | |
1243 | ||
1244 | r = pm_runtime_get_sync(adev_to_drm(adev)->dev); | |
1245 | if (r < 0) { | |
1246 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); | |
1247 | return r; | |
1248 | } | |
1249 | ||
1250 | while (size) { | |
1251 | uint32_t value; | |
1252 | ||
1253 | r = amdgpu_get_gfx_off_residency(adev, &value); | |
1254 | if (r) | |
1255 | goto out; | |
1256 | ||
1257 | r = put_user(value, (uint32_t *)buf); | |
1258 | if (r) | |
1259 | goto out; | |
1260 | ||
1261 | result += 4; | |
1262 | buf += 4; | |
1263 | *pos += 4; | |
1264 | size -= 4; | |
1265 | } | |
1266 | ||
1267 | r = result; | |
1268 | out: | |
1269 | pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); | |
1270 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); | |
1271 | ||
1272 | return r; | |
1273 | } | |
1274 | ||
1275 | /** | |
1276 | * amdgpu_debugfs_gfxoff_residency_write - Log GFXOFF Residency | |
1277 | * | |
1278 | * @f: open file handle | |
1279 | * @buf: User buffer to write data from | |
1280 | * @size: Number of bytes to write | |
1281 | * @pos: Offset to seek to | |
1282 | * | |
1283 | * Write a 32-bit non-zero to start logging; write a 32-bit zero to stop | |
1284 | */ | |
1285 | static ssize_t amdgpu_debugfs_gfxoff_residency_write(struct file *f, const char __user *buf, | |
1286 | size_t size, loff_t *pos) | |
1287 | { | |
1288 | struct amdgpu_device *adev = file_inode(f)->i_private; | |
1289 | ssize_t result = 0; | |
1290 | int r; | |
1291 | ||
1292 | if (size & 0x3 || *pos & 0x3) | |
1293 | return -EINVAL; | |
1294 | ||
1295 | r = pm_runtime_get_sync(adev_to_drm(adev)->dev); | |
1296 | if (r < 0) { | |
1297 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); | |
1298 | return r; | |
1299 | } | |
1300 | ||
1301 | while (size) { | |
1302 | u32 value; | |
1303 | ||
1304 | r = get_user(value, (uint32_t *)buf); | |
1305 | if (r) | |
1306 | goto out; | |
1307 | ||
1308 | amdgpu_set_gfx_off_residency(adev, value ? true : false); | |
1309 | ||
1310 | result += 4; | |
1311 | buf += 4; | |
1312 | *pos += 4; | |
1313 | size -= 4; | |
1314 | } | |
1315 | ||
1316 | r = result; | |
1317 | out: | |
1318 | pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); | |
1319 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); | |
1320 | ||
1321 | return r; | |
1322 | } | |
1323 | ||
1324 | ||
1325 | /** | |
1326 | * amdgpu_debugfs_gfxoff_count_read - Read GFXOFF entry count | |
1327 | * | |
1328 | * @f: open file handle | |
1329 | * @buf: User buffer to store read data in | |
1330 | * @size: Number of bytes to read | |
1331 | * @pos: Offset to seek to | |
1332 | */ | |
1333 | static ssize_t amdgpu_debugfs_gfxoff_count_read(struct file *f, char __user *buf, | |
1334 | size_t size, loff_t *pos) | |
1335 | { | |
1336 | struct amdgpu_device *adev = file_inode(f)->i_private; | |
1337 | ssize_t result = 0; | |
1338 | int r; | |
1339 | ||
1340 | if (size & 0x3 || *pos & 0x3) | |
1341 | return -EINVAL; | |
1342 | ||
1343 | r = pm_runtime_get_sync(adev_to_drm(adev)->dev); | |
1344 | if (r < 0) { | |
1345 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); | |
1346 | return r; | |
1347 | } | |
1348 | ||
1349 | while (size) { | |
1350 | u64 value = 0; | |
1351 | ||
1352 | r = amdgpu_get_gfx_off_entrycount(adev, &value); | |
1353 | if (r) | |
1354 | goto out; | |
1355 | ||
1356 | r = put_user(value, (u64 *)buf); | |
1357 | if (r) | |
1358 | goto out; | |
1359 | ||
1360 | result += 4; | |
1361 | buf += 4; | |
1362 | *pos += 4; | |
1363 | size -= 4; | |
1364 | } | |
1365 | ||
1366 | r = result; | |
1367 | out: | |
1368 | pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); | |
1369 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); | |
1370 | ||
1371 | return r; | |
1372 | } | |
1373 | ||
669e2f91 | 1374 | /** |
e72d4a8b | 1375 | * amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF |
669e2f91 TSD |
1376 | * |
1377 | * @f: open file handle | |
1378 | * @buf: User buffer to write data from | |
1379 | * @size: Number of bytes to write | |
1380 | * @pos: Offset to seek to | |
1381 | * | |
1382 | * Write a 32-bit zero to disable or a 32-bit non-zero to enable | |
1383 | */ | |
1384 | static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf, | |
1385 | size_t size, loff_t *pos) | |
1386 | { | |
1387 | struct amdgpu_device *adev = file_inode(f)->i_private; | |
1388 | ssize_t result = 0; | |
1389 | int r; | |
1390 | ||
1391 | if (size & 0x3 || *pos & 0x3) | |
1392 | return -EINVAL; | |
1393 | ||
4a580877 | 1394 | r = pm_runtime_get_sync(adev_to_drm(adev)->dev); |
9eee152a | 1395 | if (r < 0) { |
4a580877 | 1396 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); |
669e2f91 | 1397 | return r; |
9eee152a | 1398 | } |
669e2f91 TSD |
1399 | |
1400 | while (size) { | |
1401 | uint32_t value; | |
1402 | ||
1403 | r = get_user(value, (uint32_t *)buf); | |
edadd6fc AA |
1404 | if (r) |
1405 | goto out; | |
669e2f91 TSD |
1406 | |
1407 | amdgpu_gfx_off_ctrl(adev, value ? true : false); | |
1408 | ||
1409 | result += 4; | |
1410 | buf += 4; | |
1411 | *pos += 4; | |
1412 | size -= 4; | |
1413 | } | |
1414 | ||
edadd6fc AA |
1415 | r = result; |
1416 | out: | |
4a580877 LT |
1417 | pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); |
1418 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); | |
669e2f91 | 1419 | |
edadd6fc | 1420 | return r; |
669e2f91 TSD |
1421 | } |
1422 | ||
1423 | ||
443c7f3c | 1424 | /** |
e72d4a8b | 1425 | * amdgpu_debugfs_gfxoff_read - read gfxoff status |
443c7f3c JS |
1426 | * |
1427 | * @f: open file handle | |
1428 | * @buf: User buffer to store read data in | |
1429 | * @size: Number of bytes to read | |
1430 | * @pos: Offset to seek to | |
1431 | */ | |
1432 | static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf, | |
1433 | size_t size, loff_t *pos) | |
1434 | { | |
1435 | struct amdgpu_device *adev = file_inode(f)->i_private; | |
1436 | ssize_t result = 0; | |
1437 | int r; | |
1438 | ||
1439 | if (size & 0x3 || *pos & 0x3) | |
1440 | return -EINVAL; | |
1441 | ||
4a580877 | 1442 | r = pm_runtime_get_sync(adev_to_drm(adev)->dev); |
4bd8dd0d YL |
1443 | if (r < 0) { |
1444 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); | |
443c7f3c | 1445 | return r; |
4bd8dd0d | 1446 | } |
443c7f3c JS |
1447 | |
1448 | while (size) { | |
4686177f AA |
1449 | u32 value = adev->gfx.gfx_off_state; |
1450 | ||
1451 | r = put_user(value, (u32 *)buf); | |
1452 | if (r) | |
1453 | goto out; | |
1454 | ||
1455 | result += 4; | |
1456 | buf += 4; | |
1457 | *pos += 4; | |
1458 | size -= 4; | |
1459 | } | |
1460 | ||
1461 | r = result; | |
1462 | out: | |
1463 | pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); | |
1464 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); | |
1465 | ||
1466 | return r; | |
1467 | } | |
1468 | ||
1469 | static ssize_t amdgpu_debugfs_gfxoff_status_read(struct file *f, char __user *buf, | |
1470 | size_t size, loff_t *pos) | |
1471 | { | |
1472 | struct amdgpu_device *adev = file_inode(f)->i_private; | |
1473 | ssize_t result = 0; | |
1474 | int r; | |
1475 | ||
1476 | if (size & 0x3 || *pos & 0x3) | |
1477 | return -EINVAL; | |
1478 | ||
1479 | r = pm_runtime_get_sync(adev_to_drm(adev)->dev); | |
1480 | if (r < 0) { | |
1481 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); | |
1482 | return r; | |
1483 | } | |
1484 | ||
1485 | while (size) { | |
1486 | u32 value; | |
443c7f3c JS |
1487 | |
1488 | r = amdgpu_get_gfx_off_status(adev, &value); | |
edadd6fc AA |
1489 | if (r) |
1490 | goto out; | |
443c7f3c | 1491 | |
4686177f | 1492 | r = put_user(value, (u32 *)buf); |
edadd6fc AA |
1493 | if (r) |
1494 | goto out; | |
443c7f3c JS |
1495 | |
1496 | result += 4; | |
1497 | buf += 4; | |
1498 | *pos += 4; | |
1499 | size -= 4; | |
1500 | } | |
1501 | ||
edadd6fc AA |
1502 | r = result; |
1503 | out: | |
4a580877 LT |
1504 | pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); |
1505 | pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); | |
443c7f3c | 1506 | |
edadd6fc | 1507 | return r; |
443c7f3c JS |
1508 | } |
1509 | ||
37df9560 TSD |
/* regs2: MMIO access steered by an ioctl-selected GRBM/SRBM state */
static const struct file_operations amdgpu_debugfs_regs2_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = amdgpu_debugfs_regs2_ioctl,
	.read = amdgpu_debugfs_regs2_read,
	.write = amdgpu_debugfs_regs2_write,
	.open = amdgpu_debugfs_regs2_open,
	.release = amdgpu_debugfs_regs2_release,
	.llseek = default_llseek
};

/* gprwave: GPR/wave state reads steered by an ioctl-set selector */
static const struct file_operations amdgpu_debugfs_gprwave_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = amdgpu_debugfs_gprwave_ioctl,
	.read = amdgpu_debugfs_gprwave_read,
	.open = amdgpu_debugfs_gprwave_open,
	.release = amdgpu_debugfs_gprwave_release,
	.llseek = default_llseek
};

/* legacy raw MMIO register access (offset encoded in the file position) */
static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};
/* DIDT register space access */
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};
/* PCIE indirect register space access */
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};
/* SMC register space access */
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

/* read-only GFX configuration snapshot */
static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

/* read-only powerplay sensor values */
static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

/* read-only wave status data */
static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};
/* read-only wave GPR banks */
static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

/* GFXOFF enable/disable control plus cached-state readback */
static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gfxoff_read,
	.write = amdgpu_debugfs_gfxoff_write,
	.llseek = default_llseek
};

/* read-only live GFXOFF status */
static const struct file_operations amdgpu_debugfs_gfxoff_status_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gfxoff_status_read,
	.llseek = default_llseek
};

/* read-only GFXOFF entry count */
static const struct file_operations amdgpu_debugfs_gfxoff_count_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gfxoff_count_read,
	.llseek = default_llseek
};

/* GFXOFF residency logging control and readback */
static const struct file_operations amdgpu_debugfs_gfxoff_residency_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gfxoff_residency_read,
	.write = amdgpu_debugfs_gfxoff_residency_write,
	.llseek = default_llseek
};
1602 | ||
75758255 AD |
/* fops table for the register-level debugfs files.  Must stay in the
 * same order as debugfs_regs_names[] below - the two arrays are indexed
 * together in amdgpu_debugfs_regs_init().
 */
static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs2_fops,
	&amdgpu_debugfs_gprwave_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
	&amdgpu_debugfs_gfxoff_fops,
	&amdgpu_debugfs_gfxoff_status_fops,
	&amdgpu_debugfs_gfxoff_count_fops,
	&amdgpu_debugfs_gfxoff_residency_fops,
};

/* debugfs file names matching debugfs_regs[] entry-for-entry */
static const char * const debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs2",
	"amdgpu_gprwave",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
	"amdgpu_gfxoff",
	"amdgpu_gfxoff_status",
	"amdgpu_gfxoff_count",
	"amdgpu_gfxoff_residency",
};
1636 | ||
7e4237db TSD |
1637 | /** |
1638 | * amdgpu_debugfs_regs_init - Initialize debugfs entries that provide | |
8fa76350 | 1639 | * register access. |
7e4237db TSD |
1640 | * |
1641 | * @adev: The device to attach the debugfs entries to | |
1642 | */ | |
75758255 AD |
1643 | int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) |
1644 | { | |
4a580877 | 1645 | struct drm_minor *minor = adev_to_drm(adev)->primary; |
75758255 | 1646 | struct dentry *ent, *root = minor->debugfs_root; |
d344b21b | 1647 | unsigned int i; |
75758255 AD |
1648 | |
1649 | for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) { | |
1650 | ent = debugfs_create_file(debugfs_regs_names[i], | |
8fa76350 | 1651 | S_IFREG | 0444, root, |
75758255 | 1652 | adev, debugfs_regs[i]); |
d344b21b | 1653 | if (!i && !IS_ERR_OR_NULL(ent)) |
75758255 | 1654 | i_size_write(ent->d_inode, adev->rmmio_size); |
75758255 AD |
1655 | } |
1656 | ||
1657 | return 0; | |
1658 | } | |
1659 | ||
98d28ac2 | 1660 | static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) |
75758255 | 1661 | { |
109b4d8c | 1662 | struct amdgpu_device *adev = m->private; |
98d28ac2 | 1663 | struct drm_device *dev = adev_to_drm(adev); |
75758255 AD |
1664 | int r = 0, i; |
1665 | ||
a9ffe2a9 | 1666 | r = pm_runtime_get_sync(dev->dev); |
9eee152a | 1667 | if (r < 0) { |
98d28ac2 | 1668 | pm_runtime_put_autosuspend(dev->dev); |
a9ffe2a9 | 1669 | return r; |
9eee152a | 1670 | } |
a9ffe2a9 | 1671 | |
a28fda31 | 1672 | /* Avoid accidently unparking the sched thread during GPU reset */ |
d0fb18b5 | 1673 | r = down_write_killable(&adev->reset_domain->sem); |
6049db43 DL |
1674 | if (r) |
1675 | return r; | |
a28fda31 | 1676 | |
75758255 AD |
1677 | /* hold on the scheduler */ |
1678 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { | |
1679 | struct amdgpu_ring *ring = adev->rings[i]; | |
1680 | ||
9749c868 | 1681 | if (!amdgpu_ring_sched_ready(ring)) |
75758255 | 1682 | continue; |
35963cf2 | 1683 | drm_sched_wqueue_stop(&ring->sched); |
75758255 AD |
1684 | } |
1685 | ||
8fa76350 | 1686 | seq_puts(m, "run ib test:\n"); |
75758255 AD |
1687 | r = amdgpu_ib_ring_tests(adev); |
1688 | if (r) | |
1689 | seq_printf(m, "ib ring tests failed (%d).\n", r); | |
1690 | else | |
8fa76350 | 1691 | seq_puts(m, "ib ring tests passed.\n"); |
75758255 AD |
1692 | |
1693 | /* go on the scheduler */ | |
1694 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { | |
1695 | struct amdgpu_ring *ring = adev->rings[i]; | |
1696 | ||
9749c868 | 1697 | if (!amdgpu_ring_sched_ready(ring)) |
75758255 | 1698 | continue; |
35963cf2 | 1699 | drm_sched_wqueue_start(&ring->sched); |
75758255 AD |
1700 | } |
1701 | ||
d0fb18b5 | 1702 | up_write(&adev->reset_domain->sem); |
a28fda31 | 1703 | |
a9ffe2a9 AD |
1704 | pm_runtime_mark_last_busy(dev->dev); |
1705 | pm_runtime_put_autosuspend(dev->dev); | |
1706 | ||
75758255 AD |
1707 | return 0; |
1708 | } | |
1709 | ||
98d28ac2 | 1710 | static int amdgpu_debugfs_evict_vram(void *data, u64 *val) |
75758255 | 1711 | { |
98d28ac2 ND |
1712 | struct amdgpu_device *adev = (struct amdgpu_device *)data; |
1713 | struct drm_device *dev = adev_to_drm(adev); | |
a9ffe2a9 AD |
1714 | int r; |
1715 | ||
1716 | r = pm_runtime_get_sync(dev->dev); | |
9eee152a | 1717 | if (r < 0) { |
98d28ac2 | 1718 | pm_runtime_put_autosuspend(dev->dev); |
a9ffe2a9 | 1719 | return r; |
9eee152a | 1720 | } |
75758255 | 1721 | |
58144d28 | 1722 | *val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); |
a9ffe2a9 AD |
1723 | |
1724 | pm_runtime_mark_last_busy(dev->dev); | |
1725 | pm_runtime_put_autosuspend(dev->dev); | |
1726 | ||
75758255 AD |
1727 | return 0; |
1728 | } | |
1729 | ||
98d28ac2 ND |
1730 | |
1731 | static int amdgpu_debugfs_evict_gtt(void *data, u64 *val) | |
87e90c76 | 1732 | { |
98d28ac2 ND |
1733 | struct amdgpu_device *adev = (struct amdgpu_device *)data; |
1734 | struct drm_device *dev = adev_to_drm(adev); | |
a9ffe2a9 AD |
1735 | int r; |
1736 | ||
1737 | r = pm_runtime_get_sync(dev->dev); | |
9eee152a | 1738 | if (r < 0) { |
58144d28 | 1739 | pm_runtime_put_autosuspend(dev->dev); |
a9ffe2a9 | 1740 | return r; |
9eee152a | 1741 | } |
87e90c76 | 1742 | |
58144d28 | 1743 | *val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT); |
a9ffe2a9 AD |
1744 | |
1745 | pm_runtime_mark_last_busy(dev->dev); | |
1746 | pm_runtime_put_autosuspend(dev->dev); | |
1747 | ||
87e90c76 CK |
1748 | return 0; |
1749 | } | |
1750 | ||
e7c47231 AD |
1751 | static int amdgpu_debugfs_benchmark(void *data, u64 val) |
1752 | { | |
1753 | struct amdgpu_device *adev = (struct amdgpu_device *)data; | |
1754 | struct drm_device *dev = adev_to_drm(adev); | |
1755 | int r; | |
1756 | ||
1757 | r = pm_runtime_get_sync(dev->dev); | |
1758 | if (r < 0) { | |
1759 | pm_runtime_put_autosuspend(dev->dev); | |
1760 | return r; | |
1761 | } | |
1762 | ||
1763 | r = amdgpu_benchmark(adev, val); | |
1764 | ||
1765 | pm_runtime_mark_last_busy(dev->dev); | |
1766 | pm_runtime_put_autosuspend(dev->dev); | |
1767 | ||
1768 | return r; | |
1769 | } | |
98d28ac2 ND |
1770 | |
/*
 * seq_file show handler: dump per-process VM/BO information for every
 * open DRM file of this device.
 *
 * Iterates dev->filelist under filelist_mutex; for each client it prints
 * the owning task's pid and name, then reserves the VM root BO and lets
 * amdgpu_debugfs_vm_bo_info() emit the BO lists.  Bails out of the loop
 * on the first reservation failure, returning that error.
 */
static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_file *file;
	int r;

	/* Interruptible so a stuck reader can be killed from userspace. */
	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct amdgpu_fpriv *fpriv = file->driver_priv;
		struct amdgpu_vm *vm = &fpriv->vm;

		seq_printf(m, "pid:%d\tProcess:%s ----------\n",
				vm->task_info.pid, vm->task_info.process_name);
		/* Root BO must be reserved before walking the VM's BO lists. */
		r = amdgpu_bo_reserve(vm->root.bo, true);
		if (r)
			break;
		amdgpu_debugfs_vm_bo_info(vm, m);
		amdgpu_bo_unreserve(vm->root.bo);
	}

	mutex_unlock(&dev->filelist_mutex);

	return r;
}
1799 | ||
98d28ac2 ND |
/* file_operations for the debugfs entries registered in amdgpu_debugfs_init(). */
DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_test_ib);
DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_vm_info);
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_vram_fops, amdgpu_debugfs_evict_vram,
			 NULL, "%lld\n");
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_gtt_fops, amdgpu_debugfs_evict_gtt,
			 NULL, "%lld\n");
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_benchmark_fops, NULL, amdgpu_debugfs_benchmark,
			 "%lld\n");
75758255 | 1808 | |
6698a3d0 JX |
/*
 * Detach all fences between last_seq and sync_seq from the ring's fence
 * driver and hand them to the caller.
 *
 * Each slot in drv->fences between the last signaled seqno and the last
 * emitted seqno is cleared (RCU pointer set to NULL) and the fence, if
 * any, is stored in the caller's fences[] array at the same masked index.
 * The caller later signals and releases them via
 * amdgpu_ib_preempt_signal_fences().
 *
 * NOTE(review): the do/while increments before comparing, so the caller
 * must only invoke this when last_seq != sync_seq (as
 * amdgpu_debugfs_ib_preempt() does) or the loop walks the whole ring.
 */
static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring,
					  struct dma_fence **fences)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t sync_seq, last_seq;

	last_seq = atomic_read(&ring->fence_drv.last_seq);
	sync_seq = ring->fence_drv.sync_seq;

	/* Work with ring-buffer slot indices, not raw seqnos. */
	last_seq &= drv->num_fences_mask;
	sync_seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* Protected by the caller holding the scheduler stopped. */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		fences[last_seq] = fence;

	} while (last_seq != sync_seq);
}
1838 | ||
/*
 * Signal and release every fence previously collected by
 * amdgpu_ib_preempt_fences_swap().  NULL slots are skipped.
 */
static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences,
					    int length)
{
	int i;

	for (i = 0; i < length; i++) {
		struct dma_fence *fence = fences[i];

		if (fence) {
			dma_fence_signal(fence);
			dma_fence_put(fence);
		}
	}
}
1853 | ||
/*
 * Resubmit every job still on the scheduler's pending list.
 *
 * Called after an IB preemption with the scheduler work queue stopped:
 * each pending drm_sched_job is handed back to the driver's run_job()
 * callback so the hardware re-executes it.  The fence reference returned
 * by run_job() is dropped immediately here.
 */
static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct dma_fence *fence;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry(s_job, &sched->pending_list, list) {
		fence = sched->ops->run_job(s_job);
		dma_fence_put(fence);
	}
	spin_unlock(&sched->job_list_lock);
}
1866 | ||
80f8fb91 JX |
/*
 * After a preemption request on a GFX ring, find the job that was
 * interrupted mid-execution and mark it AMDGPU_IB_PREEMPTED; also reap
 * any pending jobs whose finished fence has already signaled.
 *
 * The seqno of the preempted fence is read from *(drv->cpu_addr + 2).
 * If that seqno is not ahead of the last signaled seqno, nothing was
 * actually preempted and only the signaled-job cleanup runs.
 */
static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
{
	struct amdgpu_job *job;
	struct drm_sched_job *s_job, *tmp;
	uint32_t preempt_seq;
	struct dma_fence *fence, **ptr;
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct drm_gpu_scheduler *sched = &ring->sched;
	bool preempted = true;

	/* Only GFX rings publish the preempted seqno this way. */
	if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
		return;

	preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
	if (preempt_seq <= atomic_read(&drv->last_seq)) {
		preempted = false;
		goto no_preempt;
	}

	preempt_seq &= drv->num_fences_mask;
	ptr = &drv->fences[preempt_seq];
	fence = rcu_dereference_protected(*ptr, 1);

no_preempt:
	/*
	 * NOTE: on the no_preempt path 'fence' is never read — the
	 * 'preempted &&' short-circuit below guards it.
	 */
	spin_lock(&sched->job_list_lock);
	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
			/* remove job from ring_mirror_list */
			list_del_init(&s_job->list);
			sched->ops->free_job(s_job);
			continue;
		}
		job = to_amdgpu_job(s_job);
		if (preempted && (&job->hw_fence) == fence)
			/* mark the job as preempted */
			job->preemption_status |= AMDGPU_IB_PREEMPTED;
	}
	spin_unlock(&sched->job_list_lock);
}
1906 | ||
6698a3d0 JX |
/*
 * debugfs write handler: preempt the IB currently executing on ring
 * number 'val'.
 *
 * Sequence: stop the scheduler work queue, ask the ring to preempt,
 * process fences; if the preemption left unfinished work (last_seq !=
 * sync_seq) mark the partial job, swap out and force-complete the old
 * fences, resubmit the pending jobs, wait for them, then signal the
 * swapped-out fences.  The reset_domain sem is held (read) throughout so
 * a concurrent GPU reset cannot see the scheduler unparked unexpectedly.
 *
 * Returns 0 on success, -EINVAL for a bad ring, -EBUSY if the previous
 * preemption never completed, -ENOMEM on allocation failure, or the
 * error from down_read_killable()/amdgpu_ring_preempt_ib().
 */
static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
{
	int r, length;
	struct amdgpu_ring *ring;
	struct dma_fence **fences = NULL;
	struct amdgpu_device *adev = (struct amdgpu_device *)data;

	if (val >= AMDGPU_MAX_RINGS)
		return -EINVAL;

	ring = adev->rings[val];

	if (!amdgpu_ring_sched_ready(ring) ||
	    !ring->funcs->preempt_ib)
		return -EINVAL;

	/* the last preemption failed */
	if (ring->trail_seq != le32_to_cpu(*ring->trail_fence_cpu_addr))
		return -EBUSY;

	/* One slot per fence-ring entry. */
	length = ring->fence_drv.num_fences_mask + 1;
	fences = kcalloc(length, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Avoid accidentally unparking the sched thread during GPU reset */
	r = down_read_killable(&adev->reset_domain->sem);
	if (r)
		goto pro_end;

	/* stop the scheduler */
	drm_sched_wqueue_stop(&ring->sched);

	/* preempt the IB */
	r = amdgpu_ring_preempt_ib(ring);
	if (r) {
		DRM_WARN("failed to preempt ring %d\n", ring->idx);
		goto failure;
	}

	amdgpu_fence_process(ring);

	if (atomic_read(&ring->fence_drv.last_seq) !=
	    ring->fence_drv.sync_seq) {
		DRM_INFO("ring %d was preempted\n", ring->idx);

		amdgpu_ib_preempt_mark_partial_job(ring);

		/* swap out the old fences */
		amdgpu_ib_preempt_fences_swap(ring, fences);

		amdgpu_fence_driver_force_completion(ring);

		/* resubmit unfinished jobs */
		amdgpu_ib_preempt_job_recovery(&ring->sched);

		/* wait for jobs finished */
		amdgpu_fence_wait_empty(ring);

		/* signal the old fences */
		amdgpu_ib_preempt_signal_fences(fences, length);
	}

failure:
	/* restart the scheduler */
	drm_sched_wqueue_start(&ring->sched);

	up_read(&adev->reset_domain->sem);

pro_end:
	kfree(fences);

	return r;
}
1981 | ||
0cf64555 CG |
/*
 * debugfs write handler: force the shader clock (SCLK) to a fixed
 * frequency by setting the soft min and max to the same value.
 *
 * Rejects the request on SR-IOV VFs unless one-VF mode is enabled.  The
 * written value must lie inside the range reported by
 * amdgpu_dpm_get_dpm_freq_range(); if range queries are unsupported
 * (-EOPNOTSUPP) the write is silently accepted as a no-op.
 */
static int amdgpu_debugfs_sclk_set(void *data, u64 val)
{
	int ret = 0;
	uint32_t max_freq, min_freq;
	struct amdgpu_device *adev = (struct amdgpu_device *)data;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return ret;
	}

	ret = amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min_freq, &max_freq);
	if (ret == -EOPNOTSUPP) {
		ret = 0;
		goto out;
	}
	if (ret || val > max_freq || val < min_freq) {
		ret = -EINVAL;
		goto out;
	}

	/* Pin SCLK by collapsing the soft range to a single frequency. */
	ret = amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK, (uint32_t)val, (uint32_t)val);
	if (ret)
		ret = -EINVAL;

out:
	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return ret;
}
2017 | ||
/* Write-only attributes: values written trigger the handlers above. */
DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
			 amdgpu_debugfs_ib_preempt, "%llu\n");

DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
			 amdgpu_debugfs_sclk_set, "%llu\n");
5ce5a584 SA |
/*
 * debugfs read handler: print the configured reset-dump register list,
 * one "0x%x\n" offset per line.
 *
 * The reset_domain sem is deliberately dropped before each
 * copy_to_user() (which may fault and sleep) and re-acquired afterwards,
 * so num_regs / the list may change between iterations; each individual
 * register read is consistent under the lock.
 */
static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
				char __user *buf, size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	char reg_offset[12];	/* "0x" + 8 hex digits + '\n' + NUL */
	int i, ret, len = 0;

	/* Single-shot read: any non-zero offset means EOF. */
	if (*pos)
		return 0;

	memset(reg_offset, 0, 12);
	ret = down_read_killable(&adev->reset_domain->sem);
	if (ret)
		return ret;

	for (i = 0; i < adev->reset_info.num_regs; i++) {
		sprintf(reg_offset, "0x%x\n", adev->reset_info.reset_dump_reg_list[i]);
		up_read(&adev->reset_domain->sem);
		if (copy_to_user(buf + len, reg_offset, strlen(reg_offset)))
			return -EFAULT;

		len += strlen(reg_offset);
		ret = down_read_killable(&adev->reset_domain->sem);
		if (ret)
			return ret;
	}

	up_read(&adev->reset_domain->sem);
	*pos += len;

	return len;
}
2056 | ||
/*
 * debugfs write handler: replace the reset-dump register list with the
 * hex offsets parsed from the user buffer.
 *
 * Parses up to 10 characters at a time into reg_offset with
 * sscanf("%X %n"), growing 'tmp' one entry per parsed value via
 * krealloc_array().  On success a value array of matching length is
 * allocated and both arrays are swap()ed into adev->reset_info under
 * the reset_domain write lock, so the old arrays are freed at
 * error_free and readers never see a half-updated pair.
 *
 * The "if (tmp != new) kfree(tmp)" guard handles the alias created when
 * krealloc_array() fails mid-parse ('new' may equal 'tmp' on the
 * success path only until the second kmalloc_array overwrites 'new').
 */
static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
				const char __user *buf, size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	char reg_offset[11];
	uint32_t *new = NULL, *tmp = NULL;
	int ret, i = 0, len = 0;

	do {
		memset(reg_offset, 0, 11);
		if (copy_from_user(reg_offset, buf + len,
					min(10, ((int)size-len)))) {
			ret = -EFAULT;
			goto error_free;
		}

		new = krealloc_array(tmp, i + 1, sizeof(uint32_t), GFP_KERNEL);
		if (!new) {
			ret = -ENOMEM;
			goto error_free;
		}
		tmp = new;
		/* %n reports how many chars were consumed for this entry. */
		if (sscanf(reg_offset, "%X %n", &tmp[i], &ret) != 1) {
			ret = -EINVAL;
			goto error_free;
		}

		len += ret;
		i++;
	} while (len < size);

	/* Matching value array, filled in at reset time. */
	new = kmalloc_array(i, sizeof(uint32_t), GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto error_free;
	}
	ret = down_write_killable(&adev->reset_domain->sem);
	if (ret)
		goto error_free;

	swap(adev->reset_info.reset_dump_reg_list, tmp);
	swap(adev->reset_info.reset_dump_reg_value, new);
	adev->reset_info.num_regs = i;
	up_write(&adev->reset_domain->sem);
	ret = size;

error_free:
	if (tmp != new)
		kfree(tmp);
	kfree(new);
	return ret;
}
2109 | ||
/* fops for the "amdgpu_reset_dump_register_list" debugfs file. */
static const struct file_operations amdgpu_reset_dump_register_list = {
	.owner = THIS_MODULE,
	.read = amdgpu_reset_dump_register_list_read,
	.write = amdgpu_reset_dump_register_list_write,
	.llseek = default_llseek
};
2116 | ||
75758255 AD |
2117 | int amdgpu_debugfs_init(struct amdgpu_device *adev) |
2118 | { | |
98d28ac2 | 2119 | struct dentry *root = adev_to_drm(adev)->primary->debugfs_root; |
88293c03 | 2120 | struct dentry *ent; |
fd23cfcc | 2121 | int r, i; |
c5820361 | 2122 | |
5b9581df ND |
2123 | if (!debugfs_initialized()) |
2124 | return 0; | |
2125 | ||
6ff7fddb | 2126 | debugfs_create_x32("amdgpu_smu_debug", 0600, root, |
7e31a858 | 2127 | &adev->pm.smu_debug_mask); |
6ff7fddb | 2128 | |
98d28ac2 | 2129 | ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev, |
88293c03 | 2130 | &fops_ib_preempt); |
59715cff | 2131 | if (IS_ERR(ent)) { |
6698a3d0 | 2132 | DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n"); |
59715cff | 2133 | return PTR_ERR(ent); |
6698a3d0 JX |
2134 | } |
2135 | ||
98d28ac2 | 2136 | ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev, |
88293c03 | 2137 | &fops_sclk_set); |
59715cff | 2138 | if (IS_ERR(ent)) { |
0cf64555 | 2139 | DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n"); |
59715cff | 2140 | return PTR_ERR(ent); |
0cf64555 CG |
2141 | } |
2142 | ||
c5820361 | 2143 | /* Register debugfs entries for amdgpu_ttm */ |
98d28ac2 | 2144 | amdgpu_ttm_debugfs_init(adev); |
373720f7 | 2145 | amdgpu_debugfs_pm_init(adev); |
98d28ac2 ND |
2146 | amdgpu_debugfs_sa_init(adev); |
2147 | amdgpu_debugfs_fence_init(adev); | |
2148 | amdgpu_debugfs_gem_init(adev); | |
3f5cea67 | 2149 | |
f9d64e6c AD |
2150 | r = amdgpu_debugfs_regs_init(adev); |
2151 | if (r) | |
2152 | DRM_ERROR("registering register debugfs failed (%d).\n", r); | |
2153 | ||
98d28ac2 | 2154 | amdgpu_debugfs_firmware_init(adev); |
e50d9ba0 | 2155 | amdgpu_ta_if_debugfs_init(adev); |
cd9e29e7 | 2156 | |
b2662d4c | 2157 | amdgpu_debugfs_mes_event_log_init(adev); |
2158 | ||
d090e7db | 2159 | #if defined(CONFIG_DRM_AMD_DC) |
d09ef243 | 2160 | if (adev->dc_enabled) |
afd3a359 | 2161 | dtn_debugfs_init(adev); |
d090e7db AD |
2162 | #endif |
2163 | ||
fd23cfcc AD |
2164 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { |
2165 | struct amdgpu_ring *ring = adev->rings[i]; | |
2166 | ||
2167 | if (!ring) | |
2168 | continue; | |
2169 | ||
62d266b2 | 2170 | amdgpu_debugfs_ring_init(adev, ring); |
fd23cfcc AD |
2171 | } |
2172 | ||
8fa76350 | 2173 | for (i = 0; i < adev->vcn.num_vcn_inst; i++) { |
11eb648d RD |
2174 | if (!amdgpu_vcnfw_log) |
2175 | break; | |
2176 | ||
2177 | if (adev->vcn.harvest_config & (1 << i)) | |
2178 | continue; | |
2179 | ||
2180 | amdgpu_debugfs_vcn_fwlog_init(adev, i, &adev->vcn.inst[i]); | |
2181 | } | |
2182 | ||
204eaac6 | 2183 | amdgpu_ras_debugfs_create_all(adev); |
a4322e18 | 2184 | amdgpu_rap_debugfs_init(adev); |
ecaafb7b | 2185 | amdgpu_securedisplay_debugfs_init(adev); |
19ae3330 JC |
2186 | amdgpu_fw_attestation_debugfs_init(adev); |
2187 | ||
98d28ac2 ND |
2188 | debugfs_create_file("amdgpu_evict_vram", 0444, root, adev, |
2189 | &amdgpu_evict_vram_fops); | |
2190 | debugfs_create_file("amdgpu_evict_gtt", 0444, root, adev, | |
2191 | &amdgpu_evict_gtt_fops); | |
2192 | debugfs_create_file("amdgpu_test_ib", 0444, root, adev, | |
2193 | &amdgpu_debugfs_test_ib_fops); | |
2194 | debugfs_create_file("amdgpu_vm_info", 0444, root, adev, | |
2195 | &amdgpu_debugfs_vm_info_fops); | |
e7c47231 AD |
2196 | debugfs_create_file("amdgpu_benchmark", 0200, root, adev, |
2197 | &amdgpu_benchmark_fops); | |
5ce5a584 SA |
2198 | debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev, |
2199 | &amdgpu_reset_dump_register_list); | |
98d28ac2 ND |
2200 | |
2201 | adev->debugfs_vbios_blob.data = adev->bios; | |
2202 | adev->debugfs_vbios_blob.size = adev->bios_size; | |
2203 | debugfs_create_blob("amdgpu_vbios", 0444, root, | |
2204 | &adev->debugfs_vbios_blob); | |
2205 | ||
81d1bf01 AD |
2206 | adev->debugfs_discovery_blob.data = adev->mman.discovery_bin; |
2207 | adev->debugfs_discovery_blob.size = adev->mman.discovery_tmr_size; | |
2208 | debugfs_create_blob("amdgpu_discovery", 0444, root, | |
2209 | &adev->debugfs_discovery_blob); | |
2210 | ||
98d28ac2 | 2211 | return 0; |
75758255 AD |
2212 | } |
2213 | ||
#else
/* CONFIG_DEBUG_FS disabled: no-op stubs so callers need no #ifdef. */
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	return 0;
}
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
#endif