/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	int r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->pdev->device == 0x15d8)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bits 20-23 hold the encoder major version and are non-zero under the
	 * new naming convention. Under the old naming convention this field is
	 * part of version minor and DRM_DISABLED_FLAG; since the latest version
	 * minor there is 0x5B and DRM_DISABLED_FLAG is zero, the field is
	 * always zero so far under the old convention. These four bits are
	 * therefore used to tell which naming convention is present.
	 */
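	/*
	 * New-convention version word layout, as decoded below:
	 *   [31:28] VEP  [27:24] decoder  [23:20] encoder major
	 *   [19:12] encoder minor  [11:0] firmware revision
	 */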
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
			enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
			version_major, version_minor, family_id);
	}

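	/*
	 * The VCPU BO backs the firmware stack, heap and up to 40 session
	 * contexts; when the ucode is not loaded through PSP, the firmware
	 * image itself is also placed at the start of the BO.
	 */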
	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
		  + AMDGPU_VCN_SESSION_SIZE * 40;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	return 0;
}

int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i;

	kvfree(adev->vcn.saved_bo);

	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
			      &adev->vcn.gpu_addr,
			      (void **)&adev->vcn.cpu_addr);

	amdgpu_ring_fini(&adev->vcn.ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

	amdgpu_ring_fini(&adev->vcn.ring_jpeg);

	release_firmware(adev->vcn.fw);

	return 0;
}

int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (adev->vcn.vcpu_bo == NULL)
		return 0;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

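	/* VRAM contents can be lost across suspend, so keep a system-memory
	 * copy of the VCPU BO to restore in amdgpu_vcn_resume().
	 */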
	adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
	if (!adev->vcn.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->vcn.saved_bo, ptr, size);

	return 0;
}

int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	if (adev->vcn.saved_bo != NULL) {
		memcpy_toio(ptr, adev->vcn.saved_bo, size);
		kvfree(adev->vcn.saved_bo);
		adev->vcn.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
			memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
				    le32_to_cpu(hdr->ucode_size_bytes));
			size -= le32_to_cpu(hdr->ucode_size_bytes);
			ptr += le32_to_cpu(hdr->ucode_size_bytes);
		}
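		/* No saved image to restore: zero the remainder so the
		 * firmware starts from a clean state.
		 */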
		memset_io(ptr, 0, size);
	}

	return 0;
}

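/* Drive the DPG pause state machine: pause/unpause the non-JPEG and JPEG
 * clients to match @new_state, restoring ring state after each pause.
 */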
static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
				     struct dpg_pause_state *new_state)
{
	int ret_code;
	uint32_t reg_data = 0;
	uint32_t reg_data2 = 0;
	struct amdgpu_ring *ring;

	/* pause/unpause if state is changed */
	if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
			adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
			new_state->fw_based, new_state->jpeg);

		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;

			if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

			if (!ret_code) {
				/* pause DPG non-jpeg */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
						   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
						   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);

				/* Restore */
				ring = &adev->vcn.ring_enc[0];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));

				ring = &adev->vcn.ring_enc[1];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));

				ring = &adev->vcn.ring_dec;
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
					     RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2));
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
			}
		} else {
			/* unpause dpg non-jpeg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.pause_state.fw_based = new_state->fw_based;
	}

	/* pause/unpause if state is changed */
	if (adev->vcn.pause_state.jpeg != new_state->jpeg) {
		DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
			adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
			new_state->fw_based, new_state->jpeg);

		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);

		if (new_state->jpeg == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;

			if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

			if (!ret_code) {
				/* Make sure JRBC snoop is disabled before sending the pause */
				reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
				reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2);

				/* pause DPG jpeg */
				reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
						   UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
						   UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code);

				/* Restore */
				ring = &adev->vcn.ring_jpeg;
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
					     UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
					     UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
					     lower_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
					     upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
					     UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);

				ring = &adev->vcn.ring_dec;
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
					     RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2));
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
			}
		} else {
			/* unpause dpg jpeg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.pause_state.jpeg = new_state->jpeg;
	}

	return 0;
}

static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0;
	unsigned int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
	}

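	/* Under DPG each client stays paused only while it has work in flight. */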
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		amdgpu_vcn_pause_dpg_mode(adev, &new_state);
	}

	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_dec);

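	/* Power-gate only once every VCN ring has drained. */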
	if (fences == 0) {
		amdgpu_gfx_off_ctrl(adev, true);
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

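/* Called before each submission: cancel any pending power-gating, ungate if
 * we had already gone idle and, under DPG, pause the client owning @ring.
 */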
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks) {
		amdgpu_gfx_off_ctrl(adev, false);
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, true);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_UNGATE);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = adev->vcn.pause_state.fw_based;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = adev->vcn.pause_state.jpeg;

		amdgpu_vcn_pause_dpg_mode(adev, &new_state);
	}
}

void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

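/* Poke a token through the decode ring and poll SCRATCH9 until the firmware
 * echoes it back, proving basic command submission works.
 */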
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

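/* Build a small direct-submit IB that hands the decoder the message buffer
 * in @bo, then fence the BO and drop our reference.
 */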
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

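	/* Hand-rolled decoder create message: header sizes, message type,
	 * handle and stream parameters (0x780/0x440 look like a 1920x1088
	 * surface), zero-padded to the full buffer.
	 */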
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

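	/* Matching destroy message: message type 0x2 against the same handle. */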
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}

	dma_fence_put(fence);

error:
	return r;
}

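/* Submit an END command on the encode ring and wait for the read pointer to
 * move past it, confirming the firmware consumed the command.
 */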
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

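	/* The session buffer address points at scratch space past the IB itself. */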
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

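	/* Same layout as the create message above, but op 0x08000002 closes
	 * the session instead of initializing it.
	 */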
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}

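/* Same SCRATCH9 handshake as the decode ring test, using JPEG packets. */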
int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);

	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}

	return r;
}

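/* Emit an IB whose type-0 JPEG packet writes 0xDEADBEEF into SCRATCH9; the
 * IB test below polls the register to confirm the IB actually executed.
 */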
static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	const unsigned ib_size_dw = 16;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, PACKETJ_TYPE0);
	ib->ptr[1] = 0xDEADBEEF;
	for (i = 2; i < 16; i += 2) {
		ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	struct dma_fence *fence = NULL;
	long r = 0;

	r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to set jpeg register (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
		goto error;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto error;
	} else {
		r = 0;
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
	} else {
		DRM_ERROR("ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}

error:
	dma_fence_put(fence);
	return r;
}