/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *	Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
#include "amdgpu_cs.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"

#include "amdgpu_ras.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware versions for VI */
#define FW_1_65_10	((1 << 24) | (65 << 16) | (10 << 8))
#define FW_1_87_11	((1 << 24) | (87 << 16) | (11 << 8))
#define FW_1_87_12	((1 << 24) | (87 << 16) | (12 << 8))
#define FW_1_37_15	((1 << 24) | (37 << 16) | (15 << 8))

/* Polaris10/11 firmware version */
#define FW_1_66_16	((1 << 24) | (66 << 16) | (16 << 8))

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_SI
#define FIRMWARE_TAHITI		"amdgpu/tahiti_uvd.bin"
#define FIRMWARE_VERDE		"amdgpu/verde_uvd.bin"
#define FIRMWARE_PITCAIRN	"amdgpu/pitcairn_uvd.bin"
#define FIRMWARE_OLAND		"amdgpu/oland_uvd.bin"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"amdgpu/bonaire_uvd.bin"
#define FIRMWARE_KABINI		"amdgpu/kabini_uvd.bin"
#define FIRMWARE_KAVERI		"amdgpu/kaveri_uvd.bin"
#define FIRMWARE_HAWAII		"amdgpu/hawaii_uvd.bin"
#define FIRMWARE_MULLINS	"amdgpu/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_uvd.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_uvd.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_uvd.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_uvd.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_uvd.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_uvd.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_uvd.bin"

/* These are common relative offsets for all asics, from uvd_7_0_offset.h */
#define UVD_GPCOM_VCPU_CMD	0x03c3
#define UVD_GPCOM_VCPU_DATA0	0x03c4
#define UVD_GPCOM_VCPU_DATA1	0x03c5
#define UVD_NO_OP		0x03ff
#define UVD_BASE_SI		0x3800

/*
 * amdgpu_uvd_cs_ctx - Command submission parser context
 *
 * Used for emulating virtual memory support on UVD 4.2.
 */
struct amdgpu_uvd_cs_ctx {
	struct amdgpu_cs_parser *parser;
	unsigned int reg, count;
	unsigned int data0, data1;
	unsigned int idx;
	struct amdgpu_ib *ib;

	/* does the IB have a msg command */
	bool has_msg_cmd;

	/* minimum buffer sizes */
	unsigned int *buf_sizes;
};

#ifdef CONFIG_DRM_AMDGPU_SI
MODULE_FIRMWARE(FIRMWARE_TAHITI);
MODULE_FIRMWARE(FIRMWARE_VERDE);
MODULE_FIRMWARE(FIRMWARE_PITCAIRN);
MODULE_FIRMWARE(FIRMWARE_OLAND);
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo);

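/**
 * amdgpu_uvd_create_msg_bo_helper - allocate a BO for UVD messages
 *
 * @adev: amdgpu_device pointer
 * @size: size of the BO in bytes
 * @bo_ptr: resulting message BO
 *
 * Allocate a pinned and kmapped BO in GTT. On chips without 64 bit UVD
 * addressing, move it into the 256MB UVD segment of VRAM instead.
 */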
static int amdgpu_uvd_create_msg_bo_helper(struct amdgpu_device *adev,
					   uint32_t size,
					   struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_bo *bo = NULL;
	void *addr;
	int r;

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &bo, NULL, &addr);
	if (r)
		return r;

	if (adev->uvd.address_64_bit)
		goto succ;

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
	amdgpu_uvd_force_into_uvd_segment(bo);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		goto err;
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
	if (r)
		goto err_pin;
	r = amdgpu_bo_kmap(bo, &addr);
	if (r)
		goto err_kmap;
succ:
	amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;
	return 0;
err_kmap:
	amdgpu_bo_unpin(bo);
err_pin:
err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

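/**
 * amdgpu_uvd_sw_init - initialize UVD on the software side
 *
 * @adev: amdgpu_device pointer
 *
 * Request the matching UVD firmware, parse its version, allocate the
 * VCPU buffer objects and reset the handle bookkeeping.
 */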
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned int family_id;
	int i, j, r;

	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
		fw_name = FIRMWARE_TAHITI;
		break;
	case CHIP_VERDE:
		fw_name = FIRMWARE_VERDE;
		break;
	case CHIP_PITCAIRN:
		fw_name = FIRMWARE_PITCAIRN;
		break;
	case CHIP_OLAND:
		fw_name = FIRMWARE_OLAND;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;
	default:
		return -EINVAL;
	}

	r = amdgpu_ucode_request(adev, &adev->uvd.fw, AMDGPU_UCODE_REQUIRED, "%s", fw_name);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
			fw_name);
		amdgpu_ucode_release(&adev->uvd.fw);
		return r;
	}

	/* Set the default UVD handles that the firmware can handle */
	adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;

	if (adev->asic_type < CHIP_VEGA20) {
		unsigned int version_major, version_minor;

		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found UVD firmware Version: %u.%u Family ID: %u\n",
			 version_major, version_minor, family_id);

		/*
		 * Limit the number of UVD handles depending on microcode major
		 * and minor versions. The firmware version which has 40 UVD
		 * instances support is 1.80. So all subsequent versions should
		 * also have the same support.
		 */
		if ((version_major > 0x01) ||
		    ((version_major == 0x01) && (version_minor >= 0x50)))
			adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

		adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
					(family_id << 8));

		if ((adev->asic_type == CHIP_POLARIS10 ||
		     adev->asic_type == CHIP_POLARIS11) &&
		    (adev->uvd.fw_version < FW_1_66_16))
			DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
				  version_major, version_minor);
	} else {
		unsigned int enc_major, enc_minor, dec_minor;

		dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f;
		enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3;
		DRM_INFO("Found UVD firmware ENC: %u.%u DEC: .%u Family ID: %u\n",
			 enc_major, enc_minor, dec_minor, family_id);

		adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

		adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version);
	}

	bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
		  + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM |
					    AMDGPU_GEM_DOMAIN_GTT,
					    &adev->uvd.inst[j].vcpu_bo,
					    &adev->uvd.inst[j].gpu_addr,
					    &adev->uvd.inst[j].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
			return r;
		}
	}

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		atomic_set(&adev->uvd.handles[i], 0);
		adev->uvd.filp[i] = NULL;
	}

	/* from uvd v5.0 HW addressing capacity increased to 64 bits */
	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	r = amdgpu_uvd_create_msg_bo_helper(adev, 128 << 10, &adev->uvd.ib_bo);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_TONGA:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
		break;
	case CHIP_CARRIZO:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
		break;
	case CHIP_FIJI:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
		break;
	case CHIP_STONEY:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
		break;
	default:
		adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
	}

	return 0;
}

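/**
 * amdgpu_uvd_sw_fini - tear down UVD on the software side
 *
 * @adev: amdgpu_device pointer
 *
 * Destroy the kernel entity, free the VCPU BOs and rings of all
 * instances and release the firmware.
 */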
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	void *addr = amdgpu_bo_kptr(adev->uvd.ib_bo);
	int i, j;

	drm_sched_entity_destroy(&adev->uvd.entity);

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		kvfree(adev->uvd.inst[j].saved_bo);

		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
				      &adev->uvd.inst[j].gpu_addr,
				      (void **)&adev->uvd.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->uvd.inst[j].ring);

		for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	amdgpu_bo_free_kernel(&adev->uvd.ib_bo, NULL, &addr);
	amdgpu_ucode_release(&adev->uvd.fw);

	return 0;
}

/**
 * amdgpu_uvd_entity_init - init entity
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring pointer to check
 *
 * Initialize the entity used for handle management in the kernel driver.
 */
int amdgpu_uvd_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (ring == &adev->uvd.inst[0].ring) {
		struct drm_gpu_scheduler *sched = &ring->sched;
		int r;

		r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL,
					  &sched, 1, NULL);
		if (r) {
			DRM_ERROR("Failed setting up UVD kernel entity.\n");
			return r;
		}
	}

	return 0;
}

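/**
 * amdgpu_uvd_prepare_suspend - save the VCPU BO content
 *
 * @adev: amdgpu_device pointer
 *
 * Cancel the idle work and save a CPU copy of each instance's VCPU BO
 * so it can be restored on resume.
 */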
int amdgpu_uvd_prepare_suspend(struct amdgpu_device *adev)
{
	unsigned int size;
	void *ptr;
	int i, j, idx;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	/* only valid for physical mode */
	if (adev->asic_type < CHIP_POLARIS10) {
		for (i = 0; i < adev->uvd.max_handles; ++i)
			if (atomic_read(&adev->uvd.handles[i]))
				break;

		if (i == adev->uvd.max_handles)
			return 0;
	}

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		if (adev->uvd.inst[j].vcpu_bo == NULL)
			continue;

		size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
		ptr = adev->uvd.inst[j].cpu_addr;

		adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->uvd.inst[j].saved_bo)
			return -ENOMEM;

		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
			/* re-write 0 since err_event_athub will corrupt VCPU buffer */
			if (amdgpu_ras_intr_triggered())
				memset(adev->uvd.inst[j].saved_bo, 0, size);
			else
				memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);

			drm_dev_exit(idx);
		}
	}

	return 0;
}

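/**
 * amdgpu_uvd_suspend - suspend UVD
 *
 * @adev: amdgpu_device pointer
 *
 * Warn that the VCPU state may be lost if a RAS ERREVENT_ATHUB
 * interrupt was triggered; the state itself was already saved in
 * amdgpu_uvd_prepare_suspend().
 */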
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	if (amdgpu_ras_intr_triggered())
		DRM_WARN("UVD VCPU state may be lost due to RAS ERREVENT_ATHUB_INTERRUPT\n");

	return 0;
}

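/**
 * amdgpu_uvd_resume - restore the VCPU BO content
 *
 * @adev: amdgpu_device pointer
 *
 * Copy the saved VCPU state back into the BO of each instance, or
 * reload the firmware image and clear the rest of the buffer if
 * nothing was saved.
 */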
int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned int size;
	void *ptr;
	int i, idx;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		if (adev->uvd.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
		ptr = adev->uvd.inst[i].cpu_addr;

		if (adev->uvd.inst[i].saved_bo != NULL) {
			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
				memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
				drm_dev_exit(idx);
			}
			kvfree(adev->uvd.inst[i].saved_bo);
			adev->uvd.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned int offset;

			hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				if (drm_dev_enter(adev_to_drm(adev), &idx)) {
					memcpy_toio(adev->uvd.inst[i].cpu_addr,
						    adev->uvd.fw->data + offset,
						    le32_to_cpu(hdr->ucode_size_bytes));
					drm_dev_exit(idx);
				}
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
			/* to restore uvd fence seq */
			amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
		}
	}
	return 0;
}

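/**
 * amdgpu_uvd_free_handles - free all handles of a file
 *
 * @adev: amdgpu_device pointer
 * @filp: DRM file the handles belong to
 *
 * Send a destroy message for every handle still owned by @filp, e.g.
 * when the file is closed, and wait for the resulting fences.
 */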
void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
	int i, r;

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);

		if (handle != 0 && adev->uvd.filp[i] == filp) {
			struct dma_fence *fence;

			r = amdgpu_uvd_get_destroy_msg(ring, handle, false,
						       &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD %d!\n", r);
				continue;
			}

			dma_fence_wait(fence, false);
			dma_fence_put(fence);

			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}
}

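/**
 * amdgpu_uvd_force_into_uvd_segment - limit BO placements to the UVD segment
 *
 * @abo: buffer object to restrict
 *
 * Restrict all placements to the first 256MB and require contiguous
 * VRAM, the range older UVD hardware can actually address.
 */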
static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
{
	int i;

	for (i = 0; i < abo->placement.num_placement; ++i) {
		abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
		if (abo->placements[i].mem_type == TTM_PL_VRAM)
			abo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
	}
}

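/**
 * amdgpu_uvd_get_addr_from_ctx - get the GPU address of the current command
 *
 * @ctx: UVD parser context
 *
 * Combine the DATA0/DATA1 register writes into the 64 bit address the
 * current command operates on.
 */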
static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
{
	uint32_t lo, hi;
	uint64_t addr;

	lo = amdgpu_ib_get_value(ctx->ib, ctx->data0);
	hi = amdgpu_ib_get_value(ctx->ib, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	return addr;
}

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * nobody is violating a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct ttm_operation_ctx tctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r = 0;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
		return r;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_ib_get_value(ctx->ib, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

			amdgpu_bo_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
	}

	return r;
}

/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @adev: amdgpu_device pointer
 * @msg: pointer to message structure
 * @buf_sizes: placeholder to put the different buffer lengths
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
 */
static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
				    unsigned int buf_sizes[])
{
	unsigned int stream_type = msg[4];
	unsigned int width = msg[6];
	unsigned int height = msg[7];
	unsigned int dpb_size = msg[9];
	unsigned int pitch = msg[28];
	unsigned int level = msg[57];

	unsigned int width_in_mb = width / 16;
	unsigned int height_in_mb = ALIGN(height / 16, 2);
	unsigned int fs_in_mb = width_in_mb * height_in_mb;

	unsigned int image_size, tmp, min_dpb_size, num_dpb_buffer;
	unsigned int min_ctx_size = ~0;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	case 7: /* H264 Perf */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		if (!adev->uvd.use_ctx_buf) {
			/* macroblock context buffer */
			min_dpb_size +=
				width_in_mb * height_in_mb * num_dpb_buffer * 192;

			/* IT surface buffer */
			min_dpb_size += width_in_mb * height_in_mb * 32;
		} else {
			/* macroblock context buffer */
			min_ctx_size =
				width_in_mb * height_in_mb * num_dpb_buffer * 192;
		}
		break;

	case 8: /* MJPEG */
		min_dpb_size = 0;
		break;

	case 16: /* H265 */
		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
		image_size = ALIGN(image_size, 256);

		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
		min_dpb_size = image_size * num_dpb_buffer;
		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
			       * 16 * num_dpb_buffer + 52 * 1024;
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	buf_sizes[0x4] = min_ctx_size;
	/* store image width to adjust nb memory pstate */
	adev->uvd.decode_image_width = width;
	return 0;
}

/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned int offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	void *ptr;
	long r;
	int i;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		amdgpu_bo_kunmap(bo);
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	switch (msg_type) {
	case 0:
		/* it's a create msg, calc image size (width * height) */
		amdgpu_bo_kunmap(bo);

		/* try to alloc a new handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				DRM_ERROR("Handle 0x%x already in use!\n",
					  handle);
				return -EINVAL;
			}

			if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
				adev->uvd.filp[i] = ctx->parser->filp;
				return 0;
			}
		}

		DRM_ERROR("No more free UVD handles!\n");
		return -ENOSPC;

	case 1:
		/* it's a decode msg, calc buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

		/* validate the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				if (adev->uvd.filp[i] != ctx->parser->filp) {
					DRM_ERROR("UVD handle collision detected!\n");
					return -EINVAL;
				}
				return 0;
			}
		}

		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
		return -ENOENT;

	case 2:
		/* it's a destroy msg, free the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i)
			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;

	default:
		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
	}

	amdgpu_bo_kunmap(bo);
	return -EINVAL;
}

/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t start, end;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
		return r;
	}

	start = amdgpu_bo_gpu_offset(bo);

	end = (mapping->last + 1 - mapping->start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

	amdgpu_ib_set_value(ctx->ib, ctx->data0, lower_32_bits(start));
	amdgpu_ib_set_value(ctx->ib, ctx->data1, upper_32_bits(start));

	cmd = amdgpu_ib_get_value(ctx->ib, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned int)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd == 0x206) {
		if ((end - start) < ctx->buf_sizes[4]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned int)(end - start),
				  ctx->buf_sizes[4]);
			return -EINVAL;
		}
	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %llx-%llx crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %llx-%llx out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	int i, r;

	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned int reg = ctx->reg + i;

		if (ctx->idx >= ctx->ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
		case mmUVD_NO_OP:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}

/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	int r;

	for (ctx->idx = 0; ctx->idx < ctx->ib->length_dw;) {
		uint32_t cmd = amdgpu_ib_get_value(ctx->ib, ctx->idx);
		unsigned int type = CP_PACKET_GET_TYPE(cmd);

		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", type);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 * @job: the job to parse
 * @ib: the IB to patch
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
	unsigned int buf_sizes[] = {
		[0x00000000] = 2048,
		[0x00000001] = 0xFFFFFFFF,
		[0x00000002] = 0xFFFFFFFF,
		[0x00000003] = 2048,
		[0x00000004] = 0xFFFFFFFF,
	};
	int r;

	job->vm = NULL;

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib = ib;

	/* first round only required on chips without UVD 64 bit address support */
	if (!parser->adev->uvd.address_64_bit) {
		/* first round, make sure the buffers are actually in the UVD segment */
		r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
		if (r)
			return r;
	}

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	return 0;
}

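/**
 * amdgpu_uvd_send_msg - submit a UVD message
 *
 * @ring: UVD ring to write to
 * @bo: BO containing the message
 * @direct: submit directly to the ring or go through the scheduler entity
 * @fence: optional fence to return
 *
 * Build a small IB pointing at the message in @bo, submit it and fence
 * the BO with the result.
 */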
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	uint32_t offset, data[4];
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     64, direct ? AMDGPU_IB_POOL_DIRECT :
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	if (adev->asic_type >= CHIP_VEGA10)
		offset = adev->reg_offset[UVD_HWIP][ring->me][1];
	else
		offset = UVD_BASE_SI;

	data[0] = PACKET0(offset + UVD_GPCOM_VCPU_DATA0, 0);
	data[1] = PACKET0(offset + UVD_GPCOM_VCPU_DATA1, 0);
	data[2] = PACKET0(offset + UVD_GPCOM_VCPU_CMD, 0);
	data[3] = PACKET0(offset + UVD_NO_OP, 0);

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = data[0];
	ib->ptr[1] = addr;
	ib->ptr[2] = data[1];
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = data[2];
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = data[3];
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	if (direct) {
		r = amdgpu_job_submit_direct(job, ring, &f);
		if (r)
			goto err_free;
	} else {
		r = drm_sched_job_add_resv_dependencies(&job->base,
							bo->tbo.base.resv,
							DMA_RESV_USAGE_KERNEL);
		if (r)
			goto err_free;

		f = amdgpu_job_submit(job);
	}

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);
	return r;
}

/* multiple fence commands without any stream commands in between can
 * crash the vcpu so just try to emit a dummy create/destroy msg to
 * avoid this
 */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = adev->uvd.ib_bo;
	uint32_t *msg;
	int i;

	msg = amdgpu_bo_kptr(bo);
	/* stitch together a UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_uvd_send_msg(ring, bo, true, fence);
}

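/**
 * amdgpu_uvd_get_destroy_msg - construct and send a destroy message
 *
 * @ring: UVD ring to use
 * @handle: session handle to destroy
 * @direct: submit directly to the ring or go through the scheduler entity
 * @fence: optional fence to return
 *
 * Close a session by stitching together and sending a destroy msg
 * for @handle.
 */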
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	if (direct) {
		bo = adev->uvd.ib_bo;
	} else {
		r = amdgpu_uvd_create_msg_bo_helper(adev, 4096, &bo);
		if (r)
			return r;
	}

	msg = amdgpu_bo_kptr(bo);
	/* stitch together a UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	r = amdgpu_uvd_send_msg(ring, bo, direct, fence);

	if (!direct)
		amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);

	return r;
}

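/**
 * amdgpu_uvd_idle_work_handler - power down UVD when idle
 *
 * @work: delayed work item
 *
 * Gate clocks and power down the UVD block once no fences are left
 * outstanding on any ring, otherwise re-schedule the work.
 */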
static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.idle_work.work);
	unsigned int fences = 0, i, j;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
		for (j = 0; j < adev->uvd.num_enc_rings; ++j)
			fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
	}

	if (fences == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
			/* shutdown the UVD block */
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
	}
}

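/**
 * amdgpu_uvd_ring_begin_use - power up UVD before use
 *
 * @ring: UVD ring about to be used
 *
 * Cancel the pending idle work and ungate clocks and power if the
 * block was idle. Does nothing under SR-IOV.
 */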
void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_PG_STATE_UNGATE);
		}
	}
}

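/**
 * amdgpu_uvd_ring_end_use - schedule powering down UVD again
 *
 * @ring: UVD ring that was just used
 *
 * Re-arm the idle work so the block is powered down again after
 * UVD_IDLE_TIMEOUT without activity.
 */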
void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}

/**
 * amdgpu_uvd_ring_test_ib - test ib execution
 *
 * @ring: amdgpu_ring pointer
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test if we can successfully execute an IB
 */
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_uvd_get_create_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	dma_fence_put(fence);
	if (r == 0)
		r = -ETIMEDOUT;
	if (r < 0)
		goto error;

	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);

error:
	return r;
}

/**
 * amdgpu_uvd_used_handles - returns used UVD handles
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the number of UVD handles in use
 */
uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
{
	unsigned int i;
	uint32_t used_handles = 0;

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		/*
		 * Handles can be freed in any order, and not
		 * necessarily linear. So we need to count
		 * all non-zero handles.
		 */
		if (atomic_read(&adev->uvd.handles[i]))
			used_handles++;
	}

	return used_handles;
}