Commit | Line | Data |
---|---|---|
d38ceaf9 AD |
1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. | |
3 | * Copyright 2008 Red Hat Inc. | |
4 | * Copyright 2009 Jerome Glisse. | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | |
7 | * copy of this software and associated documentation files (the "Software"), | |
8 | * to deal in the Software without restriction, including without limitation | |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
10 | * and/or sell copies of the Software, and to permit persons to whom the | |
11 | * Software is furnished to do so, subject to the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice shall be included in | |
14 | * all copies or substantial portions of the Software. | |
15 | * | |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
22 | * OTHER DEALINGS IN THE SOFTWARE. | |
23 | * | |
24 | * Authors: Dave Airlie | |
25 | * Alex Deucher | |
26 | * Jerome Glisse | |
27 | */ | |
0875dc9e | 28 | #include <linux/kthread.h> |
d38ceaf9 AD |
29 | #include <linux/console.h> |
30 | #include <linux/slab.h> | |
31 | #include <linux/debugfs.h> | |
32 | #include <drm/drmP.h> | |
33 | #include <drm/drm_crtc_helper.h> | |
4562236b | 34 | #include <drm/drm_atomic_helper.h> |
d38ceaf9 AD |
35 | #include <drm/amdgpu_drm.h> |
36 | #include <linux/vgaarb.h> | |
37 | #include <linux/vga_switcheroo.h> | |
38 | #include <linux/efi.h> | |
39 | #include "amdgpu.h" | |
f4b373f4 | 40 | #include "amdgpu_trace.h" |
d38ceaf9 AD |
41 | #include "amdgpu_i2c.h" |
42 | #include "atom.h" | |
43 | #include "amdgpu_atombios.h" | |
a5bde2f9 | 44 | #include "amdgpu_atomfirmware.h" |
d0dd7f0c | 45 | #include "amd_pcie.h" |
33f34802 KW |
46 | #ifdef CONFIG_DRM_AMDGPU_SI |
47 | #include "si.h" | |
48 | #endif | |
a2e73f56 AD |
49 | #ifdef CONFIG_DRM_AMDGPU_CIK |
50 | #include "cik.h" | |
51 | #endif | |
aaa36a97 | 52 | #include "vi.h" |
460826e6 | 53 | #include "soc15.h" |
d38ceaf9 | 54 | #include "bif/bif_4_1_d.h" |
9accf2fd | 55 | #include <linux/pci.h> |
bec86378 | 56 | #include <linux/firmware.h> |
89041940 | 57 | #include "amdgpu_vf_error.h" |
d38ceaf9 | 58 | |
ba997709 | 59 | #include "amdgpu_amdkfd.h" |
d2f52ac8 | 60 | #include "amdgpu_pm.h" |
d38ceaf9 | 61 | |
e2a75f88 | 62 | MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); |
2d2e5e7e | 63 | MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); |
e2a75f88 | 64 | |
2dc80b00 S |
65 | #define AMDGPU_RESUME_MS 2000 |
66 | ||
d38ceaf9 AD |
67 | static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); |
68 | static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev); | |
763efb6c | 69 | static int amdgpu_debugfs_init(struct amdgpu_device *adev); |
d38ceaf9 AD |
70 | |
71 | static const char *amdgpu_asic_name[] = { | |
da69c161 KW |
72 | "TAHITI", |
73 | "PITCAIRN", | |
74 | "VERDE", | |
75 | "OLAND", | |
76 | "HAINAN", | |
d38ceaf9 AD |
77 | "BONAIRE", |
78 | "KAVERI", | |
79 | "KABINI", | |
80 | "HAWAII", | |
81 | "MULLINS", | |
82 | "TOPAZ", | |
83 | "TONGA", | |
48299f95 | 84 | "FIJI", |
d38ceaf9 | 85 | "CARRIZO", |
139f4917 | 86 | "STONEY", |
2cc0c0b5 FC |
87 | "POLARIS10", |
88 | "POLARIS11", | |
c4642a47 | 89 | "POLARIS12", |
d4196f01 | 90 | "VEGA10", |
2ca8a5d2 | 91 | "RAVEN", |
d38ceaf9 AD |
92 | "LAST", |
93 | }; | |
94 | ||
95 | bool amdgpu_device_is_px(struct drm_device *dev) | |
96 | { | |
97 | struct amdgpu_device *adev = dev->dev_private; | |
98 | ||
2f7d10b3 | 99 | if (adev->flags & AMD_IS_PX) |
d38ceaf9 AD |
100 | return true; |
101 | return false; | |
102 | } | |
103 | ||
104 | /* | |
105 | * MMIO register access helper functions. | |
106 | */ | |
107 | uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, | |
15d72fd7 | 108 | uint32_t acc_flags) |
d38ceaf9 | 109 | { |
f4b373f4 TSD |
110 | uint32_t ret; |
111 | ||
43ca8efa | 112 | if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) |
bc992ba5 | 113 | return amdgpu_virt_kiq_rreg(adev, reg); |
bc992ba5 | 114 | |
15d72fd7 | 115 | if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) |
f4b373f4 | 116 | ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); |
d38ceaf9 AD |
117 | else { |
118 | unsigned long flags; | |
d38ceaf9 AD |
119 | |
120 | spin_lock_irqsave(&adev->mmio_idx_lock, flags); | |
121 | writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); | |
122 | ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); | |
123 | spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); | |
d38ceaf9 | 124 | } |
f4b373f4 TSD |
125 | trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret); |
126 | return ret; | |
d38ceaf9 AD |
127 | } |
128 | ||
129 | void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, | |
15d72fd7 | 130 | uint32_t acc_flags) |
d38ceaf9 | 131 | { |
f4b373f4 | 132 | trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); |
4e99a44e | 133 | |
47ed4e1c KW |
134 | if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { |
135 | adev->last_mm_index = v; | |
136 | } | |
137 | ||
43ca8efa | 138 | if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) |
bc992ba5 | 139 | return amdgpu_virt_kiq_wreg(adev, reg, v); |
bc992ba5 | 140 | |
15d72fd7 | 141 | if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) |
d38ceaf9 AD |
142 | writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); |
143 | else { | |
144 | unsigned long flags; | |
145 | ||
146 | spin_lock_irqsave(&adev->mmio_idx_lock, flags); | |
147 | writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); | |
148 | writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); | |
149 | spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); | |
150 | } | |
47ed4e1c KW |
151 | |
152 | if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { | |
153 | udelay(500); | |
154 | } | |
d38ceaf9 AD |
155 | } |
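/*
 * Usage sketch for the MMIO helpers above (a minimal example, assuming the
 * RREG32()/WREG32() convenience macros in amdgpu.h wrap these functions;
 * the register offset is purely illustrative):
 *
 *	u32 val;
 *
 *	val = amdgpu_mm_rreg(adev, 0x1234, 0);
 *	amdgpu_mm_wreg(adev, 0x1234, val | 0x1, 0);
 *
 * Offsets at or beyond rmmio_size fall back to the indexed MM_INDEX/MM_DATA
 * path under mmio_idx_lock, as implemented above.
 */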
156 | ||
157 | u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg) | |
158 | { | |
159 | if ((reg * 4) < adev->rio_mem_size) | |
160 | return ioread32(adev->rio_mem + (reg * 4)); | |
161 | else { | |
162 | iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4)); | |
163 | return ioread32(adev->rio_mem + (mmMM_DATA * 4)); | |
164 | } | |
165 | } | |
166 | ||
167 | void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v) | |
168 | { | |
47ed4e1c KW |
169 | if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { |
170 | adev->last_mm_index = v; | |
171 | } | |
d38ceaf9 AD |
172 | |
173 | if ((reg * 4) < adev->rio_mem_size) | |
174 | iowrite32(v, adev->rio_mem + (reg * 4)); | |
175 | else { | |
176 | iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4)); | |
177 | iowrite32(v, adev->rio_mem + (mmMM_DATA * 4)); | |
178 | } | |
47ed4e1c KW |
179 | |
180 | if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { | |
181 | udelay(500); | |
182 | } | |
d38ceaf9 AD |
183 | } |
184 | ||
185 | /** | |
186 | * amdgpu_mm_rdoorbell - read a doorbell dword | |
187 | * | |
188 | * @adev: amdgpu_device pointer | |
189 | * @index: doorbell index | |
190 | * | |
191 | * Returns the value in the doorbell aperture at the | |
192 | * requested doorbell index (CIK). | |
193 | */ | |
194 | u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index) | |
195 | { | |
196 | if (index < adev->doorbell.num_doorbells) { | |
197 | return readl(adev->doorbell.ptr + index); | |
198 | } else { | |
199 | DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); | |
200 | return 0; | |
201 | } | |
202 | } | |
203 | ||
204 | /** | |
205 | * amdgpu_mm_wdoorbell - write a doorbell dword | |
206 | * | |
207 | * @adev: amdgpu_device pointer | |
208 | * @index: doorbell index | |
209 | * @v: value to write | |
210 | * | |
211 | * Writes @v to the doorbell aperture at the | |
212 | * requested doorbell index (CIK). | |
213 | */ | |
214 | void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v) | |
215 | { | |
216 | if (index < adev->doorbell.num_doorbells) { | |
217 | writel(v, adev->doorbell.ptr + index); | |
218 | } else { | |
219 | DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); | |
220 | } | |
221 | } | |
222 | ||
832be404 KW |
223 | /** |
224 | * amdgpu_mm_rdoorbell64 - read a doorbell Qword | |
225 | * | |
226 | * @adev: amdgpu_device pointer | |
227 | * @index: doorbell index | |
228 | * | |
229 | * Returns the value in the doorbell aperture at the | |
230 | * requested doorbell index (VEGA10+). | |
231 | */ | |
232 | u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index) | |
233 | { | |
234 | if (index < adev->doorbell.num_doorbells) { | |
235 | return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index)); | |
236 | } else { | |
237 | DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); | |
238 | return 0; | |
239 | } | |
240 | } | |
241 | ||
242 | /** | |
243 | * amdgpu_mm_wdoorbell64 - write a doorbell Qword | |
244 | * | |
245 | * @adev: amdgpu_device pointer | |
246 | * @index: doorbell index | |
247 | * @v: value to write | |
248 | * | |
249 | * Writes @v to the doorbell aperture at the | |
250 | * requested doorbell index (VEGA10+). | |
251 | */ | |
252 | void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v) | |
253 | { | |
254 | if (index < adev->doorbell.num_doorbells) { | |
255 | atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v); | |
256 | } else { | |
257 | DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); | |
258 | } | |
259 | } | |
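/*
 * Usage sketch (simplified; real ring handling is more involved): a ring
 * with use_doorbell set publishes its new write pointer through the doorbell
 * aperture instead of an MMIO register, e.g.
 *
 *	if (ring->use_doorbell)
 *		amdgpu_mm_wdoorbell64(adev, ring->doorbell_index, ring->wptr);
 *	else
 *		WREG32(ring_wptr_reg, lower_32_bits(ring->wptr));
 *
 * where ring_wptr_reg stands in for an ASIC-specific write-pointer register.
 */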
260 | ||
d38ceaf9 AD |
261 | /** |
262 | * amdgpu_invalid_rreg - dummy reg read function | |
263 | * | |
264 | * @adev: amdgpu device pointer | |
265 | * @reg: offset of register | |
266 | * | |
267 | * Dummy register read function. Used for register blocks | |
268 | * that certain asics don't have (all asics). | |
269 | * Returns the value in the register. | |
270 | */ | |
271 | static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg) | |
272 | { | |
273 | DRM_ERROR("Invalid callback to read register 0x%04X\n", reg); | |
274 | BUG(); | |
275 | return 0; | |
276 | } | |
277 | ||
278 | /** | |
279 | * amdgpu_invalid_wreg - dummy reg write function | |
280 | * | |
281 | * @adev: amdgpu device pointer | |
282 | * @reg: offset of register | |
283 | * @v: value to write to the register | |
284 | * | |
285 | * Dummy register write function. Used for register blocks | |
286 | * that certain asics don't have (all asics). | |
287 | */ | |
288 | static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) | |
289 | { | |
290 | DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n", | |
291 | reg, v); | |
292 | BUG(); | |
293 | } | |
294 | ||
295 | /** | |
296 | * amdgpu_block_invalid_rreg - dummy reg read function | |
297 | * | |
298 | * @adev: amdgpu device pointer | |
299 | * @block: offset of instance | |
300 | * @reg: offset of register | |
301 | * | |
302 | * Dummy register read function. Used for register blocks | |
303 | * that certain asics don't have (all asics). | |
304 | * Returns the value in the register. | |
305 | */ | |
306 | static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev, | |
307 | uint32_t block, uint32_t reg) | |
308 | { | |
309 | DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n", | |
310 | reg, block); | |
311 | BUG(); | |
312 | return 0; | |
313 | } | |
314 | ||
315 | /** | |
316 | * amdgpu_block_invalid_wreg - dummy reg write function | |
317 | * | |
318 | * @adev: amdgpu device pointer | |
319 | * @block: offset of instance | |
320 | * @reg: offset of register | |
321 | * @v: value to write to the register | |
322 | * | |
323 | * Dummy register write function. Used for register blocks | |
324 | * that certain asics don't have (all asics). | |
325 | */ | |
326 | static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev, | |
327 | uint32_t block, | |
328 | uint32_t reg, uint32_t v) | |
329 | { | |
330 | DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n", | |
331 | reg, block, v); | |
332 | BUG(); | |
333 | } | |
334 | ||
335 | static int amdgpu_vram_scratch_init(struct amdgpu_device *adev) | |
336 | { | |
a4a02777 CK |
337 | return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, |
338 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, | |
339 | &adev->vram_scratch.robj, | |
340 | &adev->vram_scratch.gpu_addr, | |
341 | (void **)&adev->vram_scratch.ptr); | |
d38ceaf9 AD |
342 | } |
343 | ||
344 | static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev) | |
345 | { | |
078af1a3 | 346 | amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL); |
d38ceaf9 AD |
347 | } |
348 | ||
349 | /** | |
350 | * amdgpu_program_register_sequence - program an array of registers. | |
351 | * | |
352 | * @adev: amdgpu_device pointer | |
353 | * @registers: pointer to the register array | |
354 | * @array_size: size of the register array | |
355 | * | |
356 | * Programs an array of registers with AND and OR masks. | |
357 | * This is a helper for setting golden registers. | |
358 | */ | |
359 | void amdgpu_program_register_sequence(struct amdgpu_device *adev, | |
360 | const u32 *registers, | |
361 | const u32 array_size) | |
362 | { | |
363 | u32 tmp, reg, and_mask, or_mask; | |
364 | int i; | |
365 | ||
366 | if (array_size % 3) | |
367 | return; | |
368 | ||
369 | for (i = 0; i < array_size; i += 3) { | |
370 | reg = registers[i + 0]; | |
371 | and_mask = registers[i + 1]; | |
372 | or_mask = registers[i + 2]; | |
373 | ||
374 | if (and_mask == 0xffffffff) { | |
375 | tmp = or_mask; | |
376 | } else { | |
377 | tmp = RREG32(reg); | |
378 | tmp &= ~and_mask; | |
379 | tmp |= or_mask; | |
380 | } | |
381 | WREG32(reg, tmp); | |
382 | } | |
383 | } | |
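/*
 * Example register list (hypothetical offsets and masks): the array is a
 * flat sequence of {register, AND mask, OR mask} triples, so array_size must
 * be a multiple of 3. An AND mask of 0xffffffff writes the OR value directly;
 * anything else is a read-modify-write that clears the AND-mask bits before
 * ORing in the new value.
 *
 *	static const u32 golden_settings_example[] = {
 *		mmEXAMPLE_REG_A, 0xffffffff, 0x00000100,
 *		mmEXAMPLE_REG_B, 0x0000000f, 0x00000002,
 *	};
 *
 *	amdgpu_program_register_sequence(adev, golden_settings_example,
 *					 ARRAY_SIZE(golden_settings_example));
 */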
384 | ||
385 | void amdgpu_pci_config_reset(struct amdgpu_device *adev) | |
386 | { | |
387 | pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA); | |
388 | } | |
389 | ||
390 | /* | |
391 | * GPU doorbell aperture helper functions. | |
392 | */ | |
393 | /** | |
394 | * amdgpu_doorbell_init - Init doorbell driver information. | |
395 | * | |
396 | * @adev: amdgpu_device pointer | |
397 | * | |
398 | * Init doorbell driver information (CIK) | |
399 | * Returns 0 on success, error on failure. | |
400 | */ | |
401 | static int amdgpu_doorbell_init(struct amdgpu_device *adev) | |
402 | { | |
705e519e CK |
403 | /* No doorbell on SI hardware generation */ |
404 | if (adev->asic_type < CHIP_BONAIRE) { | |
405 | adev->doorbell.base = 0; | |
406 | adev->doorbell.size = 0; | |
407 | adev->doorbell.num_doorbells = 0; | |
408 | adev->doorbell.ptr = NULL; | |
409 | return 0; | |
410 | } | |
411 | ||
d6895ad3 CK |
412 | if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET) |
413 | return -EINVAL; | |
414 | ||
d38ceaf9 AD |
415 | /* doorbell bar mapping */ |
416 | adev->doorbell.base = pci_resource_start(adev->pdev, 2); | |
417 | adev->doorbell.size = pci_resource_len(adev->pdev, 2); | |
418 | ||
edf600da | 419 | adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32), |
d38ceaf9 AD |
420 | AMDGPU_DOORBELL_MAX_ASSIGNMENT+1); |
421 | if (adev->doorbell.num_doorbells == 0) | |
422 | return -EINVAL; | |
423 | ||
8972e5d2 CK |
424 | adev->doorbell.ptr = ioremap(adev->doorbell.base, |
425 | adev->doorbell.num_doorbells * | |
426 | sizeof(u32)); | |
427 | if (adev->doorbell.ptr == NULL) | |
d38ceaf9 | 428 | return -ENOMEM; |
d38ceaf9 AD |
429 | |
430 | return 0; | |
431 | } | |
432 | ||
433 | /** | |
434 | * amdgpu_doorbell_fini - Tear down doorbell driver information. | |
435 | * | |
436 | * @adev: amdgpu_device pointer | |
437 | * | |
438 | * Tear down doorbell driver information (CIK) | |
439 | */ | |
440 | static void amdgpu_doorbell_fini(struct amdgpu_device *adev) | |
441 | { | |
442 | iounmap(adev->doorbell.ptr); | |
443 | adev->doorbell.ptr = NULL; | |
444 | } | |
445 | ||
446 | /** | |
447 | * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to | |
448 | * setup amdkfd | |
449 | * | |
450 | * @adev: amdgpu_device pointer | |
451 | * @aperture_base: output returning doorbell aperture base physical address | |
452 | * @aperture_size: output returning doorbell aperture size in bytes | |
453 | * @start_offset: output returning # of doorbell bytes reserved for amdgpu. | |
454 | * | |
455 | * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up, | |
456 | * takes doorbells required for its own rings and reports the setup to amdkfd. | |
457 | * amdgpu reserved doorbells are at the start of the doorbell aperture. | |
458 | */ | |
459 | void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev, | |
460 | phys_addr_t *aperture_base, | |
461 | size_t *aperture_size, | |
462 | size_t *start_offset) | |
463 | { | |
464 | /* | |
465 | * The first num_doorbells are used by amdgpu. | |
466 | * amdkfd takes whatever's left in the aperture. | |
467 | */ | |
468 | if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) { | |
469 | *aperture_base = adev->doorbell.base; | |
470 | *aperture_size = adev->doorbell.size; | |
471 | *start_offset = adev->doorbell.num_doorbells * sizeof(u32); | |
472 | } else { | |
473 | *aperture_base = 0; | |
474 | *aperture_size = 0; | |
475 | *start_offset = 0; | |
476 | } | |
477 | } | |
478 | ||
479 | /* | |
480 | * amdgpu_wb_*() | |
455a7bc2 | 481 | * Writeback is the method by which the GPU updates special pages in memory |
ea81a173 | 482 | * with the status of certain GPU events (fences, ring pointers, etc.). |
d38ceaf9 AD |
483 | */ |
484 | ||
485 | /** | |
486 | * amdgpu_wb_fini - Disable Writeback and free memory | |
487 | * | |
488 | * @adev: amdgpu_device pointer | |
489 | * | |
490 | * Disables Writeback and frees the Writeback memory (all asics). | |
491 | * Used at driver shutdown. | |
492 | */ | |
493 | static void amdgpu_wb_fini(struct amdgpu_device *adev) | |
494 | { | |
495 | if (adev->wb.wb_obj) { | |
a76ed485 AD |
496 | amdgpu_bo_free_kernel(&adev->wb.wb_obj, |
497 | &adev->wb.gpu_addr, | |
498 | (void **)&adev->wb.wb); | |
d38ceaf9 AD |
499 | adev->wb.wb_obj = NULL; |
500 | } | |
501 | } | |
502 | ||
503 | /** | |
504 | * amdgpu_wb_init - Init Writeback driver info and allocate memory | |
505 | * | |
506 | * @adev: amdgpu_device pointer | |
507 | * | |
455a7bc2 | 508 | * Initializes writeback and allocates writeback memory (all asics). |
d38ceaf9 AD |
509 | * Used at driver startup. |
510 | * Returns 0 on success or a negative error code on failure. | |
511 | */ | |
512 | static int amdgpu_wb_init(struct amdgpu_device *adev) | |
513 | { | |
514 | int r; | |
515 | ||
516 | if (adev->wb.wb_obj == NULL) { | |
97407b63 AD |
517 | /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */ |
518 | r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8, | |
a76ed485 AD |
519 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, |
520 | &adev->wb.wb_obj, &adev->wb.gpu_addr, | |
521 | (void **)&adev->wb.wb); | |
d38ceaf9 AD |
522 | if (r) { |
523 | dev_warn(adev->dev, "(%d) create WB bo failed\n", r); | |
524 | return r; | |
525 | } | |
d38ceaf9 AD |
526 | |
527 | adev->wb.num_wb = AMDGPU_MAX_WB; | |
528 | memset(&adev->wb.used, 0, sizeof(adev->wb.used)); | |
529 | ||
530 | /* clear wb memory */ | |
60a970a6 | 531 | memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t)); |
d38ceaf9 AD |
532 | } |
533 | ||
534 | return 0; | |
535 | } | |
536 | ||
537 | /** | |
538 | * amdgpu_wb_get - Allocate a wb entry | |
539 | * | |
540 | * @adev: amdgpu_device pointer | |
541 | * @wb: wb index | |
542 | * | |
543 | * Allocate a wb slot for use by the driver (all asics). | |
544 | * Returns 0 on success or -EINVAL on failure. | |
545 | */ | |
546 | int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb) | |
547 | { | |
548 | unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb); | |
d38ceaf9 | 549 | |
97407b63 | 550 | if (offset < adev->wb.num_wb) { |
7014285a | 551 | __set_bit(offset, adev->wb.used); |
63ae07ca | 552 | *wb = offset << 3; /* convert to dw offset */ |
0915fdbc ML |
553 | return 0; |
554 | } else { | |
555 | return -EINVAL; | |
556 | } | |
557 | } | |
558 | ||
d38ceaf9 AD |
559 | /** |
560 | * amdgpu_wb_free - Free a wb entry | |
561 | * | |
562 | * @adev: amdgpu_device pointer | |
563 | * @wb: wb index | |
564 | * | |
565 | * Free a wb slot allocated for use by the driver (all asics) | |
566 | */ | |
567 | void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb) | |
568 | { | |
569 | if (wb < adev->wb.num_wb) | |
63ae07ca | 570 | __clear_bit(wb >> 3, adev->wb.used); |
d38ceaf9 AD |
571 | } |
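/*
 * Usage sketch (fence/ring specifics omitted): a consumer allocates one
 * writeback slot, derives its CPU and GPU addresses from the returned dword
 * offset, and releases the slot when done.
 *
 *	u32 wb;
 *
 *	if (amdgpu_wb_get(adev, &wb) == 0) {
 *		volatile u32 *cpu_addr = &adev->wb.wb[wb];
 *		u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *
 *		... let the GPU write status to gpu_addr, poll *cpu_addr ...
 *
 *		amdgpu_wb_free(adev, wb);
 *	}
 */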
572 | ||
573 | /** | |
574 | * amdgpu_vram_location - try to find VRAM location | |
575 | * @adev: amdgpu device structure holding all necessary information | |
576 | * @mc: memory controller structure holding memory information | |
577 | * @base: base address at which to put VRAM | |
578 | * | |
455a7bc2 | 579 | * Function will try to place VRAM at the base address provided |
3d647c8f | 580 | * as a parameter. |
d38ceaf9 AD |
581 | */ |
582 | void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base) | |
583 | { | |
584 | uint64_t limit = (uint64_t)amdgpu_vram_limit << 20; | |
585 | ||
586 | mc->vram_start = base; | |
d38ceaf9 AD |
587 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; |
588 | if (limit && limit < mc->real_vram_size) | |
589 | mc->real_vram_size = limit; | |
590 | dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", | |
591 | mc->mc_vram_size >> 20, mc->vram_start, | |
592 | mc->vram_end, mc->real_vram_size >> 20); | |
593 | } | |
594 | ||
595 | /** | |
6f02a696 | 596 | * amdgpu_gart_location - try to find GTT location |
d38ceaf9 AD |
597 | * @adev: amdgpu device structure holding all necessary information |
598 | * @mc: memory controller structure holding memory information |
599 | * | |
600 | * Function will try to place GTT before or after VRAM. |
601 | * | |
602 | * If the GTT size is bigger than the space left, then we adjust the GTT size. |
603 | * Thus this function will never fail. |
604 | * | |
605 | * FIXME: when reducing GTT size align new size on power of 2. | |
606 | */ | |
6f02a696 | 607 | void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) |
d38ceaf9 AD |
608 | { |
609 | u64 size_af, size_bf; | |
610 | ||
ed21c047 CK |
611 | size_af = adev->mc.mc_mask - mc->vram_end; |
612 | size_bf = mc->vram_start; | |
d38ceaf9 | 613 | if (size_bf > size_af) { |
6f02a696 | 614 | if (mc->gart_size > size_bf) { |
d38ceaf9 | 615 | dev_warn(adev->dev, "limiting GTT\n"); |
6f02a696 | 616 | mc->gart_size = size_bf; |
d38ceaf9 | 617 | } |
6f02a696 | 618 | mc->gart_start = 0; |
d38ceaf9 | 619 | } else { |
6f02a696 | 620 | if (mc->gart_size > size_af) { |
d38ceaf9 | 621 | dev_warn(adev->dev, "limiting GTT\n"); |
6f02a696 | 622 | mc->gart_size = size_af; |
d38ceaf9 | 623 | } |
b98f1b9e CK |
624 | /* VCE doesn't like it when BOs cross a 4GB segment, so align |
625 | * the GART base on a 4GB boundary as well. | |
626 | */ | |
627 | mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL); | |
d38ceaf9 | 628 | } |
6f02a696 | 629 | mc->gart_end = mc->gart_start + mc->gart_size - 1; |
d38ceaf9 | 630 | dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n", |
6f02a696 | 631 | mc->gart_size >> 20, mc->gart_start, mc->gart_end); |
d38ceaf9 AD |
632 | } |
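/*
 * Worked example (hypothetical sizes): with 8 GB of VRAM mapped at
 * vram_start = 0 (vram_end = 0x1ffffffff) and a 256 MB GART, size_bf is 0,
 * so the GART is placed after VRAM and aligned up to the next 4 GB boundary:
 *
 *	gart_start = ALIGN(0x1ffffffff + 1, 0x100000000ULL) = 0x200000000
 *	gart_end   = 0x200000000 + (256ULL << 20) - 1     = 0x20fffffff
 */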
633 | ||
a05502e5 HC |
634 | /* |
635 | * Firmware Reservation functions | |
636 | */ | |
637 | /** | |
638 | * amdgpu_fw_reserve_vram_fini - free fw reserved vram | |
639 | * | |
640 | * @adev: amdgpu_device pointer | |
641 | * | |
642 | * Free the firmware reserved VRAM if it has been reserved. |
643 | */ | |
644 | void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev) | |
645 | { | |
646 | amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo, | |
647 | NULL, &adev->fw_vram_usage.va); | |
648 | } | |
649 | ||
650 | /** | |
651 | * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw | |
652 | * | |
653 | * @adev: amdgpu_device pointer | |
654 | * | |
655 | * Create a VRAM BO reservation requested by firmware. |
656 | */ | |
657 | int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev) | |
658 | { | |
c13c55d6 | 659 | struct ttm_operation_ctx ctx = { false, false }; |
a05502e5 | 660 | int r = 0; |
3c738893 | 661 | int i; |
a05502e5 | 662 | u64 vram_size = adev->mc.visible_vram_size; |
3c738893 HC |
663 | u64 offset = adev->fw_vram_usage.start_offset; |
664 | u64 size = adev->fw_vram_usage.size; | |
665 | struct amdgpu_bo *bo; | |
a05502e5 HC |
666 | |
667 | adev->fw_vram_usage.va = NULL; | |
668 | adev->fw_vram_usage.reserved_bo = NULL; | |
669 | ||
670 | if (adev->fw_vram_usage.size > 0 && | |
671 | adev->fw_vram_usage.size <= vram_size) { | |
672 | ||
673 | r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, | |
3c738893 | 674 | PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, |
a05502e5 HC |
675 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
676 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0, | |
677 | &adev->fw_vram_usage.reserved_bo); | |
678 | if (r) | |
679 | goto error_create; | |
680 | ||
681 | r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false); | |
682 | if (r) | |
683 | goto error_reserve; | |
3c738893 HC |
684 | |
685 | /* remove the original mem node and create a new one at the | |
686 | * request position | |
687 | */ | |
688 | bo = adev->fw_vram_usage.reserved_bo; | |
689 | offset = ALIGN(offset, PAGE_SIZE); | |
690 | for (i = 0; i < bo->placement.num_placement; ++i) { | |
691 | bo->placements[i].fpfn = offset >> PAGE_SHIFT; | |
692 | bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; | |
693 | } | |
694 | ||
695 | ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem); | |
c13c55d6 CK |
696 | r = ttm_bo_mem_space(&bo->tbo, &bo->placement, |
697 | &bo->tbo.mem, &ctx); | |
3c738893 HC |
698 | if (r) |
699 | goto error_pin; | |
700 | ||
a05502e5 HC |
701 | r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo, |
702 | AMDGPU_GEM_DOMAIN_VRAM, | |
703 | adev->fw_vram_usage.start_offset, | |
704 | (adev->fw_vram_usage.start_offset + | |
9921167d | 705 | adev->fw_vram_usage.size), NULL); |
a05502e5 HC |
706 | if (r) |
707 | goto error_pin; | |
708 | r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo, | |
709 | &adev->fw_vram_usage.va); | |
710 | if (r) | |
711 | goto error_kmap; | |
712 | ||
713 | amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); | |
714 | } | |
715 | return r; | |
716 | ||
717 | error_kmap: | |
718 | amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo); | |
719 | error_pin: | |
720 | amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); | |
721 | error_reserve: | |
722 | amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo); | |
723 | error_create: | |
724 | adev->fw_vram_usage.va = NULL; | |
725 | adev->fw_vram_usage.reserved_bo = NULL; | |
726 | return r; | |
727 | } | |
728 | ||
d6895ad3 CK |
729 | /** |
730 | * amdgpu_device_resize_fb_bar - try to resize FB BAR | |
731 | * | |
732 | * @adev: amdgpu_device pointer | |
733 | * | |
734 | * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not | |
735 | * to fail, but if any of the BARs is not accessible after the resize we abort | |
736 | * driver loading by returning -ENODEV. | |
737 | */ | |
738 | int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) | |
739 | { | |
740 | u64 space_needed = roundup_pow_of_two(adev->mc.real_vram_size); | |
741 | u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1; | |
31b8adab CK |
742 | struct pci_bus *root; |
743 | struct resource *res; | |
744 | unsigned i; | |
d6895ad3 CK |
745 | u16 cmd; |
746 | int r; | |
747 | ||
0c03b912 | 748 | /* Bypass for VF */ |
749 | if (amdgpu_sriov_vf(adev)) | |
750 | return 0; | |
751 | ||
31b8adab CK |
752 | /* Check if the root BUS has 64bit memory resources */ |
753 | root = adev->pdev->bus; | |
754 | while (root->parent) | |
755 | root = root->parent; | |
756 | ||
757 | pci_bus_for_each_resource(root, res, i) { | |
758 | if (res && res->flags & IORESOURCE_MEM_64 && | |
759 | res->start > 0x100000000ull) | |
760 | break; | |
761 | } | |
762 | ||
763 | /* Trying to resize is pointless without a root hub window above 4GB */ | |
764 | if (!res) | |
765 | return 0; | |
766 | ||
d6895ad3 CK |
767 | /* Disable memory decoding while we change the BAR addresses and size */ |
768 | pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd); | |
769 | pci_write_config_word(adev->pdev, PCI_COMMAND, | |
770 | cmd & ~PCI_COMMAND_MEMORY); | |
771 | ||
772 | /* Free the VRAM and doorbell BAR, we most likely need to move both. */ | |
773 | amdgpu_doorbell_fini(adev); | |
774 | if (adev->asic_type >= CHIP_BONAIRE) | |
775 | pci_release_resource(adev->pdev, 2); | |
776 | ||
777 | pci_release_resource(adev->pdev, 0); | |
778 | ||
779 | r = pci_resize_resource(adev->pdev, 0, rbar_size); | |
780 | if (r == -ENOSPC) | |
781 | DRM_INFO("Not enough PCI address space for a large BAR."); | |
782 | else if (r && r != -ENOTSUPP) | |
783 | DRM_ERROR("Problem resizing BAR0 (%d).", r); | |
784 | ||
785 | pci_assign_unassigned_bus_resources(adev->pdev->bus); | |
786 | ||
787 | /* When the doorbell or fb BAR isn't available we have no chance of | |
788 | * using the device. | |
789 | */ | |
790 | r = amdgpu_doorbell_init(adev); | |
791 | if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET)) | |
792 | return -ENODEV; | |
793 | ||
794 | pci_write_config_word(adev->pdev, PCI_COMMAND, cmd); | |
795 | ||
796 | return 0; | |
797 | } | |
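/*
 * Worked example for rbar_size (hypothetical VRAM size): with 8 GB of VRAM,
 * space_needed = 0x200000000, (space_needed >> 20) | 1 = 8193, and
 * order_base_2(8193) - 1 = 13, which in the PCI resizable-BAR size encoding
 * (value n selects 2^n MB) requests an 8 GB BAR0.
 */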
a05502e5 | 798 | |
d38ceaf9 AD |
799 | /* |
800 | * GPU helper functions. |
801 | */ | |
802 | /** | |
c836fec5 | 803 | * amdgpu_need_post - check if the hw needs post or not |
d38ceaf9 AD |
804 | * |
805 | * @adev: amdgpu_device pointer | |
806 | * | |
c836fec5 JQ |
807 | * Check if the asic has been initialized (all asics) at driver startup, |
808 | * or whether post is needed because a hw reset was performed. |
809 | * Returns true if post is needed, false if not. |
d38ceaf9 | 810 | */ |
c836fec5 | 811 | bool amdgpu_need_post(struct amdgpu_device *adev) |
d38ceaf9 AD |
812 | { |
813 | uint32_t reg; | |
814 | ||
bec86378 ML |
815 | if (amdgpu_sriov_vf(adev)) |
816 | return false; | |
817 | ||
818 | if (amdgpu_passthrough(adev)) { | |
1da2c326 ML |
819 | /* for FIJI: in the whole-GPU pass-through virtualization case, after a VM |
820 | * reboot some old SMC firmware still needs the driver to do a vPost, |
821 | * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have |
822 | * this flaw, so we force vPost for SMC versions below 22.15. |
bec86378 ML |
823 | */ |
824 | if (adev->asic_type == CHIP_FIJI) { | |
825 | int err; | |
826 | uint32_t fw_ver; | |
827 | err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev); | |
828 | /* force vPost if an error occurred */ |
829 | if (err) | |
830 | return true; | |
831 | ||
832 | fw_ver = *((uint32_t *)adev->pm.fw->data + 69); | |
1da2c326 ML |
833 | if (fw_ver < 0x00160e00) |
834 | return true; | |
bec86378 | 835 | } |
bec86378 | 836 | } |
91fe77eb | 837 | |
838 | if (adev->has_hw_reset) { | |
839 | adev->has_hw_reset = false; | |
840 | return true; | |
841 | } | |
842 | ||
843 | /* bios scratch used on CIK+ */ | |
844 | if (adev->asic_type >= CHIP_BONAIRE) | |
845 | return amdgpu_atombios_scratch_need_asic_init(adev); | |
846 | ||
847 | /* check MEM_SIZE for older asics */ | |
848 | reg = amdgpu_asic_get_config_memsize(adev); | |
849 | ||
850 | if ((reg != 0) && (reg != 0xffffffff)) | |
851 | return false; | |
852 | ||
853 | return true; | |
bec86378 ML |
854 | } |
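/*
 * Sketch of the typical caller (device init path; simplified, the exact call
 * site differs in detail): post the card through the ATOM BIOS only when the
 * check above says it is required.
 *
 *	if (amdgpu_need_post(adev))
 *		amdgpu_atom_asic_init(adev->mode_info.atom_context);
 */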
855 | ||
d38ceaf9 AD |
856 | /** |
857 | * amdgpu_dummy_page_init - init dummy page used by the driver | |
858 | * | |
859 | * @adev: amdgpu_device pointer | |
860 | * | |
861 | * Allocate the dummy page used by the driver (all asics). | |
862 | * This dummy page is used by the driver as a filler for gart entries | |
863 | * when pages are taken out of the GART. |
864 | * Returns 0 on success, -ENOMEM on failure. |
865 | */ | |
866 | int amdgpu_dummy_page_init(struct amdgpu_device *adev) | |
867 | { | |
868 | if (adev->dummy_page.page) | |
869 | return 0; | |
870 | adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO); | |
871 | if (adev->dummy_page.page == NULL) | |
872 | return -ENOMEM; | |
873 | adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page, | |
874 | 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | |
875 | if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) { | |
876 | dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n"); | |
877 | __free_page(adev->dummy_page.page); | |
878 | adev->dummy_page.page = NULL; | |
879 | return -ENOMEM; | |
880 | } | |
881 | return 0; | |
882 | } | |
883 | ||
884 | /** | |
885 | * amdgpu_dummy_page_fini - free dummy page used by the driver | |
886 | * | |
887 | * @adev: amdgpu_device pointer | |
888 | * | |
889 | * Frees the dummy page used by the driver (all asics). | |
890 | */ | |
891 | void amdgpu_dummy_page_fini(struct amdgpu_device *adev) | |
892 | { | |
893 | if (adev->dummy_page.page == NULL) | |
894 | return; | |
895 | pci_unmap_page(adev->pdev, adev->dummy_page.addr, | |
896 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | |
897 | __free_page(adev->dummy_page.page); | |
898 | adev->dummy_page.page = NULL; | |
899 | } | |
900 | ||
d38ceaf9 AD |
901 | /* if we get transitioned to only one device, take VGA back */ |
902 | /** | |
903 | * amdgpu_vga_set_decode - enable/disable vga decode | |
904 | * | |
905 | * @cookie: amdgpu_device pointer | |
906 | * @state: enable/disable vga decode | |
907 | * | |
908 | * Enable/disable vga decode (all asics). | |
909 | * Returns VGA resource flags. | |
910 | */ | |
911 | static unsigned int amdgpu_vga_set_decode(void *cookie, bool state) | |
912 | { | |
913 | struct amdgpu_device *adev = cookie; | |
914 | amdgpu_asic_set_vga_state(adev, state); | |
915 | if (state) | |
916 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | | |
917 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | |
918 | else | |
919 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | |
920 | } | |
921 | ||
bab4fee7 | 922 | static void amdgpu_check_block_size(struct amdgpu_device *adev) |
a1adf8be CZ |
923 | { |
924 | /* defines the number of bits in the page table versus the page directory; |
925 | * a page is 4KB, so we have a 12-bit offset, a minimum of 9 bits in the |
926 | * page table, and the remaining bits in the page directory */ |
bab4fee7 JZ |
927 | if (amdgpu_vm_block_size == -1) |
928 | return; | |
a1adf8be | 929 | |
bab4fee7 | 930 | if (amdgpu_vm_block_size < 9) { |
a1adf8be CZ |
931 | dev_warn(adev->dev, "VM page table size (%d) too small\n", |
932 | amdgpu_vm_block_size); | |
97489129 | 933 | amdgpu_vm_block_size = -1; |
a1adf8be | 934 | } |
a1adf8be CZ |
935 | } |
936 | ||
83ca145d ZJ |
937 | static void amdgpu_check_vm_size(struct amdgpu_device *adev) |
938 | { | |
64dab074 AD |
939 | /* no need to check the default value */ |
940 | if (amdgpu_vm_size == -1) | |
941 | return; | |
942 | ||
83ca145d ZJ |
943 | if (amdgpu_vm_size < 1) { |
944 | dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n", | |
945 | amdgpu_vm_size); | |
f3368128 | 946 | amdgpu_vm_size = -1; |
83ca145d | 947 | } |
83ca145d ZJ |
948 | } |
949 | ||
d38ceaf9 AD |
950 | /** |
951 | * amdgpu_check_arguments - validate module params | |
952 | * | |
953 | * @adev: amdgpu_device pointer | |
954 | * | |
955 | * Validates certain module parameters and updates | |
956 | * the associated values used by the driver (all asics). | |
957 | */ | |
958 | static void amdgpu_check_arguments(struct amdgpu_device *adev) | |
959 | { | |
5b011235 CZ |
960 | if (amdgpu_sched_jobs < 4) { |
961 | dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", | |
962 | amdgpu_sched_jobs); | |
963 | amdgpu_sched_jobs = 4; | |
76117507 | 964 | } else if (!is_power_of_2(amdgpu_sched_jobs)) { |
5b011235 CZ |
965 | dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n", |
966 | amdgpu_sched_jobs); | |
967 | amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs); | |
968 | } | |
d38ceaf9 | 969 | |
83e74db6 | 970 | if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) { |
f9321cc4 CK |
971 | /* gart size must be greater or equal to 32M */ |
972 | dev_warn(adev->dev, "gart size (%d) too small\n", | |
973 | amdgpu_gart_size); | |
83e74db6 | 974 | amdgpu_gart_size = -1; |
d38ceaf9 AD |
975 | } |
976 | ||
36d38372 | 977 | if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) { |
c4e1a13a | 978 | /* gtt size must be greater or equal to 32M */ |
36d38372 CK |
979 | dev_warn(adev->dev, "gtt size (%d) too small\n", |
980 | amdgpu_gtt_size); | |
981 | amdgpu_gtt_size = -1; | |
d38ceaf9 AD |
982 | } |
983 | ||
d07f14be RH |
984 | /* valid range is between 4 and 9 inclusive */ |
985 | if (amdgpu_vm_fragment_size != -1 && | |
986 | (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) { | |
987 | dev_warn(adev->dev, "valid range is between 4 and 9\n"); | |
988 | amdgpu_vm_fragment_size = -1; | |
989 | } | |
990 | ||
83ca145d | 991 | amdgpu_check_vm_size(adev); |
d38ceaf9 | 992 | |
bab4fee7 | 993 | amdgpu_check_block_size(adev); |
6a7f76e7 | 994 | |
526bae37 | 995 | if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 || |
76117507 | 996 | !is_power_of_2(amdgpu_vram_page_split))) { |
6a7f76e7 CK |
997 | dev_warn(adev->dev, "invalid VRAM page split (%d)\n", |
998 | amdgpu_vram_page_split); | |
999 | amdgpu_vram_page_split = 1024; | |
1000 | } | |
8854695a AG |
1001 | |
1002 | if (amdgpu_lockup_timeout == 0) { | |
1003 | dev_warn(adev->dev, "lockup_timeout must be > 0, adjusting to 10000\n"); |
1004 | amdgpu_lockup_timeout = 10000; | |
1005 | } | |
d38ceaf9 AD |
1006 | } |
1007 | ||
1008 | /** | |
1009 | * amdgpu_switcheroo_set_state - set switcheroo state | |
1010 | * | |
1011 | * @pdev: pci dev pointer | |
1694467b | 1012 | * @state: vga_switcheroo state |
d38ceaf9 AD |
1013 | * |
1014 | * Callback for the switcheroo driver. Suspends or resumes the | |
1015 | * asics before or after it is powered up using ACPI methods. |
1016 | */ | |
1017 | static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) | |
1018 | { | |
1019 | struct drm_device *dev = pci_get_drvdata(pdev); | |
1020 | ||
1021 | if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF) | |
1022 | return; | |
1023 | ||
1024 | if (state == VGA_SWITCHEROO_ON) { | |
7ca85295 | 1025 | pr_info("amdgpu: switched on\n"); |
d38ceaf9 AD |
1026 | /* don't suspend or resume card normally */ |
1027 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | |
1028 | ||
810ddc3a | 1029 | amdgpu_device_resume(dev, true, true); |
d38ceaf9 | 1030 | |
d38ceaf9 AD |
1031 | dev->switch_power_state = DRM_SWITCH_POWER_ON; |
1032 | drm_kms_helper_poll_enable(dev); | |
1033 | } else { | |
7ca85295 | 1034 | pr_info("amdgpu: switched off\n"); |
d38ceaf9 AD |
1035 | drm_kms_helper_poll_disable(dev); |
1036 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | |
810ddc3a | 1037 | amdgpu_device_suspend(dev, true, true); |
d38ceaf9 AD |
1038 | dev->switch_power_state = DRM_SWITCH_POWER_OFF; |
1039 | } | |
1040 | } | |
1041 | ||
1042 | /** | |
1043 | * amdgpu_switcheroo_can_switch - see if switcheroo state can change | |
1044 | * | |
1045 | * @pdev: pci dev pointer | |
1046 | * | |
1047 | * Callback for the switcheroo driver. Check if the switcheroo |
1048 | * state can be changed. | |
1049 | * Returns true if the state can be changed, false if not. | |
1050 | */ | |
1051 | static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev) | |
1052 | { | |
1053 | struct drm_device *dev = pci_get_drvdata(pdev); | |
1054 | ||
1055 | /* | |
1056 | * FIXME: open_count is protected by drm_global_mutex but that would lead to | |
1057 | * locking inversion with the driver load path. And the access here is | |
1058 | * completely racy anyway. So don't bother with locking for now. | |
1059 | */ | |
1060 | return dev->open_count == 0; | |
1061 | } | |
1062 | ||
1063 | static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = { | |
1064 | .set_gpu_state = amdgpu_switcheroo_set_state, | |
1065 | .reprobe = NULL, | |
1066 | .can_switch = amdgpu_switcheroo_can_switch, | |
1067 | }; | |
1068 | ||
1069 | int amdgpu_set_clockgating_state(struct amdgpu_device *adev, | |
5fc3aeeb | 1070 | enum amd_ip_block_type block_type, |
1071 | enum amd_clockgating_state state) | |
d38ceaf9 AD |
1072 | { |
1073 | int i, r = 0; | |
1074 | ||
1075 | for (i = 0; i < adev->num_ip_blocks; i++) { | |
a1255107 | 1076 | if (!adev->ip_blocks[i].status.valid) |
9ecbe7f5 | 1077 | continue; |
c722865a RZ |
1078 | if (adev->ip_blocks[i].version->type != block_type) |
1079 | continue; | |
1080 | if (!adev->ip_blocks[i].version->funcs->set_clockgating_state) | |
1081 | continue; | |
1082 | r = adev->ip_blocks[i].version->funcs->set_clockgating_state( | |
1083 | (void *)adev, state); | |
1084 | if (r) | |
1085 | DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n", | |
1086 | adev->ip_blocks[i].version->funcs->name, r); | |
d38ceaf9 AD |
1087 | } |
1088 | return r; | |
1089 | } | |
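/*
 * Example call (illustrative block type and state): gate UVD clocks to save
 * power, ungating them again before the block is next used.
 *
 *	amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
 *				     AMD_CG_STATE_GATE);
 *	...
 *	amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
 *				     AMD_CG_STATE_UNGATE);
 */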
1090 | ||
1091 | int amdgpu_set_powergating_state(struct amdgpu_device *adev, | |
5fc3aeeb | 1092 | enum amd_ip_block_type block_type, |
1093 | enum amd_powergating_state state) | |
d38ceaf9 AD |
1094 | { |
1095 | int i, r = 0; | |
1096 | ||
1097 | for (i = 0; i < adev->num_ip_blocks; i++) { | |
a1255107 | 1098 | if (!adev->ip_blocks[i].status.valid) |
9ecbe7f5 | 1099 | continue; |
c722865a RZ |
1100 | if (adev->ip_blocks[i].version->type != block_type) |
1101 | continue; | |
1102 | if (!adev->ip_blocks[i].version->funcs->set_powergating_state) | |
1103 | continue; | |
1104 | r = adev->ip_blocks[i].version->funcs->set_powergating_state( | |
1105 | (void *)adev, state); | |
1106 | if (r) | |
1107 | DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n", | |
1108 | adev->ip_blocks[i].version->funcs->name, r); | |
d38ceaf9 AD |
1109 | } |
1110 | return r; | |
1111 | } | |
1112 | ||
6cb2d4e4 HR |
1113 | void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags) |
1114 | { | |
1115 | int i; | |
1116 | ||
1117 | for (i = 0; i < adev->num_ip_blocks; i++) { | |
1118 | if (!adev->ip_blocks[i].status.valid) | |
1119 | continue; | |
1120 | if (adev->ip_blocks[i].version->funcs->get_clockgating_state) | |
1121 | adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags); | |
1122 | } | |
1123 | } | |
1124 | ||
5dbbb60b AD |
1125 | int amdgpu_wait_for_idle(struct amdgpu_device *adev, |
1126 | enum amd_ip_block_type block_type) | |
1127 | { | |
1128 | int i, r; | |
1129 | ||
1130 | for (i = 0; i < adev->num_ip_blocks; i++) { | |
a1255107 | 1131 | if (!adev->ip_blocks[i].status.valid) |
9ecbe7f5 | 1132 | continue; |
a1255107 AD |
1133 | if (adev->ip_blocks[i].version->type == block_type) { |
1134 | r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev); | |
5dbbb60b AD |
1135 | if (r) |
1136 | return r; | |
1137 | break; | |
1138 | } | |
1139 | } | |
1140 | return 0; | |
1141 | ||
1142 | } | |
1143 | ||
1144 | bool amdgpu_is_idle(struct amdgpu_device *adev, | |
1145 | enum amd_ip_block_type block_type) | |
1146 | { | |
1147 | int i; | |
1148 | ||
1149 | for (i = 0; i < adev->num_ip_blocks; i++) { | |
a1255107 | 1150 | if (!adev->ip_blocks[i].status.valid) |
9ecbe7f5 | 1151 | continue; |
a1255107 AD |
1152 | if (adev->ip_blocks[i].version->type == block_type) |
1153 | return adev->ip_blocks[i].version->funcs->is_idle((void *)adev); | |
5dbbb60b AD |
1154 | } |
1155 | return true; | |
1156 | ||
1157 | } | |
1158 | ||
a1255107 AD |
1159 | struct amdgpu_ip_block *amdgpu_get_ip_block(struct amdgpu_device *adev, |
1160 | enum amd_ip_block_type type) | |
d38ceaf9 AD |
1161 | { |
1162 | int i; | |
1163 | ||
1164 | for (i = 0; i < adev->num_ip_blocks; i++) | |
a1255107 | 1165 | if (adev->ip_blocks[i].version->type == type) |
d38ceaf9 AD |
1166 | return &adev->ip_blocks[i]; |
1167 | ||
1168 | return NULL; | |
1169 | } | |
1170 | ||
1171 | /** | |
1172 | * amdgpu_ip_block_version_cmp | |
1173 | * | |
1174 | * @adev: amdgpu_device pointer | |
5fc3aeeb | 1175 | * @type: enum amd_ip_block_type |
d38ceaf9 AD |
1176 | * @major: major version |
1177 | * @minor: minor version | |
1178 | * | |
1179 | * return 0 if equal or greater | |
1180 | * return 1 if smaller or the ip_block doesn't exist | |
1181 | */ | |
1182 | int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, | |
5fc3aeeb | 1183 | enum amd_ip_block_type type, |
d38ceaf9 AD |
1184 | u32 major, u32 minor) |
1185 | { | |
a1255107 | 1186 | struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type); |
d38ceaf9 | 1187 | |
a1255107 AD |
1188 | if (ip_block && ((ip_block->version->major > major) || |
1189 | ((ip_block->version->major == major) && | |
1190 | (ip_block->version->minor >= minor)))) | |
d38ceaf9 AD |
1191 | return 0; |
1192 | ||
1193 | return 1; | |
1194 | } | |
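/*
 * Example (hypothetical version numbers): per the return convention above,
 * a result of 0 means the asic carries at least the requested IP version.
 *
 *	if (amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX, 8, 1) == 0) {
 *		... the GFX block is v8.1 or newer ...
 *	}
 */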
1195 | ||
a1255107 AD |
1196 | /** |
1197 | * amdgpu_ip_block_add | |
1198 | * | |
1199 | * @adev: amdgpu_device pointer | |
1200 | * @ip_block_version: pointer to the IP to add | |
1201 | * | |
1202 | * Adds the IP block driver information to the collection of IPs | |
1203 | * on the asic. | |
1204 | */ | |
1205 | int amdgpu_ip_block_add(struct amdgpu_device *adev, | |
1206 | const struct amdgpu_ip_block_version *ip_block_version) | |
1207 | { | |
1208 | if (!ip_block_version) | |
1209 | return -EINVAL; | |
1210 | ||
a0bae357 HR |
1211 | DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks, |
1212 | ip_block_version->funcs->name); | |
1213 | ||
a1255107 AD |
1214 | adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version; |
1215 | ||
1216 | return 0; | |
1217 | } | |
1218 | ||
483ef985 | 1219 | static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev) |
9accf2fd ED |
1220 | { |
1221 | adev->enable_virtual_display = false; | |
1222 | ||
1223 | if (amdgpu_virtual_display) { | |
1224 | struct drm_device *ddev = adev->ddev; | |
1225 | const char *pci_address_name = pci_name(ddev->pdev); | |
0f66356d | 1226 | char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname; |
9accf2fd ED |
1227 | |
1228 | pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL); | |
1229 | pciaddstr_tmp = pciaddstr; | |
0f66356d ED |
1230 | while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) { |
1231 | pciaddname = strsep(&pciaddname_tmp, ","); | |
967de2a9 YT |
1232 | if (!strcmp("all", pciaddname) |
1233 | || !strcmp(pci_address_name, pciaddname)) { | |
0f66356d ED |
1234 | long num_crtc; |
1235 | int res = -1; | |
1236 | ||
9accf2fd | 1237 | adev->enable_virtual_display = true; |
0f66356d ED |
1238 | |
1239 | if (pciaddname_tmp) | |
1240 | res = kstrtol(pciaddname_tmp, 10, | |
1241 | &num_crtc); | |
1242 | ||
1243 | if (!res) { | |
1244 | if (num_crtc < 1) | |
1245 | num_crtc = 1; | |
1246 | if (num_crtc > 6) | |
1247 | num_crtc = 6; | |
1248 | adev->mode_info.num_crtc = num_crtc; | |
1249 | } else { | |
1250 | adev->mode_info.num_crtc = 1; | |
1251 | } | |
9accf2fd ED |
1252 | break; |
1253 | } | |
1254 | } | |
1255 | ||
0f66356d ED |
1256 | DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n", |
1257 | amdgpu_virtual_display, pci_address_name, | |
1258 | adev->enable_virtual_display, adev->mode_info.num_crtc); | |
9accf2fd ED |
1259 | |
1260 | kfree(pciaddstr); | |
1261 | } | |
1262 | } | |
1263 | ||
e2a75f88 AD |
1264 | static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) |
1265 | { | |
e2a75f88 AD |
1266 | const char *chip_name; |
1267 | char fw_name[30]; | |
1268 | int err; | |
1269 | const struct gpu_info_firmware_header_v1_0 *hdr; | |
1270 | ||
ab4fe3e1 HR |
1271 | adev->firmware.gpu_info_fw = NULL; |
1272 | ||
e2a75f88 AD |
1273 | switch (adev->asic_type) { |
1274 | case CHIP_TOPAZ: | |
1275 | case CHIP_TONGA: | |
1276 | case CHIP_FIJI: | |
1277 | case CHIP_POLARIS11: | |
1278 | case CHIP_POLARIS10: | |
1279 | case CHIP_POLARIS12: | |
1280 | case CHIP_CARRIZO: | |
1281 | case CHIP_STONEY: | |
1282 | #ifdef CONFIG_DRM_AMDGPU_SI | |
1283 | case CHIP_VERDE: | |
1284 | case CHIP_TAHITI: | |
1285 | case CHIP_PITCAIRN: | |
1286 | case CHIP_OLAND: | |
1287 | case CHIP_HAINAN: | |
1288 | #endif | |
1289 | #ifdef CONFIG_DRM_AMDGPU_CIK | |
1290 | case CHIP_BONAIRE: | |
1291 | case CHIP_HAWAII: | |
1292 | case CHIP_KAVERI: | |
1293 | case CHIP_KABINI: | |
1294 | case CHIP_MULLINS: | |
1295 | #endif | |
1296 | default: | |
1297 | return 0; | |
1298 | case CHIP_VEGA10: | |
1299 | chip_name = "vega10"; | |
1300 | break; | |
2d2e5e7e AD |
1301 | case CHIP_RAVEN: |
1302 | chip_name = "raven"; | |
1303 | break; | |
e2a75f88 AD |
1304 | } |
1305 | ||
1306 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name); | |
ab4fe3e1 | 1307 | err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev); |
e2a75f88 AD |
1308 | if (err) { |
1309 | dev_err(adev->dev, | |
1310 | "Failed to load gpu_info firmware \"%s\"\n", | |
1311 | fw_name); | |
1312 | goto out; | |
1313 | } | |
ab4fe3e1 | 1314 | err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw); |
e2a75f88 AD |
1315 | if (err) { |
1316 | dev_err(adev->dev, | |
1317 | "Failed to validate gpu_info firmware \"%s\"\n", | |
1318 | fw_name); | |
1319 | goto out; | |
1320 | } | |
1321 | ||
ab4fe3e1 | 1322 | hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data; |
e2a75f88 AD |
1323 | amdgpu_ucode_print_gpu_info_hdr(&hdr->header); |
1324 | ||
1325 | switch (hdr->version_major) { | |
1326 | case 1: | |
1327 | { | |
1328 | const struct gpu_info_firmware_v1_0 *gpu_info_fw = | |
ab4fe3e1 | 1329 | (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data + |
e2a75f88 AD |
1330 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); |
1331 | ||
b5ab16bf AD |
1332 | adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se); |
1333 | adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh); | |
1334 | adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se); | |
1335 | adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se); | |
e2a75f88 | 1336 | adev->gfx.config.max_texture_channel_caches = |
b5ab16bf AD |
1337 | le32_to_cpu(gpu_info_fw->gc_num_tccs); |
1338 | adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs); | |
1339 | adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds); | |
1340 | adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth); | |
1341 | adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth); | |
e2a75f88 | 1342 | adev->gfx.config.double_offchip_lds_buf = |
b5ab16bf AD |
1343 | le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer); |
1344 | adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size); | |
51fd0370 HZ |
1345 | adev->gfx.cu_info.max_waves_per_simd = |
1346 | le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd); | |
1347 | adev->gfx.cu_info.max_scratch_slots_per_cu = | |
1348 | le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu); | |
1349 | adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size); | |
e2a75f88 AD |
1350 | break; |
1351 | } | |
1352 | default: | |
1353 | dev_err(adev->dev, | |
1354 | "Unsupported gpu_info table %d\n", hdr->header.ucode_version); | |
1355 | err = -EINVAL; | |
1356 | goto out; | |
1357 | } | |
1358 | out: | |
e2a75f88 AD |
1359 | return err; |
1360 | } | |
1361 | ||
d38ceaf9 AD |
1362 | static int amdgpu_early_init(struct amdgpu_device *adev) |
1363 | { | |
aaa36a97 | 1364 | int i, r; |
d38ceaf9 | 1365 | |
483ef985 | 1366 | amdgpu_device_enable_virtual_display(adev); |
a6be7570 | 1367 | |
d38ceaf9 | 1368 | switch (adev->asic_type) { |
aaa36a97 AD |
1369 | case CHIP_TOPAZ: |
1370 | case CHIP_TONGA: | |
48299f95 | 1371 | case CHIP_FIJI: |
2cc0c0b5 FC |
1372 | case CHIP_POLARIS11: |
1373 | case CHIP_POLARIS10: | |
c4642a47 | 1374 | case CHIP_POLARIS12: |
aaa36a97 | 1375 | case CHIP_CARRIZO: |
39bb0c92 SL |
1376 | case CHIP_STONEY: |
1377 | if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) | |
aaa36a97 AD |
1378 | adev->family = AMDGPU_FAMILY_CZ; |
1379 | else | |
1380 | adev->family = AMDGPU_FAMILY_VI; | |
1381 | ||
1382 | r = vi_set_ip_blocks(adev); | |
1383 | if (r) | |
1384 | return r; | |
1385 | break; | |
33f34802 KW |
1386 | #ifdef CONFIG_DRM_AMDGPU_SI |
1387 | case CHIP_VERDE: | |
1388 | case CHIP_TAHITI: | |
1389 | case CHIP_PITCAIRN: | |
1390 | case CHIP_OLAND: | |
1391 | case CHIP_HAINAN: | |
295d0daf | 1392 | adev->family = AMDGPU_FAMILY_SI; |
33f34802 KW |
1393 | r = si_set_ip_blocks(adev); |
1394 | if (r) | |
1395 | return r; | |
1396 | break; | |
1397 | #endif | |
a2e73f56 AD |
1398 | #ifdef CONFIG_DRM_AMDGPU_CIK |
1399 | case CHIP_BONAIRE: | |
1400 | case CHIP_HAWAII: | |
1401 | case CHIP_KAVERI: | |
1402 | case CHIP_KABINI: | |
1403 | case CHIP_MULLINS: | |
1404 | if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII)) | |
1405 | adev->family = AMDGPU_FAMILY_CI; | |
1406 | else | |
1407 | adev->family = AMDGPU_FAMILY_KV; | |
1408 | ||
1409 | r = cik_set_ip_blocks(adev); | |
1410 | if (r) | |
1411 | return r; | |
1412 | break; | |
1413 | #endif | |
2ca8a5d2 CZ |
1414 | case CHIP_VEGA10: |
1415 | case CHIP_RAVEN: | |
1416 | if (adev->asic_type == CHIP_RAVEN) | |
1417 | adev->family = AMDGPU_FAMILY_RV; | |
1418 | else | |
1419 | adev->family = AMDGPU_FAMILY_AI; | |
460826e6 KW |
1420 | |
1421 | r = soc15_set_ip_blocks(adev); | |
1422 | if (r) | |
1423 | return r; | |
1424 | break; | |
d38ceaf9 AD |
1425 | default: |
1426 | /* FIXME: not supported yet */ | |
1427 | return -EINVAL; | |
1428 | } | |
1429 | ||
e2a75f88 AD |
1430 | r = amdgpu_device_parse_gpu_info_fw(adev); |
1431 | if (r) | |
1432 | return r; | |
1433 | ||
1884734a | 1434 | amdgpu_amdkfd_device_probe(adev); |
1435 | ||
3149d9da XY |
1436 | if (amdgpu_sriov_vf(adev)) { |
1437 | r = amdgpu_virt_request_full_gpu(adev, true); | |
1438 | if (r) | |
5ffa61c1 | 1439 | return -EAGAIN; |
3149d9da XY |
1440 | } |
1441 | ||
d38ceaf9 AD |
1442 | for (i = 0; i < adev->num_ip_blocks; i++) { |
1443 | if ((amdgpu_ip_block_mask & (1 << i)) == 0) { | |
ed8cf00c HR |
1444 | DRM_ERROR("disabled ip block: %d <%s>\n", |
1445 | i, adev->ip_blocks[i].version->funcs->name); | |
a1255107 | 1446 | adev->ip_blocks[i].status.valid = false; |
d38ceaf9 | 1447 | } else { |
a1255107 AD |
1448 | if (adev->ip_blocks[i].version->funcs->early_init) { |
1449 | r = adev->ip_blocks[i].version->funcs->early_init((void *)adev); | |
2c1a2784 | 1450 | if (r == -ENOENT) { |
a1255107 | 1451 | adev->ip_blocks[i].status.valid = false; |
2c1a2784 | 1452 | } else if (r) { |
a1255107 AD |
1453 | DRM_ERROR("early_init of IP block <%s> failed %d\n", |
1454 | adev->ip_blocks[i].version->funcs->name, r); | |
d38ceaf9 | 1455 | return r; |
2c1a2784 | 1456 | } else { |
a1255107 | 1457 | adev->ip_blocks[i].status.valid = true; |
2c1a2784 | 1458 | } |
974e6b64 | 1459 | } else { |
a1255107 | 1460 | adev->ip_blocks[i].status.valid = true; |
d38ceaf9 | 1461 | } |
d38ceaf9 AD |
1462 | } |
1463 | } | |
1464 | ||
395d1fb9 NH |
1465 | adev->cg_flags &= amdgpu_cg_mask; |
1466 | adev->pg_flags &= amdgpu_pg_mask; | |
1467 | ||
d38ceaf9 AD |
1468 | return 0; |
1469 | } | |
1470 | ||
1471 | static int amdgpu_init(struct amdgpu_device *adev) | |
1472 | { | |
1473 | int i, r; | |
1474 | ||
1475 | for (i = 0; i < adev->num_ip_blocks; i++) { | |
a1255107 | 1476 | if (!adev->ip_blocks[i].status.valid) |
d38ceaf9 | 1477 | continue; |
a1255107 | 1478 | r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev); |
2c1a2784 | 1479 | if (r) { |
a1255107 AD |
1480 | DRM_ERROR("sw_init of IP block <%s> failed %d\n", |
1481 | adev->ip_blocks[i].version->funcs->name, r); | |
d38ceaf9 | 1482 | return r; |
2c1a2784 | 1483 | } |
a1255107 | 1484 | adev->ip_blocks[i].status.sw = true; |
d38ceaf9 | 1485 | /* need to do gmc hw init early so we can allocate gpu mem */ |
a1255107 | 1486 | if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { |
d38ceaf9 | 1487 | r = amdgpu_vram_scratch_init(adev); |
2c1a2784 AD |
1488 | if (r) { |
1489 | DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); | |
d38ceaf9 | 1490 | return r; |
2c1a2784 | 1491 | } |
a1255107 | 1492 | r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); |
2c1a2784 AD |
1493 | if (r) { |
1494 | DRM_ERROR("hw_init %d failed %d\n", i, r); | |
d38ceaf9 | 1495 | return r; |
2c1a2784 | 1496 | } |
d38ceaf9 | 1497 | r = amdgpu_wb_init(adev); |
2c1a2784 AD |
1498 | if (r) { |
1499 | DRM_ERROR("amdgpu_wb_init failed %d\n", r); | |
d38ceaf9 | 1500 | return r; |
2c1a2784 | 1501 | } |
a1255107 | 1502 | adev->ip_blocks[i].status.hw = true; |
2493664f ML |
1503 | |
1504 | /* right after GMC hw init, we create CSA */ | |
1505 | if (amdgpu_sriov_vf(adev)) { | |
1506 | r = amdgpu_allocate_static_csa(adev); | |
1507 | if (r) { | |
1508 | DRM_ERROR("allocate CSA failed %d\n", r); | |
1509 | return r; | |
1510 | } | |
1511 | } | |
d38ceaf9 AD |
1512 | } |
1513 | } | |
1514 | ||
1515 | for (i = 0; i < adev->num_ip_blocks; i++) { | |
a1255107 | 1516 | if (!adev->ip_blocks[i].status.sw) |
d38ceaf9 AD |
1517 | continue; |
1518 | /* gmc hw init is done early */ | |
a1255107 | 1519 | if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) |
d38ceaf9 | 1520 | continue; |
a1255107 | 1521 | r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); |
2c1a2784 | 1522 | if (r) { |
a1255107 AD |
1523 | DRM_ERROR("hw_init of IP block <%s> failed %d\n", |
1524 | adev->ip_blocks[i].version->funcs->name, r); | |
d38ceaf9 | 1525 | return r; |
2c1a2784 | 1526 | } |
a1255107 | 1527 | adev->ip_blocks[i].status.hw = true; |
d38ceaf9 AD |
1528 | } |
1529 | ||
1884734a | 1530 | amdgpu_amdkfd_device_init(adev); |
c6332b97 | 1531 | |
1532 | if (amdgpu_sriov_vf(adev)) | |
1533 | amdgpu_virt_release_full_gpu(adev, true); | |
1534 | ||
d38ceaf9 AD |
1535 | return 0; |
1536 | } | |
1537 | ||
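/*
 * Reset magic: a small snapshot of the start of the GART table is kept in
 * adev->reset_magic.  After a GPU reset the snapshot is compared with the
 * live GART contents; a mismatch indicates that VRAM was lost across the
 * reset and shadow buffers need to be restored.
 */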
0c49e0b8 CZ |
1538 | static void amdgpu_fill_reset_magic(struct amdgpu_device *adev) |
1539 | { | |
1540 | memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM); | |
1541 | } | |
1542 | ||
1543 | static bool amdgpu_check_vram_lost(struct amdgpu_device *adev) | |
1544 | { | |
1545 | return !!memcmp(adev->gart.ptr, adev->reset_magic, | |
1546 | AMDGPU_RESET_MAGIC_NUM); | |
1547 | } | |
1548 | ||
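/*
 * Enable clockgating on every valid IP block except UVD and VCE, which
 * handle their own gating.  Runs from the late_init delayed work, scheduled
 * AMDGPU_RESUME_MS after amdgpu_late_init() finishes.
 */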
2dc80b00 | 1549 | static int amdgpu_late_set_cg_state(struct amdgpu_device *adev) |
d38ceaf9 AD |
1550 | { |
1551 | int i = 0, r; | |
1552 | ||
1553 | for (i = 0; i < adev->num_ip_blocks; i++) { | |
a1255107 | 1554 | if (!adev->ip_blocks[i].status.valid) |
d38ceaf9 | 1555 | continue; |
4a446d55 | 1556 | /* skip CG for VCE/UVD, it's handled specially */ |
a1255107 AD |
1557 | if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && |
1558 | adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) { | |
4a446d55 | 1559 | /* enable clockgating to save power */ |
a1255107 AD |
1560 | r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, |
1561 | AMD_CG_STATE_GATE); | |
4a446d55 AD |
1562 | if (r) { |
1563 | DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", | |
a1255107 | 1564 | adev->ip_blocks[i].version->funcs->name, r); |
4a446d55 AD |
1565 | return r; |
1566 | } | |
b0b00ff1 | 1567 | } |
d38ceaf9 | 1568 | } |
2dc80b00 S |
1569 | return 0; |
1570 | } | |
1571 | ||
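/*
 * amdgpu_late_init - run the late_init callback of every valid IP block,
 * schedule the deferred clockgating work and record the reset magic used
 * to detect VRAM loss after a GPU reset.
 */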
1572 | static int amdgpu_late_init(struct amdgpu_device *adev) | |
1573 | { | |
1574 | int i = 0, r; | |
1575 | ||
1576 | for (i = 0; i < adev->num_ip_blocks; i++) { | |
1577 | if (!adev->ip_blocks[i].status.valid) | |
1578 | continue; | |
1579 | if (adev->ip_blocks[i].version->funcs->late_init) { | |
1580 | r = adev->ip_blocks[i].version->funcs->late_init((void *)adev); | |
1581 | if (r) { | |
1582 | DRM_ERROR("late_init of IP block <%s> failed %d\n", | |
1583 | adev->ip_blocks[i].version->funcs->name, r); | |
1584 | return r; | |
1585 | } | |
1586 | adev->ip_blocks[i].status.late_initialized = true; | |
1587 | } | |
1588 | } | |
1589 | ||
1590 | mod_delayed_work(system_wq, &adev->late_init_work, | |
1591 | msecs_to_jiffies(AMDGPU_RESUME_MS)); | |
d38ceaf9 | 1592 | |
0c49e0b8 | 1593 | amdgpu_fill_reset_magic(adev); |
d38ceaf9 AD |
1594 | |
1595 | return 0; | |
1596 | } | |
1597 | ||
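/*
 * amdgpu_fini - tear down all IP blocks
 *
 * The SMC block is ungated and hw-finalized first so the other blocks can
 * be shut down safely.  The remaining blocks then go through hw_fini,
 * sw_fini and late_fini in reverse order; the GMC pass also frees the CSA,
 * writeback and VRAM scratch resources.  Under SR-IOV, exclusive GPU access
 * is released at the end.
 */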
1598 | static int amdgpu_fini(struct amdgpu_device *adev) | |
1599 | { | |
1600 | int i, r; | |
1601 | ||
1884734a | 1602 | amdgpu_amdkfd_device_fini(adev); |
3e96dbfd AD |
1603 | /* need to disable SMC first */ |
1604 | for (i = 0; i < adev->num_ip_blocks; i++) { | |
a1255107 | 1605 | if (!adev->ip_blocks[i].status.hw) |
3e96dbfd | 1606 | continue; |
a1255107 | 1607 | if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { |
3e96dbfd | 1608 | /* ungate blocks before hw fini so that we can shut down the blocks safely */ |
a1255107 AD |
1609 | r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, |
1610 | AMD_CG_STATE_UNGATE); | |
3e96dbfd AD |
1611 | if (r) { |
1612 | DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", | |
a1255107 | 1613 | adev->ip_blocks[i].version->funcs->name, r); |
3e96dbfd AD |
1614 | return r; |
1615 | } | |
a1255107 | 1616 | r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); |
3e96dbfd AD |
1617 | /* XXX handle errors */ |
1618 | if (r) { | |
1619 | DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", | |
a1255107 | 1620 | adev->ip_blocks[i].version->funcs->name, r); |
3e96dbfd | 1621 | } |
a1255107 | 1622 | adev->ip_blocks[i].status.hw = false; |
3e96dbfd AD |
1623 | break; |
1624 | } | |
1625 | } | |
1626 | ||
d38ceaf9 | 1627 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { |
a1255107 | 1628 | if (!adev->ip_blocks[i].status.hw) |
d38ceaf9 | 1629 | continue; |
a1255107 | 1630 | if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { |
84e5b516 | 1631 | amdgpu_free_static_csa(adev); |
d38ceaf9 AD |
1632 | amdgpu_wb_fini(adev); |
1633 | amdgpu_vram_scratch_fini(adev); | |
1634 | } | |
8201a67a RZ |
1635 | |
1636 | if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && | |
1637 | adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) { | |
1638 | /* ungate blocks before hw fini so that we can shut down the blocks safely */ | |
1639 | r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, | |
1640 | AMD_CG_STATE_UNGATE); | |
1641 | if (r) { | |
1642 | DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", | |
1643 | adev->ip_blocks[i].version->funcs->name, r); | |
1644 | return r; | |
1645 | } | |
2c1a2784 | 1646 | } |
8201a67a | 1647 | |
a1255107 | 1648 | r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); |
d38ceaf9 | 1649 | /* XXX handle errors */ |
2c1a2784 | 1650 | if (r) { |
a1255107 AD |
1651 | DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", |
1652 | adev->ip_blocks[i].version->funcs->name, r); | |
2c1a2784 | 1653 | } |
8201a67a | 1654 | |
a1255107 | 1655 | adev->ip_blocks[i].status.hw = false; |
d38ceaf9 AD |
1656 | } |
1657 | ||
1658 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { | |
a1255107 | 1659 | if (!adev->ip_blocks[i].status.sw) |
d38ceaf9 | 1660 | continue; |
a1255107 | 1661 | r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev); |
d38ceaf9 | 1662 | /* XXX handle errors */ |
2c1a2784 | 1663 | if (r) { |
a1255107 AD |
1664 | DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", |
1665 | adev->ip_blocks[i].version->funcs->name, r); | |
2c1a2784 | 1666 | } |
a1255107 AD |
1667 | adev->ip_blocks[i].status.sw = false; |
1668 | adev->ip_blocks[i].status.valid = false; | |
d38ceaf9 AD |
1669 | } |
1670 | ||
a6dcfd9c | 1671 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { |
a1255107 | 1672 | if (!adev->ip_blocks[i].status.late_initialized) |
8a2eef1d | 1673 | continue; |
a1255107 AD |
1674 | if (adev->ip_blocks[i].version->funcs->late_fini) |
1675 | adev->ip_blocks[i].version->funcs->late_fini((void *)adev); | |
1676 | adev->ip_blocks[i].status.late_initialized = false; | |
a6dcfd9c ML |
1677 | } |
1678 | ||
030308fc | 1679 | if (amdgpu_sriov_vf(adev)) |
24136135 ML |
1680 | if (amdgpu_virt_release_full_gpu(adev, false)) |
1681 | DRM_ERROR("failed to release exclusive mode on fini\n"); | |
2493664f | 1682 | |
d38ceaf9 AD |
1683 | return 0; |
1684 | } | |
1685 | ||
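/* Delayed-work handler that performs the deferred clockgating enablement. */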
2dc80b00 S |
1686 | static void amdgpu_late_init_func_handler(struct work_struct *work) |
1687 | { | |
1688 | struct amdgpu_device *adev = | |
1689 | container_of(work, struct amdgpu_device, late_init_work.work); | |
1690 | amdgpu_late_set_cg_state(adev); | |
1691 | } | |
1692 | ||
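/*
 * amdgpu_suspend - suspend all IP blocks
 *
 * The SMC block is ungated first, then every other valid block is ungated
 * and suspended in reverse order.  Under SR-IOV, exclusive GPU access is
 * requested for the duration of the sequence.
 */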
faefba95 | 1693 | int amdgpu_suspend(struct amdgpu_device *adev) |
d38ceaf9 AD |
1694 | { |
1695 | int i, r; | |
1696 | ||
e941ea99 XY |
1697 | if (amdgpu_sriov_vf(adev)) |
1698 | amdgpu_virt_request_full_gpu(adev, false); | |
1699 | ||
c5a93a28 FC |
1700 | /* ungate SMC block first */ |
1701 | r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC, | |
1702 | AMD_CG_STATE_UNGATE); | |
1703 | if (r) { | |
1704 | DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r); | |
1705 | } | |
1706 | ||
d38ceaf9 | 1707 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { |
a1255107 | 1708 | if (!adev->ip_blocks[i].status.valid) |
d38ceaf9 AD |
1709 | continue; |
1710 | /* ungate blocks so that suspend can properly shut them down */ | |
c5a93a28 | 1711 | if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) { |
a1255107 AD |
1712 | r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, |
1713 | AMD_CG_STATE_UNGATE); | |
c5a93a28 | 1714 | if (r) { |
a1255107 AD |
1715 | DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", |
1716 | adev->ip_blocks[i].version->funcs->name, r); | |
c5a93a28 | 1717 | } |
2c1a2784 | 1718 | } |
d38ceaf9 | 1719 | /* XXX handle errors */ |
a1255107 | 1720 | r = adev->ip_blocks[i].version->funcs->suspend(adev); |
d38ceaf9 | 1721 | /* XXX handle errors */ |
2c1a2784 | 1722 | if (r) { |
a1255107 AD |
1723 | DRM_ERROR("suspend of IP block <%s> failed %d\n", |
1724 | adev->ip_blocks[i].version->funcs->name, r); | |
2c1a2784 | 1725 | } |
d38ceaf9 AD |
1726 | } |
1727 | ||
e941ea99 XY |
1728 | if (amdgpu_sriov_vf(adev)) |
1729 | amdgpu_virt_release_full_gpu(adev, false); | |
1730 | ||
d38ceaf9 AD |
1731 | return 0; |
1732 | } | |
1733 | ||
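/*
 * First phase of VF re-initialization after a function level reset: re-run
 * hw_init for the GMC, COMMON and IH blocks, in that fixed order.
 */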
e4f0fdcc | 1734 | static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev) |
a90ad3c2 ML |
1735 | { |
1736 | int i, r; | |
1737 | ||
2cb681b6 ML |
1738 | static enum amd_ip_block_type ip_order[] = { |
1739 | AMD_IP_BLOCK_TYPE_GMC, | |
1740 | AMD_IP_BLOCK_TYPE_COMMON, | |
2cb681b6 ML |
1741 | AMD_IP_BLOCK_TYPE_IH, |
1742 | }; | |
a90ad3c2 | 1743 | |
2cb681b6 ML |
1744 | for (i = 0; i < ARRAY_SIZE(ip_order); i++) { |
1745 | int j; | |
1746 | struct amdgpu_ip_block *block; | |
a90ad3c2 | 1747 | |
2cb681b6 ML |
1748 | for (j = 0; j < adev->num_ip_blocks; j++) { |
1749 | block = &adev->ip_blocks[j]; | |
1750 | ||
1751 | if (block->version->type != ip_order[i] || | |
1752 | !block->status.valid) | |
1753 | continue; | |
1754 | ||
1755 | r = block->version->funcs->hw_init(adev); | |
1756 | DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded"); | |
a90ad3c2 ML |
1757 | } |
1758 | } | |
1759 | ||
1760 | return 0; | |
1761 | } | |
1762 | ||
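/*
 * Second phase of VF re-initialization: once the GART has been recovered,
 * re-run hw_init for SMC, PSP, DCE, GFX, SDMA, UVD and VCE.
 */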
e4f0fdcc | 1763 | static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev) |
a90ad3c2 ML |
1764 | { |
1765 | int i, r; | |
1766 | ||
2cb681b6 ML |
1767 | static enum amd_ip_block_type ip_order[] = { |
1768 | AMD_IP_BLOCK_TYPE_SMC, | |
ef4c166d | 1769 | AMD_IP_BLOCK_TYPE_PSP, |
2cb681b6 ML |
1770 | AMD_IP_BLOCK_TYPE_DCE, |
1771 | AMD_IP_BLOCK_TYPE_GFX, | |
1772 | AMD_IP_BLOCK_TYPE_SDMA, | |
257deb8c FM |
1773 | AMD_IP_BLOCK_TYPE_UVD, |
1774 | AMD_IP_BLOCK_TYPE_VCE | |
2cb681b6 | 1775 | }; |
a90ad3c2 | 1776 | |
2cb681b6 ML |
1777 | for (i = 0; i < ARRAY_SIZE(ip_order); i++) { |
1778 | int j; | |
1779 | struct amdgpu_ip_block *block; | |
a90ad3c2 | 1780 | |
2cb681b6 ML |
1781 | for (j = 0; j < adev->num_ip_blocks; j++) { |
1782 | block = &adev->ip_blocks[j]; | |
1783 | ||
1784 | if (block->version->type != ip_order[i] || | |
1785 | !block->status.valid) | |
1786 | continue; | |
1787 | ||
1788 | r = block->version->funcs->hw_init(adev); | |
1789 | DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded"); | |
a90ad3c2 ML |
1790 | } |
1791 | } | |
1792 | ||
1793 | return 0; | |
1794 | } | |
1795 | ||
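/*
 * Resume is split into two phases: phase 1 brings back the COMMON, GMC and
 * IH blocks so that memory and interrupts work again, phase 2 resumes the
 * rest.  amdgpu_reset() uses the gap between the two phases to recover the
 * GTT manager and to check whether VRAM contents survived the reset.
 */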
fcf0649f | 1796 | static int amdgpu_resume_phase1(struct amdgpu_device *adev) |
d38ceaf9 AD |
1797 | { |
1798 | int i, r; | |
1799 | ||
a90ad3c2 ML |
1800 | for (i = 0; i < adev->num_ip_blocks; i++) { |
1801 | if (!adev->ip_blocks[i].status.valid) | |
1802 | continue; | |
a90ad3c2 ML |
1803 | if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || |
1804 | adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || | |
fcf0649f CZ |
1805 | adev->ip_blocks[i].version->type == |
1806 | AMD_IP_BLOCK_TYPE_IH) { | |
1807 | r = adev->ip_blocks[i].version->funcs->resume(adev); | |
1808 | if (r) { | |
1809 | DRM_ERROR("resume of IP block <%s> failed %d\n", | |
1810 | adev->ip_blocks[i].version->funcs->name, r); | |
1811 | return r; | |
1812 | } | |
a90ad3c2 ML |
1813 | } |
1814 | } | |
1815 | ||
1816 | return 0; | |
1817 | } | |
1818 | ||
fcf0649f | 1819 | static int amdgpu_resume_phase2(struct amdgpu_device *adev) |
d38ceaf9 AD |
1820 | { |
1821 | int i, r; | |
1822 | ||
1823 | for (i = 0; i < adev->num_ip_blocks; i++) { | |
a1255107 | 1824 | if (!adev->ip_blocks[i].status.valid) |
d38ceaf9 | 1825 | continue; |
fcf0649f CZ |
1826 | if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || |
1827 | adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || | |
1828 | adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ) | |
1829 | continue; | |
a1255107 | 1830 | r = adev->ip_blocks[i].version->funcs->resume(adev); |
2c1a2784 | 1831 | if (r) { |
a1255107 AD |
1832 | DRM_ERROR("resume of IP block <%s> failed %d\n", |
1833 | adev->ip_blocks[i].version->funcs->name, r); | |
d38ceaf9 | 1834 | return r; |
2c1a2784 | 1835 | } |
d38ceaf9 AD |
1836 | } |
1837 | ||
1838 | return 0; | |
1839 | } | |
1840 | ||
fcf0649f CZ |
1841 | static int amdgpu_resume(struct amdgpu_device *adev) |
1842 | { | |
1843 | int r; | |
1844 | ||
1845 | r = amdgpu_resume_phase1(adev); | |
1846 | if (r) | |
1847 | return r; | |
1848 | r = amdgpu_resume_phase2(adev); | |
1849 | ||
1850 | return r; | |
1851 | } | |
1852 | ||
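/*
 * Under SR-IOV, check whether the vBIOS advertises virtualization support
 * (via the atomfirmware or atombios tables) and set
 * AMDGPU_SRIOV_CAPS_SRIOV_VBIOS accordingly; report a VF error if no
 * suitable vBIOS is found.
 */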
4e99a44e | 1853 | static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev) |
048765ad | 1854 | { |
6867e1b5 ML |
1855 | if (amdgpu_sriov_vf(adev)) { |
1856 | if (adev->is_atom_fw) { | |
1857 | if (amdgpu_atomfirmware_gpu_supports_virtualization(adev)) | |
1858 | adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; | |
1859 | } else { | |
1860 | if (amdgpu_atombios_has_gpu_virtualization_table(adev)) | |
1861 | adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; | |
1862 | } | |
1863 | ||
1864 | if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)) | |
1865 | amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0); | |
a5bde2f9 | 1866 | } |
048765ad AR |
1867 | } |
1868 | ||
4562236b HW |
1869 | bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) |
1870 | { | |
1871 | switch (asic_type) { | |
1872 | #if defined(CONFIG_DRM_AMD_DC) | |
1873 | case CHIP_BONAIRE: | |
1874 | case CHIP_HAWAII: | |
0d6fbccb | 1875 | case CHIP_KAVERI: |
4562236b HW |
1876 | case CHIP_CARRIZO: |
1877 | case CHIP_STONEY: | |
1878 | case CHIP_POLARIS11: | |
1879 | case CHIP_POLARIS10: | |
2c8ad2d5 | 1880 | case CHIP_POLARIS12: |
4562236b HW |
1881 | case CHIP_TONGA: |
1882 | case CHIP_FIJI: | |
1883 | #if defined(CONFIG_DRM_AMD_DC_PRE_VEGA) | |
1884 | return amdgpu_dc != 0; | |
4562236b | 1885 | #endif |
17b7cf8c AD |
1886 | case CHIP_KABINI: |
1887 | case CHIP_MULLINS: | |
1888 | return amdgpu_dc > 0; | |
42f8ffa1 HW |
1889 | case CHIP_VEGA10: |
1890 | #if defined(CONFIG_DRM_AMD_DC_DCN1_0) | |
fd187853 | 1891 | case CHIP_RAVEN: |
42f8ffa1 | 1892 | #endif |
fd187853 | 1893 | return amdgpu_dc != 0; |
4562236b HW |
1894 | #endif |
1895 | default: | |
1896 | return false; | |
1897 | } | |
1898 | } | |
1899 | ||
1900 | /** | |
1901 | * amdgpu_device_has_dc_support - check if dc is supported | |
1902 | * | |
1903 | * @adev: amdgpu_device pointer | |
1904 | * | |
1905 | * Returns true for supported, false for not supported | |
1906 | */ | |
1907 | bool amdgpu_device_has_dc_support(struct amdgpu_device *adev) | |
1908 | { | |
2555039d XY |
1909 | if (amdgpu_sriov_vf(adev)) |
1910 | return false; | |
1911 | ||
4562236b HW |
1912 | return amdgpu_device_asic_has_dc_support(adev->asic_type); |
1913 | } | |
1914 | ||
d38ceaf9 AD |
1915 | /** |
1916 | * amdgpu_device_init - initialize the driver | |
1917 | * | |
1918 | * @adev: amdgpu_device pointer | |
1919 | * @ddev: drm dev pointer | |
1920 | * @pdev: pci dev pointer | |
1921 | * @flags: driver flags | |
1922 | * | |
1923 | * Initializes the driver info and hw (all asics). | |
1924 | * Returns 0 for success or an error on failure. | |
1925 | * Called at driver startup. | |
1926 | */ | |
1927 | int amdgpu_device_init(struct amdgpu_device *adev, | |
1928 | struct drm_device *ddev, | |
1929 | struct pci_dev *pdev, | |
1930 | uint32_t flags) | |
1931 | { | |
1932 | int r, i; | |
1933 | bool runtime = false; | |
95844d20 | 1934 | u32 max_MBps; |
d38ceaf9 AD |
1935 | |
1936 | adev->shutdown = false; | |
1937 | adev->dev = &pdev->dev; | |
1938 | adev->ddev = ddev; | |
1939 | adev->pdev = pdev; | |
1940 | adev->flags = flags; | |
2f7d10b3 | 1941 | adev->asic_type = flags & AMD_ASIC_MASK; |
d38ceaf9 | 1942 | adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; |
6f02a696 | 1943 | adev->mc.gart_size = 512 * 1024 * 1024; |
d38ceaf9 AD |
1944 | adev->accel_working = false; |
1945 | adev->num_rings = 0; | |
1946 | adev->mman.buffer_funcs = NULL; | |
1947 | adev->mman.buffer_funcs_ring = NULL; | |
1948 | adev->vm_manager.vm_pte_funcs = NULL; | |
2d55e45a | 1949 | adev->vm_manager.vm_pte_num_rings = 0; |
d38ceaf9 | 1950 | adev->gart.gart_funcs = NULL; |
f54d1867 | 1951 | adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); |
b8866c26 | 1952 | bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); |
d38ceaf9 AD |
1953 | |
1954 | adev->smc_rreg = &amdgpu_invalid_rreg; | |
1955 | adev->smc_wreg = &amdgpu_invalid_wreg; | |
1956 | adev->pcie_rreg = &amdgpu_invalid_rreg; | |
1957 | adev->pcie_wreg = &amdgpu_invalid_wreg; | |
36b9a952 HR |
1958 | adev->pciep_rreg = &amdgpu_invalid_rreg; |
1959 | adev->pciep_wreg = &amdgpu_invalid_wreg; | |
d38ceaf9 AD |
1960 | adev->uvd_ctx_rreg = &amdgpu_invalid_rreg; |
1961 | adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; | |
1962 | adev->didt_rreg = &amdgpu_invalid_rreg; | |
1963 | adev->didt_wreg = &amdgpu_invalid_wreg; | |
ccdbb20a RZ |
1964 | adev->gc_cac_rreg = &amdgpu_invalid_rreg; |
1965 | adev->gc_cac_wreg = &amdgpu_invalid_wreg; | |
d38ceaf9 AD |
1966 | adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; |
1967 | adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; | |
1968 | ||
3e39ab90 AD |
1969 | DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", |
1970 | amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, | |
1971 | pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); | |
d38ceaf9 AD |
1972 | |
1973 | /* mutex initialization is done here so we |
1974 | * can call these functions without worrying about locking issues */ |
d38ceaf9 | 1975 | atomic_set(&adev->irq.ih.lock, 0); |
0e5ca0d1 | 1976 | mutex_init(&adev->firmware.mutex); |
d38ceaf9 AD |
1977 | mutex_init(&adev->pm.mutex); |
1978 | mutex_init(&adev->gfx.gpu_clock_mutex); | |
1979 | mutex_init(&adev->srbm_mutex); | |
b8866c26 | 1980 | mutex_init(&adev->gfx.pipe_reserve_mutex); |
d38ceaf9 | 1981 | mutex_init(&adev->grbm_idx_mutex); |
d38ceaf9 | 1982 | mutex_init(&adev->mn_lock); |
e23b74aa | 1983 | mutex_init(&adev->virt.vf_errors.lock); |
d38ceaf9 | 1984 | hash_init(adev->mn_hash); |
13a752e3 | 1985 | mutex_init(&adev->lock_reset); |
d38ceaf9 AD |
1986 | |
1987 | amdgpu_check_arguments(adev); | |
1988 | ||
d38ceaf9 AD |
1989 | spin_lock_init(&adev->mmio_idx_lock); |
1990 | spin_lock_init(&adev->smc_idx_lock); | |
1991 | spin_lock_init(&adev->pcie_idx_lock); | |
1992 | spin_lock_init(&adev->uvd_ctx_idx_lock); | |
1993 | spin_lock_init(&adev->didt_idx_lock); | |
ccdbb20a | 1994 | spin_lock_init(&adev->gc_cac_idx_lock); |
16abb5d2 | 1995 | spin_lock_init(&adev->se_cac_idx_lock); |
d38ceaf9 | 1996 | spin_lock_init(&adev->audio_endpt_idx_lock); |
95844d20 | 1997 | spin_lock_init(&adev->mm_stats.lock); |
d38ceaf9 | 1998 | |
0c4e7fa5 CZ |
1999 | INIT_LIST_HEAD(&adev->shadow_list); |
2000 | mutex_init(&adev->shadow_list_lock); | |
2001 | ||
795f2813 AR |
2002 | INIT_LIST_HEAD(&adev->ring_lru_list); |
2003 | spin_lock_init(&adev->ring_lru_list_lock); | |
2004 | ||
2dc80b00 S |
2005 | INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler); |
2006 | ||
0fa49558 AX |
2007 | /* Registers mapping */ |
2008 | /* TODO: block userspace mapping of io register */ | |
da69c161 KW |
2009 | if (adev->asic_type >= CHIP_BONAIRE) { |
2010 | adev->rmmio_base = pci_resource_start(adev->pdev, 5); | |
2011 | adev->rmmio_size = pci_resource_len(adev->pdev, 5); | |
2012 | } else { | |
2013 | adev->rmmio_base = pci_resource_start(adev->pdev, 2); | |
2014 | adev->rmmio_size = pci_resource_len(adev->pdev, 2); | |
2015 | } | |
d38ceaf9 | 2016 | |
d38ceaf9 AD |
2017 | adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); |
2018 | if (adev->rmmio == NULL) { | |
2019 | return -ENOMEM; | |
2020 | } | |
2021 | DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); | |
2022 | DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); | |
2023 | ||
705e519e CK |
2024 | /* doorbell bar mapping */ |
2025 | amdgpu_doorbell_init(adev); | |
d38ceaf9 AD |
2026 | |
2027 | /* io port mapping */ | |
2028 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | |
2029 | if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) { | |
2030 | adev->rio_mem_size = pci_resource_len(adev->pdev, i); | |
2031 | adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size); | |
2032 | break; | |
2033 | } | |
2034 | } | |
2035 | if (adev->rio_mem == NULL) | |
b64a18c5 | 2036 | DRM_INFO("PCI I/O BAR is not found.\n"); |
d38ceaf9 AD |
2037 | |
2038 | /* early init functions */ | |
2039 | r = amdgpu_early_init(adev); | |
2040 | if (r) | |
2041 | return r; | |
2042 | ||
2043 | /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ | |
2044 | /* this will fail for cards that aren't VGA class devices, just | |
2045 | * ignore it */ | |
2046 | vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode); | |
2047 | ||
2048 | if (amdgpu_runtime_pm == 1) | |
2049 | runtime = true; | |
e9bef455 | 2050 | if (amdgpu_device_is_px(ddev)) |
d38ceaf9 | 2051 | runtime = true; |
84c8b22e LW |
2052 | if (!pci_is_thunderbolt_attached(adev->pdev)) |
2053 | vga_switcheroo_register_client(adev->pdev, | |
2054 | &amdgpu_switcheroo_ops, runtime); | |
d38ceaf9 AD |
2055 | if (runtime) |
2056 | vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); | |
2057 | ||
2058 | /* Read BIOS */ | |
83ba126a AD |
2059 | if (!amdgpu_get_bios(adev)) { |
2060 | r = -EINVAL; | |
2061 | goto failed; | |
2062 | } | |
f7e9e9fe | 2063 | |
d38ceaf9 | 2064 | r = amdgpu_atombios_init(adev); |
2c1a2784 AD |
2065 | if (r) { |
2066 | dev_err(adev->dev, "amdgpu_atombios_init failed\n"); | |
e23b74aa | 2067 | amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); |
83ba126a | 2068 | goto failed; |
2c1a2784 | 2069 | } |
d38ceaf9 | 2070 | |
4e99a44e ML |
2071 | /* detect whether we are running with an SR-IOV vBIOS */ |
2072 | amdgpu_device_detect_sriov_bios(adev); | |
048765ad | 2073 | |
d38ceaf9 | 2074 | /* Post card if necessary */ |
91fe77eb | 2075 | if (amdgpu_need_post(adev)) { |
d38ceaf9 | 2076 | if (!adev->bios) { |
bec86378 | 2077 | dev_err(adev->dev, "no vBIOS found\n"); |
83ba126a AD |
2078 | r = -EINVAL; |
2079 | goto failed; | |
d38ceaf9 | 2080 | } |
bec86378 | 2081 | DRM_INFO("GPU posting now...\n"); |
4e99a44e ML |
2082 | r = amdgpu_atom_asic_init(adev->mode_info.atom_context); |
2083 | if (r) { | |
2084 | dev_err(adev->dev, "gpu post error!\n"); | |
2085 | goto failed; | |
2086 | } | |
d38ceaf9 AD |
2087 | } |
2088 | ||
88b64e95 AD |
2089 | if (adev->is_atom_fw) { |
2090 | /* Initialize clocks */ | |
2091 | r = amdgpu_atomfirmware_get_clock_info(adev); | |
2092 | if (r) { | |
2093 | dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n"); | |
e23b74aa | 2094 | amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); |
88b64e95 AD |
2095 | goto failed; |
2096 | } | |
2097 | } else { | |
a5bde2f9 AD |
2098 | /* Initialize clocks */ |
2099 | r = amdgpu_atombios_get_clock_info(adev); | |
2100 | if (r) { | |
2101 | dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); | |
e23b74aa | 2102 | amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); |
89041940 | 2103 | goto failed; |
a5bde2f9 AD |
2104 | } |
2105 | /* init i2c buses */ | |
4562236b HW |
2106 | if (!amdgpu_device_has_dc_support(adev)) |
2107 | amdgpu_atombios_i2c_init(adev); | |
2c1a2784 | 2108 | } |
d38ceaf9 AD |
2109 | |
2110 | /* Fence driver */ | |
2111 | r = amdgpu_fence_driver_init(adev); | |
2c1a2784 AD |
2112 | if (r) { |
2113 | dev_err(adev->dev, "amdgpu_fence_driver_init failed\n"); | |
e23b74aa | 2114 | amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0); |
83ba126a | 2115 | goto failed; |
2c1a2784 | 2116 | } |
d38ceaf9 AD |
2117 | |
2118 | /* init the mode config */ | |
2119 | drm_mode_config_init(adev->ddev); | |
2120 | ||
2121 | r = amdgpu_init(adev); | |
2122 | if (r) { | |
8840a387 | 2123 | /* failed in exclusive mode due to timeout */ |
2124 | if (amdgpu_sriov_vf(adev) && | |
2125 | !amdgpu_sriov_runtime(adev) && | |
2126 | amdgpu_virt_mmio_blocked(adev) && | |
2127 | !amdgpu_virt_wait_reset(adev)) { | |
2128 | dev_err(adev->dev, "VF exclusive mode timeout\n"); | |
1daee8b4 PD |
2129 | /* Don't send request since VF is inactive. */ |
2130 | adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; | |
2131 | adev->virt.ops = NULL; | |
8840a387 | 2132 | r = -EAGAIN; |
2133 | goto failed; | |
2134 | } | |
2c1a2784 | 2135 | dev_err(adev->dev, "amdgpu_init failed\n"); |
e23b74aa | 2136 | amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); |
d38ceaf9 | 2137 | amdgpu_fini(adev); |
83ba126a | 2138 | goto failed; |
d38ceaf9 AD |
2139 | } |
2140 | ||
2141 | adev->accel_working = true; | |
2142 | ||
e59c0205 AX |
2143 | amdgpu_vm_check_compute_bug(adev); |
2144 | ||
95844d20 MO |
2145 | /* Initialize the buffer migration limit. */ |
2146 | if (amdgpu_moverate >= 0) | |
2147 | max_MBps = amdgpu_moverate; | |
2148 | else | |
2149 | max_MBps = 8; /* Allow 8 MB/s. */ | |
2150 | /* Get a log2 for easy divisions. */ | |
2151 | adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); | |
2152 | ||
d38ceaf9 AD |
2153 | r = amdgpu_ib_pool_init(adev); |
2154 | if (r) { | |
2155 | dev_err(adev->dev, "IB initialization failed (%d).\n", r); | |
e23b74aa | 2156 | amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r); |
83ba126a | 2157 | goto failed; |
d38ceaf9 AD |
2158 | } |
2159 | ||
2160 | r = amdgpu_ib_ring_tests(adev); | |
2161 | if (r) | |
2162 | DRM_ERROR("ib ring test failed (%d).\n", r); | |
2163 | ||
2dc8f81e HC |
2164 | if (amdgpu_sriov_vf(adev)) |
2165 | amdgpu_virt_init_data_exchange(adev); | |
2166 | ||
9bc92b9c ML |
2167 | amdgpu_fbdev_init(adev); |
2168 | ||
d2f52ac8 RZ |
2169 | r = amdgpu_pm_sysfs_init(adev); |
2170 | if (r) | |
2171 | DRM_ERROR("registering pm debugfs failed (%d).\n", r); | |
2172 | ||
d38ceaf9 | 2173 | r = amdgpu_gem_debugfs_init(adev); |
3f14e623 | 2174 | if (r) |
d38ceaf9 | 2175 | DRM_ERROR("registering gem debugfs failed (%d).\n", r); |
d38ceaf9 AD |
2176 | |
2177 | r = amdgpu_debugfs_regs_init(adev); | |
3f14e623 | 2178 | if (r) |
d38ceaf9 | 2179 | DRM_ERROR("registering register debugfs failed (%d).\n", r); |
d38ceaf9 | 2180 | |
50ab2533 | 2181 | r = amdgpu_debugfs_firmware_init(adev); |
3f14e623 | 2182 | if (r) |
50ab2533 | 2183 | DRM_ERROR("registering firmware debugfs failed (%d).\n", r); |
50ab2533 | 2184 | |
763efb6c | 2185 | r = amdgpu_debugfs_init(adev); |
db95e218 | 2186 | if (r) |
763efb6c | 2187 | DRM_ERROR("Creating debugfs files failed (%d).\n", r); |
db95e218 | 2188 | |
d38ceaf9 AD |
2189 | if ((amdgpu_testing & 1)) { |
2190 | if (adev->accel_working) | |
2191 | amdgpu_test_moves(adev); | |
2192 | else | |
2193 | DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n"); | |
2194 | } | |
d38ceaf9 AD |
2195 | if (amdgpu_benchmarking) { |
2196 | if (adev->accel_working) | |
2197 | amdgpu_benchmark(adev, amdgpu_benchmarking); | |
2198 | else | |
2199 | DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n"); | |
2200 | } | |
2201 | ||
2202 | /* enable clockgating, etc. after ib tests, etc. since some blocks require | |
2203 | * explicit gating rather than handling it automatically. | |
2204 | */ | |
2205 | r = amdgpu_late_init(adev); | |
2c1a2784 AD |
2206 | if (r) { |
2207 | dev_err(adev->dev, "amdgpu_late_init failed\n"); | |
e23b74aa | 2208 | amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r); |
83ba126a | 2209 | goto failed; |
2c1a2784 | 2210 | } |
d38ceaf9 AD |
2211 | |
2212 | return 0; | |
83ba126a AD |
2213 | |
2214 | failed: | |
89041940 | 2215 | amdgpu_vf_error_trans_all(adev); |
83ba126a AD |
2216 | if (runtime) |
2217 | vga_switcheroo_fini_domain_pm_ops(adev->dev); | |
8840a387 | 2218 | |
83ba126a | 2219 | return r; |
d38ceaf9 AD |
2220 | } |
2221 | ||
d38ceaf9 AD |
2222 | /** |
2223 | * amdgpu_device_fini - tear down the driver | |
2224 | * | |
2225 | * @adev: amdgpu_device pointer | |
2226 | * | |
2227 | * Tear down the driver info (all asics). | |
2228 | * Called at driver shutdown. | |
2229 | */ | |
2230 | void amdgpu_device_fini(struct amdgpu_device *adev) | |
2231 | { | |
2232 | int r; | |
2233 | ||
2234 | DRM_INFO("amdgpu: finishing device.\n"); | |
2235 | adev->shutdown = true; | |
db2c2a97 PD |
2236 | if (adev->mode_info.mode_config_initialized) |
2237 | drm_crtc_force_disable_all(adev->ddev); | |
b9141cd3 | 2238 | |
d38ceaf9 AD |
2239 | amdgpu_ib_pool_fini(adev); |
2240 | amdgpu_fence_driver_fini(adev); | |
2241 | amdgpu_fbdev_fini(adev); | |
2242 | r = amdgpu_fini(adev); | |
ab4fe3e1 HR |
2243 | if (adev->firmware.gpu_info_fw) { |
2244 | release_firmware(adev->firmware.gpu_info_fw); | |
2245 | adev->firmware.gpu_info_fw = NULL; | |
2246 | } | |
d38ceaf9 | 2247 | adev->accel_working = false; |
2dc80b00 | 2248 | cancel_delayed_work_sync(&adev->late_init_work); |
d38ceaf9 | 2249 | /* free i2c buses */ |
4562236b HW |
2250 | if (!amdgpu_device_has_dc_support(adev)) |
2251 | amdgpu_i2c_fini(adev); | |
d38ceaf9 AD |
2252 | amdgpu_atombios_fini(adev); |
2253 | kfree(adev->bios); | |
2254 | adev->bios = NULL; | |
84c8b22e LW |
2255 | if (!pci_is_thunderbolt_attached(adev->pdev)) |
2256 | vga_switcheroo_unregister_client(adev->pdev); | |
83ba126a AD |
2257 | if (adev->flags & AMD_IS_PX) |
2258 | vga_switcheroo_fini_domain_pm_ops(adev->dev); | |
d38ceaf9 AD |
2259 | vga_client_register(adev->pdev, NULL, NULL, NULL); |
2260 | if (adev->rio_mem) | |
2261 | pci_iounmap(adev->pdev, adev->rio_mem); | |
2262 | adev->rio_mem = NULL; | |
2263 | iounmap(adev->rmmio); | |
2264 | adev->rmmio = NULL; | |
705e519e | 2265 | amdgpu_doorbell_fini(adev); |
d2f52ac8 | 2266 | amdgpu_pm_sysfs_fini(adev); |
d38ceaf9 | 2267 | amdgpu_debugfs_regs_cleanup(adev); |
d38ceaf9 AD |
2268 | } |
2269 | ||
2270 | ||
2271 | /* | |
2272 | * Suspend & resume. | |
2273 | */ | |
2274 | /** | |
810ddc3a | 2275 | * amdgpu_device_suspend - initiate device suspend |
d38ceaf9 AD |
2276 | * |
2277 | * @dev: drm dev pointer | |
2278 | * @suspend: true to put the hw into a low-power PCI state | |
2279 | * | |
2280 | * Puts the hw in the suspend state (all asics). | |
2281 | * Returns 0 for success or an error on failure. | |
2282 | * Called at driver suspend. | |
2283 | */ | |
810ddc3a | 2284 | int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) |
d38ceaf9 AD |
2285 | { |
2286 | struct amdgpu_device *adev; | |
2287 | struct drm_crtc *crtc; | |
2288 | struct drm_connector *connector; | |
5ceb54c6 | 2289 | int r; |
d38ceaf9 AD |
2290 | |
2291 | if (dev == NULL || dev->dev_private == NULL) { | |
2292 | return -ENODEV; | |
2293 | } | |
2294 | ||
2295 | adev = dev->dev_private; | |
2296 | ||
2297 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) | |
2298 | return 0; | |
2299 | ||
2300 | drm_kms_helper_poll_disable(dev); | |
2301 | ||
4562236b HW |
2302 | if (!amdgpu_device_has_dc_support(adev)) { |
2303 | /* turn off display hw */ | |
2304 | drm_modeset_lock_all(dev); | |
2305 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | |
2306 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); | |
2307 | } | |
2308 | drm_modeset_unlock_all(dev); | |
d38ceaf9 AD |
2309 | } |
2310 | ||
ba997709 YZ |
2311 | amdgpu_amdkfd_suspend(adev); |
2312 | ||
756e6880 | 2313 | /* unpin the front buffers and cursors */ |
d38ceaf9 | 2314 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
756e6880 | 2315 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); |
d38ceaf9 AD |
2316 | struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb); |
2317 | struct amdgpu_bo *robj; | |
2318 | ||
756e6880 AD |
2319 | if (amdgpu_crtc->cursor_bo) { |
2320 | struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); | |
7a6901d7 | 2321 | r = amdgpu_bo_reserve(aobj, true); |
756e6880 AD |
2322 | if (r == 0) { |
2323 | amdgpu_bo_unpin(aobj); | |
2324 | amdgpu_bo_unreserve(aobj); | |
2325 | } | |
2326 | } | |
2327 | ||
d38ceaf9 AD |
2328 | if (rfb == NULL || rfb->obj == NULL) { |
2329 | continue; | |
2330 | } | |
2331 | robj = gem_to_amdgpu_bo(rfb->obj); | |
2332 | /* don't unpin kernel fb objects */ | |
2333 | if (!amdgpu_fbdev_robj_is_fb(adev, robj)) { | |
7a6901d7 | 2334 | r = amdgpu_bo_reserve(robj, true); |
d38ceaf9 AD |
2335 | if (r == 0) { |
2336 | amdgpu_bo_unpin(robj); | |
2337 | amdgpu_bo_unreserve(robj); | |
2338 | } | |
2339 | } | |
2340 | } | |
2341 | /* evict vram memory */ | |
2342 | amdgpu_bo_evict_vram(adev); | |
2343 | ||
5ceb54c6 | 2344 | amdgpu_fence_driver_suspend(adev); |
d38ceaf9 AD |
2345 | |
2346 | r = amdgpu_suspend(adev); | |
2347 | ||
a0a71e49 AD |
2348 | /* evict remaining vram memory |
2349 | * This second call to evict vram is to evict the gart page table | |
2350 | * using the CPU. | |
2351 | */ | |
d38ceaf9 AD |
2352 | amdgpu_bo_evict_vram(adev); |
2353 | ||
2354 | pci_save_state(dev->pdev); | |
2355 | if (suspend) { | |
2356 | /* Shut down the device */ | |
2357 | pci_disable_device(dev->pdev); | |
2358 | pci_set_power_state(dev->pdev, PCI_D3hot); | |
74b0b157 | 2359 | } else { |
2360 | r = amdgpu_asic_reset(adev); | |
2361 | if (r) | |
2362 | DRM_ERROR("amdgpu asic reset failed\n"); | |
d38ceaf9 AD |
2363 | } |
2364 | ||
2365 | if (fbcon) { | |
2366 | console_lock(); | |
2367 | amdgpu_fbdev_set_suspend(adev, 1); | |
2368 | console_unlock(); | |
2369 | } | |
2370 | return 0; | |
2371 | } | |
2372 | ||
2373 | /** | |
810ddc3a | 2374 | * amdgpu_device_resume - initiate device resume |
d38ceaf9 AD |
2375 | * |
2376 | * @dev: drm dev pointer | |
2377 | * | |
2378 | * Bring the hw back to operating state (all asics). | |
2379 | * Returns 0 for success or an error on failure. | |
2380 | * Called at driver resume. | |
2381 | */ | |
810ddc3a | 2382 | int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) |
d38ceaf9 AD |
2383 | { |
2384 | struct drm_connector *connector; | |
2385 | struct amdgpu_device *adev = dev->dev_private; | |
756e6880 | 2386 | struct drm_crtc *crtc; |
03161a6e | 2387 | int r = 0; |
d38ceaf9 AD |
2388 | |
2389 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) | |
2390 | return 0; | |
2391 | ||
74b0b157 | 2392 | if (fbcon) |
d38ceaf9 | 2393 | console_lock(); |
74b0b157 | 2394 | |
d38ceaf9 AD |
2395 | if (resume) { |
2396 | pci_set_power_state(dev->pdev, PCI_D0); | |
2397 | pci_restore_state(dev->pdev); | |
74b0b157 | 2398 | r = pci_enable_device(dev->pdev); |
03161a6e HR |
2399 | if (r) |
2400 | goto unlock; | |
d38ceaf9 AD |
2401 | } |
2402 | ||
2403 | /* post card */ | |
c836fec5 | 2404 | if (amdgpu_need_post(adev)) { |
74b0b157 | 2405 | r = amdgpu_atom_asic_init(adev->mode_info.atom_context); |
2406 | if (r) | |
2407 | DRM_ERROR("amdgpu asic init failed\n"); | |
2408 | } | |
d38ceaf9 AD |
2409 | |
2410 | r = amdgpu_resume(adev); | |
e6707218 | 2411 | if (r) { |
ca198528 | 2412 | DRM_ERROR("amdgpu_resume failed (%d).\n", r); |
03161a6e | 2413 | goto unlock; |
e6707218 | 2414 | } |
5ceb54c6 AD |
2415 | amdgpu_fence_driver_resume(adev); |
2416 | ||
ca198528 FC |
2417 | if (resume) { |
2418 | r = amdgpu_ib_ring_tests(adev); | |
2419 | if (r) | |
2420 | DRM_ERROR("ib ring test failed (%d).\n", r); | |
2421 | } | |
d38ceaf9 AD |
2422 | |
2423 | r = amdgpu_late_init(adev); | |
03161a6e HR |
2424 | if (r) |
2425 | goto unlock; | |
d38ceaf9 | 2426 | |
756e6880 AD |
2427 | /* pin cursors */ |
2428 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | |
2429 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | |
2430 | ||
2431 | if (amdgpu_crtc->cursor_bo) { | |
2432 | struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); | |
7a6901d7 | 2433 | r = amdgpu_bo_reserve(aobj, true); |
756e6880 AD |
2434 | if (r == 0) { |
2435 | r = amdgpu_bo_pin(aobj, | |
2436 | AMDGPU_GEM_DOMAIN_VRAM, | |
2437 | &amdgpu_crtc->cursor_addr); | |
2438 | if (r != 0) | |
2439 | DRM_ERROR("Failed to pin cursor BO (%d)\n", r); | |
2440 | amdgpu_bo_unreserve(aobj); | |
2441 | } | |
2442 | } | |
2443 | } | |
ba997709 YZ |
2444 | r = amdgpu_amdkfd_resume(adev); |
2445 | if (r) | |
2446 | return r; | |
756e6880 | 2447 | |
d38ceaf9 AD |
2448 | /* blat the mode back in */ |
2449 | if (fbcon) { | |
4562236b HW |
2450 | if (!amdgpu_device_has_dc_support(adev)) { |
2451 | /* pre DCE11 */ | |
2452 | drm_helper_resume_force_mode(dev); | |
2453 | ||
2454 | /* turn on display hw */ | |
2455 | drm_modeset_lock_all(dev); | |
2456 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | |
2457 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | |
2458 | } | |
2459 | drm_modeset_unlock_all(dev); | |
2460 | } else { | |
2461 | /* | |
2462 | * There is no equivalent atomic helper to turn on | |
2463 | * display, so we defined our own function for this, | |
2464 | * once suspend resume is supported by the atomic | |
2465 | * framework this will be reworked | |
2466 | */ | |
2467 | amdgpu_dm_display_resume(adev); | |
d38ceaf9 AD |
2468 | } |
2469 | } | |
2470 | ||
2471 | drm_kms_helper_poll_enable(dev); | |
23a1a9e5 L |
2472 | |
2473 | /* | |
2474 | * Most of the connector probing functions try to acquire runtime pm | |
2475 | * refs to ensure that the GPU is powered on when connector polling is | |
2476 | * performed. Since we're calling this from a runtime PM callback, | |
2477 | * trying to acquire rpm refs will cause us to deadlock. | |
2478 | * | |
2479 | * Since we're guaranteed to be holding the rpm lock, it's safe to | |
2480 | * temporarily disable the rpm helpers so this doesn't deadlock us. | |
2481 | */ | |
2482 | #ifdef CONFIG_PM | |
2483 | dev->dev->power.disable_depth++; | |
2484 | #endif | |
4562236b HW |
2485 | if (!amdgpu_device_has_dc_support(adev)) |
2486 | drm_helper_hpd_irq_event(dev); | |
2487 | else | |
2488 | drm_kms_helper_hotplug_event(dev); | |
23a1a9e5 L |
2489 | #ifdef CONFIG_PM |
2490 | dev->dev->power.disable_depth--; | |
2491 | #endif | |
d38ceaf9 | 2492 | |
03161a6e | 2493 | if (fbcon) |
d38ceaf9 | 2494 | amdgpu_fbdev_set_suspend(adev, 0); |
03161a6e HR |
2495 | |
2496 | unlock: | |
2497 | if (fbcon) | |
d38ceaf9 | 2498 | console_unlock(); |
d38ceaf9 | 2499 | |
03161a6e | 2500 | return r; |
d38ceaf9 AD |
2501 | } |
2502 | ||
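/*
 * Ask every valid IP block whether it is hung via its check_soft_reset
 * callback and latch the result in its status.hang flag.  For SR-IOV VFs
 * this always reports a hang.
 */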
63fbf42f CZ |
2503 | static bool amdgpu_check_soft_reset(struct amdgpu_device *adev) |
2504 | { | |
2505 | int i; | |
2506 | bool asic_hang = false; | |
2507 | ||
f993d628 ML |
2508 | if (amdgpu_sriov_vf(adev)) |
2509 | return true; | |
2510 | ||
63fbf42f | 2511 | for (i = 0; i < adev->num_ip_blocks; i++) { |
a1255107 | 2512 | if (!adev->ip_blocks[i].status.valid) |
63fbf42f | 2513 | continue; |
a1255107 AD |
2514 | if (adev->ip_blocks[i].version->funcs->check_soft_reset) |
2515 | adev->ip_blocks[i].status.hang = | |
2516 | adev->ip_blocks[i].version->funcs->check_soft_reset(adev); | |
2517 | if (adev->ip_blocks[i].status.hang) { | |
2518 | DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); | |
63fbf42f CZ |
2519 | asic_hang = true; |
2520 | } | |
2521 | } | |
2522 | return asic_hang; | |
2523 | } | |
2524 | ||
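/*
 * Soft-reset helpers: for each hung block, pre_soft_reset prepares the
 * block, soft_reset performs the per-block reset and post_soft_reset
 * restores it.  amdgpu_need_full_reset() escalates to a full ASIC reset
 * when one of the core blocks (GMC, SMC, ACP, DCE or PSP) is hung.
 */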
4d446656 | 2525 | static int amdgpu_pre_soft_reset(struct amdgpu_device *adev) |
d31a501e CZ |
2526 | { |
2527 | int i, r = 0; | |
2528 | ||
2529 | for (i = 0; i < adev->num_ip_blocks; i++) { | |
a1255107 | 2530 | if (!adev->ip_blocks[i].status.valid) |
d31a501e | 2531 | continue; |
a1255107 AD |
2532 | if (adev->ip_blocks[i].status.hang && |
2533 | adev->ip_blocks[i].version->funcs->pre_soft_reset) { | |
2534 | r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev); | |
d31a501e CZ |
2535 | if (r) |
2536 | return r; | |
2537 | } | |
2538 | } | |
2539 | ||
2540 | return 0; | |
2541 | } | |
2542 | ||
35d782fe CZ |
2543 | static bool amdgpu_need_full_reset(struct amdgpu_device *adev) |
2544 | { | |
da146d3b AD |
2545 | int i; |
2546 | ||
2547 | for (i = 0; i < adev->num_ip_blocks; i++) { | |
a1255107 | 2548 | if (!adev->ip_blocks[i].status.valid) |
da146d3b | 2549 | continue; |
a1255107 AD |
2550 | if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) || |
2551 | (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) || | |
2552 | (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) || | |
98512bb8 KW |
2553 | (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) || |
2554 | adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { | |
a1255107 | 2555 | if (adev->ip_blocks[i].status.hang) { |
da146d3b AD |
2556 | DRM_INFO("Some blocks need a full reset!\n"); | |
2557 | return true; | |
2558 | } | |
2559 | } | |
35d782fe CZ |
2560 | } |
2561 | return false; | |
2562 | } | |
2563 | ||
2564 | static int amdgpu_soft_reset(struct amdgpu_device *adev) | |
2565 | { | |
2566 | int i, r = 0; | |
2567 | ||
2568 | for (i = 0; i < adev->num_ip_blocks; i++) { | |
a1255107 | 2569 | if (!adev->ip_blocks[i].status.valid) |
35d782fe | 2570 | continue; |
a1255107 AD |
2571 | if (adev->ip_blocks[i].status.hang && |
2572 | adev->ip_blocks[i].version->funcs->soft_reset) { | |
2573 | r = adev->ip_blocks[i].version->funcs->soft_reset(adev); | |
35d782fe CZ |
2574 | if (r) |
2575 | return r; | |
2576 | } | |
2577 | } | |
2578 | ||
2579 | return 0; | |
2580 | } | |
2581 | ||
2582 | static int amdgpu_post_soft_reset(struct amdgpu_device *adev) | |
2583 | { | |
2584 | int i, r = 0; | |
2585 | ||
2586 | for (i = 0; i < adev->num_ip_blocks; i++) { | |
a1255107 | 2587 | if (!adev->ip_blocks[i].status.valid) |
35d782fe | 2588 | continue; |
a1255107 AD |
2589 | if (adev->ip_blocks[i].status.hang && |
2590 | adev->ip_blocks[i].version->funcs->post_soft_reset) | |
2591 | r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev); | |
35d782fe CZ |
2592 | if (r) |
2593 | return r; | |
2594 | } | |
2595 | ||
2596 | return 0; | |
2597 | } | |
2598 | ||
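/*
 * Shadow-buffer backup of VRAM is only used on dGPUs, and only when GPU
 * recovery (amdgpu_gpu_recovery) is enabled.
 */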
3ad81f16 CZ |
2599 | bool amdgpu_need_backup(struct amdgpu_device *adev) |
2600 | { | |
2601 | if (adev->flags & AMD_IS_APU) | |
2602 | return false; | |
2603 | ||
8854695a | 2604 | return amdgpu_gpu_recovery; |
3ad81f16 CZ |
2605 | } |
2606 | ||
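/*
 * Restore a single buffer object from its shadow copy after a reset that
 * lost VRAM contents.  BOs without a shadow, or no longer resident in VRAM,
 * are skipped.  The caller (amdgpu_gpu_recover) walks the shadow list and
 * waits on the returned fences.
 */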
53cdccd5 CZ |
2607 | static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev, |
2608 | struct amdgpu_ring *ring, | |
2609 | struct amdgpu_bo *bo, | |
f54d1867 | 2610 | struct dma_fence **fence) |
53cdccd5 CZ |
2611 | { |
2612 | uint32_t domain; | |
2613 | int r; | |
2614 | ||
23d2e504 RH |
2615 | if (!bo->shadow) |
2616 | return 0; | |
2617 | ||
1d284797 | 2618 | r = amdgpu_bo_reserve(bo, true); |
23d2e504 RH |
2619 | if (r) |
2620 | return r; | |
2621 | domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); | |
2622 | /* if bo has been evicted, then no need to recover */ | |
2623 | if (domain == AMDGPU_GEM_DOMAIN_VRAM) { | |
82521316 RH |
2624 | r = amdgpu_bo_validate(bo->shadow); |
2625 | if (r) { | |
2626 | DRM_ERROR("bo validate failed!\n"); | |
2627 | goto err; | |
2628 | } | |
2629 | ||
23d2e504 | 2630 | r = amdgpu_bo_restore_from_shadow(adev, ring, bo, |
53cdccd5 | 2631 | NULL, fence, true); |
23d2e504 RH |
2632 | if (r) { |
2633 | DRM_ERROR("recover page table failed!\n"); | |
2634 | goto err; | |
2635 | } | |
2636 | } | |
53cdccd5 | 2637 | err: |
23d2e504 RH |
2638 | amdgpu_bo_unreserve(bo); |
2639 | return r; | |
53cdccd5 CZ |
2640 | } |
2641 | ||
5740682e ML |
2642 | /* |
2643 | * amdgpu_reset - reset ASIC/GPU for bare-metal or passthrough | |
a90ad3c2 ML |
2644 | * |
2645 | * @adev: amdgpu device pointer | |
5740682e | 2646 | * @reset_flags: output param tells caller the reset result |
a90ad3c2 | 2647 | * |
5740682e ML |
2648 | * Attempt a soft reset or, if necessary, a full reset, then reinitialize the ASIC. | |
2649 | * Returns 0 on success, negative error code on failure. | |
2650 | */ | |
2651 | static int amdgpu_reset(struct amdgpu_device *adev, uint64_t* reset_flags) | |
a90ad3c2 | 2652 | { |
5740682e ML |
2653 | bool need_full_reset, vram_lost = 0; |
2654 | int r; | |
a90ad3c2 | 2655 | |
5740682e | 2656 | need_full_reset = amdgpu_need_full_reset(adev); |
a90ad3c2 | 2657 | |
5740682e ML |
2658 | if (!need_full_reset) { |
2659 | amdgpu_pre_soft_reset(adev); | |
2660 | r = amdgpu_soft_reset(adev); | |
2661 | amdgpu_post_soft_reset(adev); | |
2662 | if (r || amdgpu_check_soft_reset(adev)) { | |
2663 | DRM_INFO("soft reset failed, will fall back to full reset!\n"); | |
2664 | need_full_reset = true; | |
2665 | } | |
a90ad3c2 | 2666 | |
5740682e | 2667 | } |
a90ad3c2 | 2668 | |
5740682e ML |
2669 | if (need_full_reset) { |
2670 | r = amdgpu_suspend(adev); | |
a90ad3c2 | 2671 | |
5740682e | 2672 | retry: |
5740682e | 2673 | r = amdgpu_asic_reset(adev); |
5740682e ML |
2674 | /* post card */ |
2675 | amdgpu_atom_asic_init(adev->mode_info.atom_context); | |
65781c78 | 2676 | |
5740682e ML |
2677 | if (!r) { |
2678 | dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); | |
2679 | r = amdgpu_resume_phase1(adev); | |
2680 | if (r) | |
2681 | goto out; | |
65781c78 | 2682 | |
5740682e ML |
2683 | vram_lost = amdgpu_check_vram_lost(adev); |
2684 | if (vram_lost) { | |
2685 | DRM_ERROR("VRAM is lost!\n"); | |
2686 | atomic_inc(&adev->vram_lost_counter); | |
2687 | } | |
2688 | ||
c1c7ce8f CK |
2689 | r = amdgpu_gtt_mgr_recover( |
2690 | &adev->mman.bdev.man[TTM_PL_TT]); | |
5740682e ML |
2691 | if (r) |
2692 | goto out; | |
2693 | ||
2694 | r = amdgpu_resume_phase2(adev); | |
2695 | if (r) | |
2696 | goto out; | |
2697 | ||
2698 | if (vram_lost) | |
2699 | amdgpu_fill_reset_magic(adev); | |
65781c78 | 2700 | } |
5740682e | 2701 | } |
65781c78 | 2702 | |
5740682e ML |
2703 | out: |
2704 | if (!r) { | |
2705 | amdgpu_irq_gpu_reset_resume_helper(adev); | |
2706 | r = amdgpu_ib_ring_tests(adev); | |
2707 | if (r) { | |
2708 | dev_err(adev->dev, "ib ring test failed (%d).\n", r); | |
2709 | r = amdgpu_suspend(adev); | |
2710 | need_full_reset = true; | |
2711 | goto retry; | |
2712 | } | |
2713 | } | |
65781c78 | 2714 | |
5740682e ML |
2715 | if (reset_flags) { |
2716 | if (vram_lost) | |
2717 | (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST; | |
a90ad3c2 | 2718 | |
5740682e ML |
2719 | if (need_full_reset) |
2720 | (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET; | |
65781c78 | 2721 | } |
a90ad3c2 | 2722 | |
5740682e ML |
2723 | return r; |
2724 | } | |
a90ad3c2 | 2725 | |
5740682e ML |
2726 | /* |
2727 | * amdgpu_reset_sriov - reset ASIC for SR-IOV vf | |
2728 | * | |
2729 | * @adev: amdgpu device pointer | |
2730 | * @reset_flags: output param tells caller the reset result | |
2731 | * | |
2732 | * Perform a VF FLR and reinitialize the ASIC. | |
2733 | * Returns 0 on success, negative error code on failure. | |
2734 | */ | |
2735 | static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags, bool from_hypervisor) | |
2736 | { | |
2737 | int r; | |
2738 | ||
2739 | if (from_hypervisor) | |
2740 | r = amdgpu_virt_request_full_gpu(adev, true); | |
2741 | else | |
2742 | r = amdgpu_virt_reset_gpu(adev); | |
2743 | if (r) | |
2744 | return r; | |
a90ad3c2 ML |
2745 | |
2746 | /* Resume IP prior to SMC */ | |
5740682e ML |
2747 | r = amdgpu_sriov_reinit_early(adev); |
2748 | if (r) | |
2749 | goto error; | |
a90ad3c2 ML |
2750 | |
2751 | /* we need to recover the GART table before running the SMC/CP/SDMA resume */ | |
c1c7ce8f | 2752 | amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]); |
a90ad3c2 ML |
2753 | |
2754 | /* now we are okay to resume SMC/CP/SDMA */ | |
5740682e ML |
2755 | r = amdgpu_sriov_reinit_late(adev); |
2756 | if (r) | |
2757 | goto error; | |
a90ad3c2 ML |
2758 | |
2759 | amdgpu_irq_gpu_reset_resume_helper(adev); | |
5740682e ML |
2760 | r = amdgpu_ib_ring_tests(adev); |
2761 | if (r) | |
a90ad3c2 ML |
2762 | dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r); |
2763 | ||
5740682e | 2764 | error: |
a90ad3c2 ML |
2765 | /* release full control of GPU after ib test */ |
2766 | amdgpu_virt_release_full_gpu(adev, true); | |
2767 | ||
5740682e | 2768 | if (reset_flags) { |
75bc6099 ML |
2769 | if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { |
2770 | (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST; | |
2771 | atomic_inc(&adev->vram_lost_counter); | |
2772 | } | |
a90ad3c2 | 2773 | |
5740682e ML |
2774 | /* VF FLR or hotlink reset is always full-reset */ |
2775 | (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET; | |
a90ad3c2 ML |
2776 | } |
2777 | ||
2778 | return r; | |
2779 | } | |
2780 | ||
d38ceaf9 | 2781 | /** |
5740682e | 2782 | * amdgpu_gpu_recover - reset the asic and recover scheduler |
d38ceaf9 AD |
2783 | * |
2784 | * @adev: amdgpu device pointer | |
5740682e | 2785 | * @job: which job triggered the hang |
dcebf026 | 2786 | * @force: forces reset regardless of amdgpu_gpu_recovery |
d38ceaf9 | 2787 | * |
5740682e | 2788 | * Attempt to reset the GPU if it has hung (all asics). |
d38ceaf9 AD |
2789 | * Returns 0 for success or an error on failure. |
2790 | */ | |
dcebf026 | 2791 | int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job, bool force) |
d38ceaf9 | 2792 | { |
4562236b | 2793 | struct drm_atomic_state *state = NULL; |
5740682e ML |
2794 | uint64_t reset_flags = 0; |
2795 | int i, r, resched; | |
fb140b29 | 2796 | |
63fbf42f CZ |
2797 | if (!amdgpu_check_soft_reset(adev)) { |
2798 | DRM_INFO("No hardware hang detected. Did some blocks stall?\n"); | |
2799 | return 0; | |
2800 | } | |
d38ceaf9 | 2801 | |
dcebf026 AG |
2802 | if (!force && (amdgpu_gpu_recovery == 0 || |
2803 | (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) { | |
2804 | DRM_INFO("GPU recovery disabled.\n"); | |
2805 | return 0; | |
2806 | } | |
2807 | ||
5740682e ML |
2808 | dev_info(adev->dev, "GPU reset begin!\n"); |
2809 | ||
13a752e3 | 2810 | mutex_lock(&adev->lock_reset); |
d94aed5a | 2811 | atomic_inc(&adev->gpu_reset_counter); |
13a752e3 | 2812 | adev->in_gpu_reset = 1; |
d38ceaf9 | 2813 | |
a3c47d6b CZ |
2814 | /* block TTM */ |
2815 | resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); | |
4562236b HW |
2816 | /* store modesetting */ |
2817 | if (amdgpu_device_has_dc_support(adev)) | |
2818 | state = drm_atomic_helper_suspend(adev->ddev); | |
a3c47d6b | 2819 | |
0875dc9e CZ |
2820 | /* block scheduler */ |
2821 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | |
2822 | struct amdgpu_ring *ring = adev->rings[i]; | |
2823 | ||
51687759 | 2824 | if (!ring || !ring->sched.thread) |
0875dc9e | 2825 | continue; |
5740682e ML |
2826 | |
2827 | /* only focus on the ring hit timeout if &job not NULL */ | |
2828 | if (job && job->ring->idx != i) | |
2829 | continue; | |
2830 | ||
0875dc9e | 2831 | kthread_park(ring->sched.thread); |
1b1f42d8 | 2832 | drm_sched_hw_job_reset(&ring->sched, &job->base); |
5740682e | 2833 | |
2f9d4084 ML |
2834 | /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ |
2835 | amdgpu_fence_driver_force_completion(ring); | |
0875dc9e | 2836 | } |
d38ceaf9 | 2837 | |
5740682e ML |
2838 | if (amdgpu_sriov_vf(adev)) |
2839 | r = amdgpu_reset_sriov(adev, &reset_flags, job ? false : true); | |
2840 | else | |
2841 | r = amdgpu_reset(adev, &reset_flags); | |
35d782fe | 2842 | |
d38ceaf9 | 2843 | if (!r) { |
5740682e ML |
2844 | if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) || |
2845 | (reset_flags & AMDGPU_RESET_INFO_VRAM_LOST)) { | |
53cdccd5 CZ |
2846 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; |
2847 | struct amdgpu_bo *bo, *tmp; | |
f54d1867 | 2848 | struct dma_fence *fence = NULL, *next = NULL; |
53cdccd5 CZ |
2849 | |
2850 | DRM_INFO("recover vram bo from shadow\n"); | |
2851 | mutex_lock(&adev->shadow_list_lock); | |
2852 | list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) { | |
236763d3 | 2853 | next = NULL; |
53cdccd5 CZ |
2854 | amdgpu_recover_vram_from_shadow(adev, ring, bo, &next); |
2855 | if (fence) { | |
f54d1867 | 2856 | r = dma_fence_wait(fence, false); |
53cdccd5 | 2857 | if (r) { |
1d7b17b0 | 2858 | WARN(r, "recovery from shadow isn't completed\n"); |
53cdccd5 CZ |
2859 | break; |
2860 | } | |
2861 | } | |
1f465087 | 2862 | |
f54d1867 | 2863 | dma_fence_put(fence); |
53cdccd5 CZ |
2864 | fence = next; |
2865 | } | |
2866 | mutex_unlock(&adev->shadow_list_lock); | |
2867 | if (fence) { | |
f54d1867 | 2868 | r = dma_fence_wait(fence, false); |
53cdccd5 | 2869 | if (r) |
1d7b17b0 | 2870 | WARN(r, "recovery from shadow isn't completed\n"); |
53cdccd5 | 2871 | } |
f54d1867 | 2872 | dma_fence_put(fence); |
53cdccd5 | 2873 | } |
5740682e | 2874 | |
d38ceaf9 AD |
2875 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { |
2876 | struct amdgpu_ring *ring = adev->rings[i]; | |
51687759 CZ |
2877 | |
2878 | if (!ring || !ring->sched.thread) | |
d38ceaf9 | 2879 | continue; |
53cdccd5 | 2880 | |
5740682e ML |
2881 | /* only focus on the ring hit timeout if &job not NULL */ |
2882 | if (job && job->ring->idx != i) | |
2883 | continue; | |
2884 | ||
1b1f42d8 | 2885 | drm_sched_job_recovery(&ring->sched); |
0875dc9e | 2886 | kthread_unpark(ring->sched.thread); |
d38ceaf9 | 2887 | } |
d38ceaf9 | 2888 | } else { |
d38ceaf9 | 2889 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { |
5740682e ML |
2890 | struct amdgpu_ring *ring = adev->rings[i]; |
2891 | ||
2892 | if (!ring || !ring->sched.thread) | |
2893 | continue; | |
2894 | ||
2895 | /* only focus on the ring hit timeout if &job not NULL */ | |
2896 | if (job && job->ring->idx != i) | |
2897 | continue; | |
2898 | ||
2899 | kthread_unpark(adev->rings[i]->sched.thread); | |
d38ceaf9 AD |
2900 | } |
2901 | } | |
2902 | ||
4562236b | 2903 | if (amdgpu_device_has_dc_support(adev)) { |
5740682e ML |
2904 | if (drm_atomic_helper_resume(adev->ddev, state)) |
2905 | dev_info(adev->dev, "drm resume failed:%d\n", r); | |
4562236b | 2906 | amdgpu_dm_display_resume(adev); |
5740682e | 2907 | } else { |
4562236b | 2908 | drm_helper_resume_force_mode(adev->ddev); |
5740682e | 2909 | } |
d38ceaf9 AD |
2910 | |
2911 | ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); | |
5740682e | 2912 | |
89041940 | 2913 | if (r) { |
d38ceaf9 | 2914 | /* bad news, how to tell it to userspace ? */ |
5740682e ML |
2915 | dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter)); |
2916 | amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); | |
2917 | } else { | |
2918 | dev_info(adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter)); | |
89041940 | 2919 | } |
d38ceaf9 | 2920 | |
89041940 | 2921 | amdgpu_vf_error_trans_all(adev); |
13a752e3 ML |
2922 | adev->in_gpu_reset = 0; |
2923 | mutex_unlock(&adev->lock_reset); | |
d38ceaf9 AD |
2924 | return r; |
2925 | } | |
2926 | ||
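/*
 * Fill in adev->pm.pcie_gen_mask and pcie_mlw_mask: module parameters take
 * precedence, devices on a root bus fall back to the defaults, otherwise
 * the masks are derived from the PCIe speed and link width reported by the
 * DRM helpers.
 */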
d0dd7f0c AD |
2927 | void amdgpu_get_pcie_info(struct amdgpu_device *adev) |
2928 | { | |
2929 | u32 mask; | |
2930 | int ret; | |
2931 | ||
cd474ba0 AD |
2932 | if (amdgpu_pcie_gen_cap) |
2933 | adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap; | |
d0dd7f0c | 2934 | |
cd474ba0 AD |
2935 | if (amdgpu_pcie_lane_cap) |
2936 | adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap; | |
d0dd7f0c | 2937 | |
cd474ba0 AD |
2938 | /* covers APUs as well */ |
2939 | if (pci_is_root_bus(adev->pdev->bus)) { | |
2940 | if (adev->pm.pcie_gen_mask == 0) | |
2941 | adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK; | |
2942 | if (adev->pm.pcie_mlw_mask == 0) | |
2943 | adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK; | |
d0dd7f0c | 2944 | return; |
cd474ba0 | 2945 | } |
d0dd7f0c | 2946 | |
cd474ba0 AD |
2947 | if (adev->pm.pcie_gen_mask == 0) { |
2948 | ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); | |
2949 | if (!ret) { | |
2950 | adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | | |
2951 | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | | |
2952 | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); | |
2953 | ||
2954 | if (mask & DRM_PCIE_SPEED_25) | |
2955 | adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; | |
2956 | if (mask & DRM_PCIE_SPEED_50) | |
2957 | adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2; | |
2958 | if (mask & DRM_PCIE_SPEED_80) | |
2959 | adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3; | |
2960 | } else { | |
2961 | adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK; | |
2962 | } | |
2963 | } | |
2964 | if (adev->pm.pcie_mlw_mask == 0) { | |
2965 | ret = drm_pcie_get_max_link_width(adev->ddev, &mask); | |
2966 | if (!ret) { | |
2967 | switch (mask) { | |
2968 | case 32: | |
2969 | adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | | |
2970 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | | |
2971 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | | |
2972 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | | |
2973 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | | |
2974 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | | |
2975 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); | |
2976 | break; | |
2977 | case 16: | |
2978 | adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | | |
2979 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | | |
2980 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | | |
2981 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | | |
2982 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | | |
2983 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); | |
2984 | break; | |
2985 | case 12: | |
2986 | adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | | |
2987 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | | |
2988 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | | |
2989 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | | |
2990 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); | |
2991 | break; | |
2992 | case 8: | |
2993 | adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | | |
2994 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | | |
2995 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | | |
2996 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); | |
2997 | break; | |
2998 | case 4: | |
2999 | adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | | |
3000 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | | |
3001 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); | |
3002 | break; | |
3003 | case 2: | |
3004 | adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | | |
3005 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); | |
3006 | break; | |
3007 | case 1: | |
3008 | adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; | |
3009 | break; | |
3010 | default: | |
3011 | break; | |
3012 | } | |
3013 | } else { | |
3014 | adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK; | |
d0dd7f0c AD |
3015 | } |
3016 | } | |
3017 | } | |
d38ceaf9 AD |
3018 | |
3019 | /* | |
3020 | * Debugfs | |
3021 | */ | |
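/*
 * amdgpu_debugfs_add_files - register a set of debugfs entries
 *
 * Registers @nfiles drm_info_list entries for @adev, skipping sets that are
 * already registered and bailing out once AMDGPU_DEBUGFS_MAX_COMPONENTS
 * slots are in use.  The files are only actually created when
 * CONFIG_DEBUG_FS is enabled.
 */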
3022 | int amdgpu_debugfs_add_files(struct amdgpu_device *adev, | |
06ab6832 | 3023 | const struct drm_info_list *files, |
d38ceaf9 AD |
3024 | unsigned nfiles) |
3025 | { | |
3026 | unsigned i; | |
3027 | ||
3028 | for (i = 0; i < adev->debugfs_count; i++) { | |
3029 | if (adev->debugfs[i].files == files) { | |
3030 | /* Already registered */ | |
3031 | return 0; | |
3032 | } | |
3033 | } | |
3034 | ||
3035 | i = adev->debugfs_count + 1; | |
3036 | if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) { | |
3037 | DRM_ERROR("Reached maximum number of debugfs components.\n"); | |
3038 | DRM_ERROR("Report this so we can increase " |
3039 | "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n"); |
3040 | return -EINVAL; | |
3041 | } | |
3042 | adev->debugfs[adev->debugfs_count].files = files; | |
3043 | adev->debugfs[adev->debugfs_count].num_files = nfiles; | |
3044 | adev->debugfs_count = i; | |
3045 | #if defined(CONFIG_DEBUG_FS) | |
d38ceaf9 AD |
3046 | drm_debugfs_create_files(files, nfiles, |
3047 | adev->ddev->primary->debugfs_root, | |
3048 | adev->ddev->primary); | |
3049 | #endif | |
3050 | return 0; | |
3051 | } | |
3052 | ||
d38ceaf9 AD |
3053 | #if defined(CONFIG_DEBUG_FS) |
3054 | ||
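/*
 * The amdgpu_regs file encodes additional state in the file offset:
 *
 *   bits  0..21  MMIO byte offset of the register (dword aligned)
 *   bit  23      take the PM mutex around the access (power-gated regs)
 *   bits 24..33  SE index      (0x3FF means broadcast/all)
 *   bits 34..43  SH index      (0x3FF means broadcast/all)
 *   bits 44..53  instance      (0x3FF means broadcast/all)
 *   bit  62      apply the SE/SH/instance bank selection above
 *
 * For example (illustrative only), a 4-byte read at offset
 * ((1ULL << 62) | (1ULL << 24) | 0x100) accesses the register at byte
 * offset 0x100 with SE 1, SH 0, instance 0 selected via GRBM.
 */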
3055 | static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, | |
3056 | size_t size, loff_t *pos) | |
3057 | { | |
45063097 | 3058 | struct amdgpu_device *adev = file_inode(f)->i_private; |
d38ceaf9 AD |
3059 | ssize_t result = 0; |
3060 | int r; | |
bd12267d | 3061 | bool pm_pg_lock, use_bank; |
56628159 | 3062 | unsigned instance_bank, sh_bank, se_bank; |
d38ceaf9 AD |
3063 | |
3064 | if (size & 0x3 || *pos & 0x3) | |
3065 | return -EINVAL; | |
3066 | ||
bd12267d TSD |
3067 | /* are we reading registers for which a PG lock is necessary? */ |
3068 | pm_pg_lock = (*pos >> 23) & 1; | |
3069 | ||
56628159 | 3070 | if (*pos & (1ULL << 62)) { |
0b968650 TSD |
3071 | se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24; |
3072 | sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34; | |
3073 | instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44; | |
32977f93 TSD |
3074 | |
3075 | if (se_bank == 0x3FF) | |
3076 | se_bank = 0xFFFFFFFF; | |
3077 | if (sh_bank == 0x3FF) | |
3078 | sh_bank = 0xFFFFFFFF; | |
3079 | if (instance_bank == 0x3FF) | |
3080 | instance_bank = 0xFFFFFFFF; | |
56628159 | 3081 | use_bank = 1; |
56628159 TSD |
3082 | } else { |
3083 | use_bank = 0; | |
3084 | } | |
3085 | ||
801a6aa9 | 3086 | *pos &= (1UL << 22) - 1; |
bd12267d | 3087 | |
56628159 | 3088 | if (use_bank) { |
32977f93 TSD |
3089 | if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || |
3090 | (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) | |
56628159 TSD |
3091 | return -EINVAL; |
3092 | mutex_lock(&adev->grbm_idx_mutex); | |
3093 | amdgpu_gfx_select_se_sh(adev, se_bank, | |
3094 | sh_bank, instance_bank); | |
3095 | } | |
3096 | ||
bd12267d TSD |
3097 | if (pm_pg_lock) |
3098 | mutex_lock(&adev->pm.mutex); | |
3099 | ||
d38ceaf9 AD |
3100 | while (size) { |
3101 | uint32_t value; | |
3102 | ||
3103 | if (*pos >= adev->rmmio_size) |
56628159 | 3104 | goto end; |
d38ceaf9 AD |
3105 | |
3106 | value = RREG32(*pos >> 2); | |
3107 | r = put_user(value, (uint32_t *)buf); | |
56628159 TSD |
3108 | if (r) { |
3109 | result = r; | |
3110 | goto end; | |
3111 | } | |
d38ceaf9 AD |
3112 | |
3113 | result += 4; | |
3114 | buf += 4; | |
3115 | *pos += 4; | |
3116 | size -= 4; | |
3117 | } | |
3118 | ||
56628159 TSD |
3119 | end: |
3120 | if (use_bank) { | |
3121 | amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); | |
3122 | mutex_unlock(&adev->grbm_idx_mutex); | |
3123 | } | |
3124 | ||
bd12267d TSD |
3125 | if (pm_pg_lock) |
3126 | mutex_unlock(&adev->pm.mutex); | |
3127 | ||
d38ceaf9 AD |
3128 | return result; |
3129 | } | |
3130 | ||
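/* The write path decodes the same offset encoding as amdgpu_debugfs_regs_read() above. */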
3131 | static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, | |
3132 | size_t size, loff_t *pos) | |
3133 | { | |
45063097 | 3134 | struct amdgpu_device *adev = file_inode(f)->i_private; |
d38ceaf9 AD |
3135 | ssize_t result = 0; |
3136 | int r; | |
394fdde2 TSD |
3137 | bool pm_pg_lock, use_bank; |
3138 | unsigned instance_bank, sh_bank, se_bank; | |
d38ceaf9 AD |
3139 | |
3140 | if (size & 0x3 || *pos & 0x3) | |
3141 | return -EINVAL; | |
3142 | ||
394fdde2 TSD |
3143 | /* are we writing registers for which a PG lock is necessary? */ |
3144 | pm_pg_lock = (*pos >> 23) & 1; | |
3145 | ||
3146 | if (*pos & (1ULL << 62)) { | |
0b968650 TSD |
3147 | se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24; |
3148 | sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34; | |
3149 | instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44; | |
394fdde2 TSD |
3150 | |
3151 | if (se_bank == 0x3FF) | |
3152 | se_bank = 0xFFFFFFFF; | |
3153 | if (sh_bank == 0x3FF) | |
3154 | sh_bank = 0xFFFFFFFF; | |
3155 | if (instance_bank == 0x3FF) | |
3156 | instance_bank = 0xFFFFFFFF; | |
3157 | use_bank = 1; | |
3158 | } else { | |
3159 | use_bank = 0; | |
3160 | } | |
3161 | ||
801a6aa9 | 3162 | *pos &= (1UL << 22) - 1; |
394fdde2 TSD |
3163 | |
3164 | if (use_bank) { | |
3165 | if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || | |
3166 | (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) | |
3167 | return -EINVAL; | |
3168 | mutex_lock(&adev->grbm_idx_mutex); | |
3169 | amdgpu_gfx_select_se_sh(adev, se_bank, | |
3170 | sh_bank, instance_bank); | |
3171 | } | |
3172 | ||
3173 | if (pm_pg_lock) | |
3174 | mutex_lock(&adev->pm.mutex); | |
3175 | ||
d38ceaf9 AD |
3176 | while (size) { |
3177 | uint32_t value; | |
3178 | ||
3179 | if (*pos >= adev->rmmio_size) |
3180 | break; |
3181 | ||
3182 | r = get_user(value, (uint32_t *)buf); |
3183 | if (r) { |
3184 | result = r; |
| break; |
| } |
3185 | ||
3186 | WREG32(*pos >> 2, value); | |
3187 | ||
3188 | result += 4; | |
3189 | buf += 4; | |
3190 | *pos += 4; | |
3191 | size -= 4; | |
3192 | } | |
3193 | ||
394fdde2 TSD |
3194 | if (use_bank) { |
3195 | amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); | |
3196 | mutex_unlock(&adev->grbm_idx_mutex); | |
3197 | } | |
3198 | ||
3199 | if (pm_pg_lock) | |
3200 | mutex_unlock(&adev->pm.mutex); | |
3201 | ||
d38ceaf9 AD |
3202 | return result; |
3203 | } | |
3204 | ||
adcec288 TSD |
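/*
 * The amdgpu_regs_pcie, amdgpu_regs_didt and amdgpu_regs_smc files below
 * follow the same pattern: the file offset selects the register, and each
 * 4-byte read/write goes through the corresponding indirect access macro
 * (RREG32_PCIE/WREG32_PCIE, RREG32_DIDT/WREG32_DIDT, RREG32_SMC/WREG32_SMC).
 * Note that the SMC accessors take the offset directly rather than a
 * dword index.
 */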
3205 | static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, |
3206 | size_t size, loff_t *pos) | |
3207 | { | |
45063097 | 3208 | struct amdgpu_device *adev = file_inode(f)->i_private; |
adcec288 TSD |
3209 | ssize_t result = 0; |
3210 | int r; | |
3211 | ||
3212 | if (size & 0x3 || *pos & 0x3) | |
3213 | return -EINVAL; | |
3214 | ||
3215 | while (size) { | |
3216 | uint32_t value; | |
3217 | ||
3218 | value = RREG32_PCIE(*pos >> 2); | |
3219 | r = put_user(value, (uint32_t *)buf); | |
3220 | if (r) | |
3221 | return r; | |
3222 | ||
3223 | result += 4; | |
3224 | buf += 4; | |
3225 | *pos += 4; | |
3226 | size -= 4; | |
3227 | } | |
3228 | ||
3229 | return result; | |
3230 | } | |
3231 | ||
3232 | static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf, | |
3233 | size_t size, loff_t *pos) | |
3234 | { | |
45063097 | 3235 | struct amdgpu_device *adev = file_inode(f)->i_private; |
adcec288 TSD |
3236 | ssize_t result = 0; |
3237 | int r; | |
3238 | ||
3239 | if (size & 0x3 || *pos & 0x3) | |
3240 | return -EINVAL; | |
3241 | ||
3242 | while (size) { | |
3243 | uint32_t value; | |
3244 | ||
3245 | r = get_user(value, (uint32_t *)buf); | |
3246 | if (r) | |
3247 | return r; | |
3248 | ||
3249 | WREG32_PCIE(*pos >> 2, value); | |
3250 | ||
3251 | result += 4; | |
3252 | buf += 4; | |
3253 | *pos += 4; | |
3254 | size -= 4; | |
3255 | } | |
3256 | ||
3257 | return result; | |
3258 | } | |
3259 | ||
3260 | static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, | |
3261 | size_t size, loff_t *pos) | |
3262 | { | |
45063097 | 3263 | struct amdgpu_device *adev = file_inode(f)->i_private; |
adcec288 TSD |
3264 | ssize_t result = 0; |
3265 | int r; | |
3266 | ||
3267 | if (size & 0x3 || *pos & 0x3) | |
3268 | return -EINVAL; | |
3269 | ||
3270 | while (size) { | |
3271 | uint32_t value; | |
3272 | ||
3273 | value = RREG32_DIDT(*pos >> 2); | |
3274 | r = put_user(value, (uint32_t *)buf); | |
3275 | if (r) | |
3276 | return r; | |
3277 | ||
3278 | result += 4; | |
3279 | buf += 4; | |
3280 | *pos += 4; | |
3281 | size -= 4; | |
3282 | } | |
3283 | ||
3284 | return result; | |
3285 | } | |
3286 | ||
3287 | static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf, | |
3288 | size_t size, loff_t *pos) | |
3289 | { | |
45063097 | 3290 | struct amdgpu_device *adev = file_inode(f)->i_private; |
adcec288 TSD |
3291 | ssize_t result = 0; |
3292 | int r; | |
3293 | ||
3294 | if (size & 0x3 || *pos & 0x3) | |
3295 | return -EINVAL; | |
3296 | ||
3297 | while (size) { | |
3298 | uint32_t value; | |
3299 | ||
3300 | r = get_user(value, (uint32_t *)buf); | |
3301 | if (r) | |
3302 | return r; | |
3303 | ||
3304 | WREG32_DIDT(*pos >> 2, value); | |
3305 | ||
3306 | result += 4; | |
3307 | buf += 4; | |
3308 | *pos += 4; | |
3309 | size -= 4; | |
3310 | } | |
3311 | ||
3312 | return result; | |
3313 | } | |
3314 | ||
3315 | static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, | |
3316 | size_t size, loff_t *pos) | |
3317 | { | |
45063097 | 3318 | struct amdgpu_device *adev = file_inode(f)->i_private; |
adcec288 TSD |
3319 | ssize_t result = 0; |
3320 | int r; | |
3321 | ||
3322 | if (size & 0x3 || *pos & 0x3) | |
3323 | return -EINVAL; | |
3324 | ||
3325 | while (size) { | |
3326 | uint32_t value; | |
3327 | ||
6fc0deaf | 3328 | value = RREG32_SMC(*pos); |
adcec288 TSD |
3329 | r = put_user(value, (uint32_t *)buf); |
3330 | if (r) | |
3331 | return r; | |
3332 | ||
3333 | result += 4; | |
3334 | buf += 4; | |
3335 | *pos += 4; | |
3336 | size -= 4; | |
3337 | } | |
3338 | ||
3339 | return result; | |
3340 | } | |
3341 | ||
3342 | static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf, | |
3343 | size_t size, loff_t *pos) | |
3344 | { | |
45063097 | 3345 | struct amdgpu_device *adev = file_inode(f)->i_private; |
adcec288 TSD |
3346 | ssize_t result = 0; |
3347 | int r; | |
3348 | ||
3349 | if (size & 0x3 || *pos & 0x3) | |
3350 | return -EINVAL; | |
3351 | ||
3352 | while (size) { | |
3353 | uint32_t value; | |
3354 | ||
3355 | r = get_user(value, (uint32_t *)buf); | |
3356 | if (r) | |
3357 | return r; | |
3358 | ||
6fc0deaf | 3359 | WREG32_SMC(*pos, value); |
adcec288 TSD |
3360 | |
3361 | result += 4; | |
3362 | buf += 4; | |
3363 | *pos += 4; | |
3364 | size -= 4; | |
3365 | } | |
3366 | ||
3367 | return result; | |
3368 | } | |
3369 | ||
1e051413 TSD |
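/*
 * amdgpu_gca_config exposes a versioned dump of the GFX configuration as
 * an array of dwords.  The first dword is the format version (currently 3);
 * each version appends new fields after the previous ones, so userspace
 * tools should check the version before interpreting later entries.
 */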
3370 | static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, |
3371 | size_t size, loff_t *pos) | |
3372 | { | |
45063097 | 3373 | struct amdgpu_device *adev = file_inode(f)->i_private; |
1e051413 TSD |
3374 | ssize_t result = 0; |
3375 | int r; | |
3376 | uint32_t *config, no_regs = 0; | |
3377 | ||
3378 | if (size & 0x3 || *pos & 0x3) | |
3379 | return -EINVAL; | |
3380 | ||
ecab7668 | 3381 | config = kmalloc_array(256, sizeof(*config), GFP_KERNEL); |
1e051413 TSD |
3382 | if (!config) |
3383 | return -ENOMEM; | |
3384 | ||
3385 | /* version, increment each time something is added */ | |
9a999359 | 3386 | config[no_regs++] = 3; |
1e051413 TSD |
3387 | config[no_regs++] = adev->gfx.config.max_shader_engines; |
3388 | config[no_regs++] = adev->gfx.config.max_tile_pipes; | |
3389 | config[no_regs++] = adev->gfx.config.max_cu_per_sh; | |
3390 | config[no_regs++] = adev->gfx.config.max_sh_per_se; | |
3391 | config[no_regs++] = adev->gfx.config.max_backends_per_se; | |
3392 | config[no_regs++] = adev->gfx.config.max_texture_channel_caches; | |
3393 | config[no_regs++] = adev->gfx.config.max_gprs; | |
3394 | config[no_regs++] = adev->gfx.config.max_gs_threads; | |
3395 | config[no_regs++] = adev->gfx.config.max_hw_contexts; | |
3396 | config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend; | |
3397 | config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend; | |
3398 | config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size; | |
3399 | config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size; | |
3400 | config[no_regs++] = adev->gfx.config.num_tile_pipes; | |
3401 | config[no_regs++] = adev->gfx.config.backend_enable_mask; | |
3402 | config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes; | |
3403 | config[no_regs++] = adev->gfx.config.mem_row_size_in_kb; | |
3404 | config[no_regs++] = adev->gfx.config.shader_engine_tile_size; | |
3405 | config[no_regs++] = adev->gfx.config.num_gpus; | |
3406 | config[no_regs++] = adev->gfx.config.multi_gpu_tile_size; | |
3407 | config[no_regs++] = adev->gfx.config.mc_arb_ramcfg; | |
3408 | config[no_regs++] = adev->gfx.config.gb_addr_config; | |
3409 | config[no_regs++] = adev->gfx.config.num_rbs; | |
3410 | ||
89a8f309 TSD |
3411 | /* rev==1 */ |
3412 | config[no_regs++] = adev->rev_id; | |
3413 | config[no_regs++] = adev->pg_flags; | |
3414 | config[no_regs++] = adev->cg_flags; | |
3415 | ||
e9f11dc8 TSD |
3416 | /* rev==2 */ |
3417 | config[no_regs++] = adev->family; | |
3418 | config[no_regs++] = adev->external_rev_id; | |
3419 | ||
9a999359 TSD |
3420 | /* rev==3 */ |
3421 | config[no_regs++] = adev->pdev->device; | |
3422 | config[no_regs++] = adev->pdev->revision; | |
3423 | config[no_regs++] = adev->pdev->subsystem_device; | |
3424 | config[no_regs++] = adev->pdev->subsystem_vendor; | |
3425 | ||
1e051413 TSD |
3426 | while (size && (*pos < no_regs * 4)) { |
3427 | uint32_t value; | |
3428 | ||
3429 | value = config[*pos >> 2]; | |
3430 | r = put_user(value, (uint32_t *)buf); | |
3431 | if (r) { | |
3432 | kfree(config); | |
3433 | return r; | |
3434 | } | |
3435 | ||
3436 | result += 4; | |
3437 | buf += 4; | |
3438 | *pos += 4; | |
3439 | size -= 4; | |
3440 | } | |
3441 | ||
3442 | kfree(config); | |
3443 | return result; | |
3444 | } | |
3445 | ||
f2cdaf20 TSD |
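/*
 * amdgpu_sensors: the file offset, divided by four, selects the sensor id
 * passed to amdgpu_dpm_read_sensor(); the raw sensor values are copied back
 * to the caller.  Reads fail with -EINVAL when DPM is disabled or the
 * underlying read_sensor() callback is missing.
 */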
3446 | static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, |
3447 | size_t size, loff_t *pos) | |
3448 | { | |
45063097 | 3449 | struct amdgpu_device *adev = file_inode(f)->i_private; |
9f8df7d7 TSD |
3450 | int idx, x, outsize, r, valuesize; |
3451 | uint32_t values[16]; | |
f2cdaf20 | 3452 | |
9f8df7d7 | 3453 | if (size & 3 || *pos & 0x3) |
f2cdaf20 TSD |
3454 | return -EINVAL; |
3455 | ||
3cbc614f SP |
3456 | if (amdgpu_dpm == 0) |
3457 | return -EINVAL; | |
3458 | ||
f2cdaf20 TSD |
3459 | /* convert offset to sensor number */ |
3460 | idx = *pos >> 2; | |
3461 | ||
9f8df7d7 | 3462 | valuesize = sizeof(values); |
f2cdaf20 | 3463 | if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor) |
cd4d7464 | 3464 | r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize); |
f2cdaf20 TSD |
3465 | else |
3466 | return -EINVAL; | |
3467 | ||
9f8df7d7 TSD |
3468 | if (size > valuesize) |
3469 | return -EINVAL; | |
3470 | ||
3471 | outsize = 0; | |
3472 | x = 0; | |
3473 | if (!r) { | |
3474 | while (size) { | |
3475 | r = put_user(values[x++], (int32_t *)buf); | |
3476 | buf += 4; | |
3477 | size -= 4; | |
3478 | outsize += 4; | |
3479 | } | |
3480 | } | |
f2cdaf20 | 3481 | |
9f8df7d7 | 3482 | return !r ? outsize : r; |
f2cdaf20 | 3483 | } |
1e051413 | 3484 | |
273d7aa1 TSD |
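/*
 * amdgpu_wave: the file offset encodes which wave to inspect:
 *
 *   bits  0..6   byte offset into the returned wave data
 *   bits  7..14  SE
 *   bits 15..22  SH
 *   bits 23..30  CU
 *   bits 31..36  wave id
 *   bits 37..44  SIMD id
 *
 * The selected wave's status registers are gathered through the gfx
 * read_wave_data() callback and copied out starting at the requested
 * byte offset.
 */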
3485 | static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, |
3486 | size_t size, loff_t *pos) | |
3487 | { | |
3488 | struct amdgpu_device *adev = f->f_inode->i_private; | |
3489 | int r, x; | |
3490 | ssize_t result = 0; |
472259f0 | 3491 | uint32_t offset, se, sh, cu, wave, simd, data[32]; |
273d7aa1 TSD |
3492 | |
3493 | if (size & 3 || *pos & 3) | |
3494 | return -EINVAL; | |
3495 | ||
3496 | /* decode offset */ | |
0b968650 TSD |
3497 | offset = (*pos & GENMASK_ULL(6, 0)); |
3498 | se = (*pos & GENMASK_ULL(14, 7)) >> 7; | |
3499 | sh = (*pos & GENMASK_ULL(22, 15)) >> 15; | |
3500 | cu = (*pos & GENMASK_ULL(30, 23)) >> 23; | |
3501 | wave = (*pos & GENMASK_ULL(36, 31)) >> 31; | |
3502 | simd = (*pos & GENMASK_ULL(44, 37)) >> 37; | |
273d7aa1 TSD |
3503 | |
3504 | /* switch to the specific se/sh/cu */ | |
3505 | mutex_lock(&adev->grbm_idx_mutex); | |
3506 | amdgpu_gfx_select_se_sh(adev, se, sh, cu); | |
3507 | ||
3508 | x = 0; | |
472259f0 TSD |
3509 | if (adev->gfx.funcs->read_wave_data) |
3510 | adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x); | |
273d7aa1 TSD |
3511 | |
3512 | amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); | |
3513 | mutex_unlock(&adev->grbm_idx_mutex); | |
3514 | ||
5ecfb3b8 TSD |
3515 | if (!x) |
3516 | return -EINVAL; | |
3517 | ||
472259f0 | 3518 | while (size && (offset < x * 4)) { |
273d7aa1 TSD |
3519 | uint32_t value; |
3520 | ||
472259f0 | 3521 | value = data[offset >> 2]; |
273d7aa1 TSD |
3522 | r = put_user(value, (uint32_t *)buf); |
3523 | if (r) | |
3524 | return r; | |
3525 | ||
3526 | result += 4; | |
3527 | buf += 4; | |
472259f0 | 3528 | offset += 4; |
273d7aa1 TSD |
3529 | size -= 4; |
3530 | } | |
3531 | ||
3532 | return result; | |
3533 | } | |
3534 | ||
c5a60ce8 TSD |
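/*
 * amdgpu_gpr: the file offset encodes which wave's GPRs to dump:
 *
 *   bits  0..11  starting register within the bank
 *   bits 12..19  SE
 *   bits 20..27  SH
 *   bits 28..35  CU
 *   bits 36..43  wave id
 *   bits 44..51  SIMD id
 *   bits 52..59  thread id (VGPR reads only)
 *   bits 60..61  bank: 0 selects VGPRs, anything else selects SGPRs
 *
 * The registers are fetched through the gfx read_wave_vgprs()/
 * read_wave_sgprs() callbacks into a temporary buffer and then copied
 * to userspace.
 */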
3535 | static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, |
3536 | size_t size, loff_t *pos) | |
3537 | { | |
3538 | struct amdgpu_device *adev = f->f_inode->i_private; | |
3539 | int r; | |
3540 | ssize_t result = 0; | |
3541 | uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data; | |
3542 | ||
3543 | if (size > 4096 || size & 3 || *pos & 3) |
3544 | return -EINVAL; | |
3545 | ||
3546 | /* decode offset */ | |
0b968650 TSD |
3547 | offset = *pos & GENMASK_ULL(11, 0); |
3548 | se = (*pos & GENMASK_ULL(19, 12)) >> 12; | |
3549 | sh = (*pos & GENMASK_ULL(27, 20)) >> 20; | |
3550 | cu = (*pos & GENMASK_ULL(35, 28)) >> 28; | |
3551 | wave = (*pos & GENMASK_ULL(43, 36)) >> 36; | |
3552 | simd = (*pos & GENMASK_ULL(51, 44)) >> 44; | |
3553 | thread = (*pos & GENMASK_ULL(59, 52)) >> 52; | |
3554 | bank = (*pos & GENMASK_ULL(61, 60)) >> 60; | |
c5a60ce8 TSD |
3555 | |
3556 | data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL); | |
3557 | if (!data) | |
3558 | return -ENOMEM; | |
3559 | ||
3560 | /* switch to the specific se/sh/cu */ | |
3561 | mutex_lock(&adev->grbm_idx_mutex); | |
3562 | amdgpu_gfx_select_se_sh(adev, se, sh, cu); | |
3563 | ||
3564 | if (bank == 0) { | |
3565 | if (adev->gfx.funcs->read_wave_vgprs) | |
3566 | adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data); | |
3567 | } else { | |
3568 | if (adev->gfx.funcs->read_wave_sgprs) | |
3569 | adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data); | |
3570 | } | |
3571 | ||
3572 | amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); | |
3573 | mutex_unlock(&adev->grbm_idx_mutex); | |
3574 | ||
3575 | while (size) { | |
3576 | uint32_t value; | |
3577 | ||
3578 | value = data[result >> 2]; |
3579 | r = put_user(value, (uint32_t *)buf); | |
3580 | if (r) { | |
3581 | result = r; | |
3582 | goto err; | |
3583 | } | |
3584 | ||
3585 | result += 4; | |
3586 | buf += 4; | |
3587 | size -= 4; | |
3588 | } | |
3589 | ||
3590 | err: | |
3591 | kfree(data); | |
3592 | return result; | |
3593 | } | |
3594 | ||
d38ceaf9 AD |
3595 | static const struct file_operations amdgpu_debugfs_regs_fops = { |
3596 | .owner = THIS_MODULE, | |
3597 | .read = amdgpu_debugfs_regs_read, | |
3598 | .write = amdgpu_debugfs_regs_write, | |
3599 | .llseek = default_llseek | |
3600 | }; | |
adcec288 TSD |
3601 | static const struct file_operations amdgpu_debugfs_regs_didt_fops = { |
3602 | .owner = THIS_MODULE, | |
3603 | .read = amdgpu_debugfs_regs_didt_read, | |
3604 | .write = amdgpu_debugfs_regs_didt_write, | |
3605 | .llseek = default_llseek | |
3606 | }; | |
3607 | static const struct file_operations amdgpu_debugfs_regs_pcie_fops = { | |
3608 | .owner = THIS_MODULE, | |
3609 | .read = amdgpu_debugfs_regs_pcie_read, | |
3610 | .write = amdgpu_debugfs_regs_pcie_write, | |
3611 | .llseek = default_llseek | |
3612 | }; | |
3613 | static const struct file_operations amdgpu_debugfs_regs_smc_fops = { | |
3614 | .owner = THIS_MODULE, | |
3615 | .read = amdgpu_debugfs_regs_smc_read, | |
3616 | .write = amdgpu_debugfs_regs_smc_write, | |
3617 | .llseek = default_llseek | |
3618 | }; | |
3619 | ||
1e051413 TSD |
3620 | static const struct file_operations amdgpu_debugfs_gca_config_fops = { |
3621 | .owner = THIS_MODULE, | |
3622 | .read = amdgpu_debugfs_gca_config_read, | |
3623 | .llseek = default_llseek | |
3624 | }; | |
3625 | ||
f2cdaf20 TSD |
3626 | static const struct file_operations amdgpu_debugfs_sensors_fops = { |
3627 | .owner = THIS_MODULE, | |
3628 | .read = amdgpu_debugfs_sensor_read, | |
3629 | .llseek = default_llseek | |
3630 | }; | |
3631 | ||
273d7aa1 TSD |
3632 | static const struct file_operations amdgpu_debugfs_wave_fops = { |
3633 | .owner = THIS_MODULE, | |
3634 | .read = amdgpu_debugfs_wave_read, | |
3635 | .llseek = default_llseek | |
3636 | }; | |
c5a60ce8 TSD |
3637 | static const struct file_operations amdgpu_debugfs_gpr_fops = { |
3638 | .owner = THIS_MODULE, | |
3639 | .read = amdgpu_debugfs_gpr_read, | |
3640 | .llseek = default_llseek | |
3641 | }; | |
273d7aa1 | 3642 | |
adcec288 TSD |
3643 | static const struct file_operations *debugfs_regs[] = { |
3644 | &amdgpu_debugfs_regs_fops, | |
3645 | &amdgpu_debugfs_regs_didt_fops, | |
3646 | &amdgpu_debugfs_regs_pcie_fops, | |
3647 | &amdgpu_debugfs_regs_smc_fops, | |
1e051413 | 3648 | &amdgpu_debugfs_gca_config_fops, |
f2cdaf20 | 3649 | &amdgpu_debugfs_sensors_fops, |
273d7aa1 | 3650 | &amdgpu_debugfs_wave_fops, |
c5a60ce8 | 3651 | &amdgpu_debugfs_gpr_fops, |
adcec288 TSD |
3652 | }; |
3653 | ||
3654 | static const char *debugfs_regs_names[] = { | |
3655 | "amdgpu_regs", | |
3656 | "amdgpu_regs_didt", | |
3657 | "amdgpu_regs_pcie", | |
3658 | "amdgpu_regs_smc", | |
1e051413 | 3659 | "amdgpu_gca_config", |
f2cdaf20 | 3660 | "amdgpu_sensors", |
273d7aa1 | 3661 | "amdgpu_wave", |
c5a60ce8 | 3662 | "amdgpu_gpr", |
adcec288 | 3663 | }; |
d38ceaf9 AD |
3664 | |
3665 | static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) | |
3666 | { | |
3667 | struct drm_minor *minor = adev->ddev->primary; | |
3668 | struct dentry *ent, *root = minor->debugfs_root; | |
adcec288 TSD |
3669 | unsigned i, j; |
3670 | ||
3671 | for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) { | |
3672 | ent = debugfs_create_file(debugfs_regs_names[i], | |
3673 | S_IFREG | S_IRUGO, root, | |
3674 | adev, debugfs_regs[i]); | |
3675 | if (IS_ERR(ent)) { | |
3676 | for (j = 0; j < i; j++) { | |
3677 | debugfs_remove(adev->debugfs_regs[j]); |
3678 | adev->debugfs_regs[j] = NULL; |
3679 | } | |
3680 | return PTR_ERR(ent); | |
3681 | } | |
d38ceaf9 | 3682 | |
adcec288 TSD |
3683 | if (!i) |
3684 | i_size_write(ent->d_inode, adev->rmmio_size); | |
3685 | adev->debugfs_regs[i] = ent; | |
3686 | } | |
d38ceaf9 AD |
3687 | |
3688 | return 0; | |
3689 | } | |
3690 | ||
3691 | static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) | |
3692 | { | |
adcec288 TSD |
3693 | unsigned i; |
3694 | ||
3695 | for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) { | |
3696 | if (adev->debugfs_regs[i]) { | |
3697 | debugfs_remove(adev->debugfs_regs[i]); | |
3698 | adev->debugfs_regs[i] = NULL; | |
3699 | } | |
3700 | } | |
d38ceaf9 AD |
3701 | } |
3702 | ||
4f0955fc HR |
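/*
 * amdgpu_test_ib: parks every scheduler thread so no new jobs are
 * submitted, runs the IB ring tests, reports the result through the
 * seq_file and then unparks the schedulers again.
 */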
3703 | static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data) |
3704 | { | |
3705 | struct drm_info_node *node = (struct drm_info_node *) m->private; | |
3706 | struct drm_device *dev = node->minor->dev; | |
3707 | struct amdgpu_device *adev = dev->dev_private; | |
3708 | int r = 0, i; | |
3709 | ||
3710 | /* hold on the scheduler */ | |
3711 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { | |
3712 | struct amdgpu_ring *ring = adev->rings[i]; | |
3713 | ||
3714 | if (!ring || !ring->sched.thread) | |
3715 | continue; | |
3716 | kthread_park(ring->sched.thread); | |
3717 | } | |
3718 | ||
3719 | seq_printf(m, "run ib test:\n"); | |
3720 | r = amdgpu_ib_ring_tests(adev); | |
3721 | if (r) | |
3722 | seq_printf(m, "ib ring tests failed (%d).\n", r); | |
3723 | else | |
3724 | seq_printf(m, "ib ring tests passed.\n"); | |
3725 | ||
3726 | /* go on the scheduler */ | |
3727 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { | |
3728 | struct amdgpu_ring *ring = adev->rings[i]; | |
3729 | ||
3730 | if (!ring || !ring->sched.thread) | |
3731 | continue; | |
3732 | kthread_unpark(ring->sched.thread); | |
3733 | } | |
3734 | ||
3735 | return 0; | |
3736 | } | |
3737 | ||
db95e218 KR |
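/* amdgpu_vbios simply dumps the cached VBIOS image (adev->bios). */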
3738 | static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data) |
3739 | { | |
3740 | struct drm_info_node *node = (struct drm_info_node *) m->private; | |
3741 | struct drm_device *dev = node->minor->dev; | |
3742 | struct amdgpu_device *adev = dev->dev_private; | |
3743 | ||
3744 | seq_write(m, adev->bios, adev->bios_size); | |
3745 | return 0; | |
3746 | } | |
3747 | ||
79588d21 CK |
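/* amdgpu_evict_vram forces an eviction of all BOs from VRAM and prints the return code. */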
3748 | static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data) |
3749 | { | |
3750 | struct drm_info_node *node = (struct drm_info_node *)m->private; | |
3751 | struct drm_device *dev = node->minor->dev; | |
3752 | struct amdgpu_device *adev = dev->dev_private; | |
3753 | ||
3754 | seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev)); | |
3755 | return 0; | |
3756 | } | |
3757 | ||
763efb6c CK |
3758 | static const struct drm_info_list amdgpu_debugfs_list[] = { |
3759 | {"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump}, | |
79588d21 CK |
3760 | {"amdgpu_test_ib", &amdgpu_debugfs_test_ib}, |
3761 | {"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram} | |
db95e218 KR |
3762 | }; |
3763 | ||
763efb6c | 3764 | static int amdgpu_debugfs_init(struct amdgpu_device *adev) |
db95e218 | 3765 | { |
763efb6c CK |
3766 | return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list, |
3767 | ARRAY_SIZE(amdgpu_debugfs_list)); | |
db95e218 | 3768 | } |
763efb6c | 3769 | |
7cebc728 | 3770 | #else |
763efb6c | 3771 | static int amdgpu_debugfs_init(struct amdgpu_device *adev) |
4f0955fc HR |
3772 | { |
3773 | return 0; | |
3774 | } | |
7cebc728 AK |
3775 | static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) |
3776 | { | |
3777 | return 0; | |
3778 | } | |
3779 | static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { } | |
d38ceaf9 | 3780 | #endif |