/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");

#define AMDGPU_RESUME_MS	2000

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev);

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGA10",
	"RAVEN",
	"LAST",
};

bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_rreg(adev, reg);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_wreg(adev, reg, v);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}
185
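/*
 * Illustrative sketch (not from the original source): most callers do not use
 * these helpers directly but go through the RREG32()/WREG32() wrappers from
 * amdgpu.h for a read-modify-write; the register and field names below are
 * hypothetical:
 *
 *	u32 tmp = RREG32(mmEXAMPLE_CNTL);
 *	tmp &= ~EXAMPLE_ENABLE_MASK;
 *	tmp |= EXAMPLE_ENABLE_VALUE;
 *	WREG32(mmEXAMPLE_CNTL, tmp);
 */
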
186/**
187 * amdgpu_mm_rdoorbell - read a doorbell dword
188 *
189 * @adev: amdgpu_device pointer
190 * @index: doorbell index
191 *
192 * Returns the value in the doorbell aperture at the
193 * requested doorbell index (CIK).
194 */
195u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
196{
197 if (index < adev->doorbell.num_doorbells) {
198 return readl(adev->doorbell.ptr + index);
199 } else {
200 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
201 return 0;
202 }
203}
204
205/**
206 * amdgpu_mm_wdoorbell - write a doorbell dword
207 *
208 * @adev: amdgpu_device pointer
209 * @index: doorbell index
210 * @v: value to write
211 *
212 * Writes @v to the doorbell aperture at the
213 * requested doorbell index (CIK).
214 */
215void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
216{
217 if (index < adev->doorbell.num_doorbells) {
218 writel(v, adev->doorbell.ptr + index);
219 } else {
220 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
221 }
222}
223
/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}
261
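/*
 * Illustrative sketch (assumption, not part of the original file): ring code
 * normally uses the WDOORBELL32()/WDOORBELL64() wrappers from amdgpu.h rather
 * than calling these helpers directly, e.g. when committing a new ring write
 * pointer:
 *
 *	WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 */
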
262/**
263 * amdgpu_invalid_rreg - dummy reg read function
264 *
265 * @adev: amdgpu device pointer
266 * @reg: offset of register
267 *
268 * Dummy register read function. Used for register blocks
269 * that certain asics don't have (all asics).
270 * Returns the value in the register.
271 */
272static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
273{
274 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
275 BUG();
276 return 0;
277}
278
/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
289static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
290{
291 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
292 reg, v);
293 BUG();
294}
295
296/**
297 * amdgpu_block_invalid_rreg - dummy reg read function
298 *
299 * @adev: amdgpu device pointer
300 * @block: offset of instance
301 * @reg: offset of register
302 *
303 * Dummy register read function. Used for register blocks
304 * that certain asics don't have (all asics).
305 * Returns the value in the register.
306 */
307static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
308 uint32_t block, uint32_t reg)
309{
310 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
311 reg, block);
312 BUG();
313 return 0;
314}
315
/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
327static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
328 uint32_t block,
329 uint32_t reg, uint32_t v)
330{
331 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
332 reg, block, v);
333 BUG();
334}
335
336static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
337{
a4a02777
CK
338 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
339 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
340 &adev->vram_scratch.robj,
341 &adev->vram_scratch.gpu_addr,
342 (void **)&adev->vram_scratch.ptr);
d38ceaf9
AD
343}
344
345static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
346{
078af1a3 347 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
d38ceaf9
AD
348}
349
/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
360void amdgpu_program_register_sequence(struct amdgpu_device *adev,
361 const u32 *registers,
362 const u32 array_size)
363{
364 u32 tmp, reg, and_mask, or_mask;
365 int i;
366
367 if (array_size % 3)
368 return;
369
370 for (i = 0; i < array_size; i +=3) {
371 reg = registers[i + 0];
372 and_mask = registers[i + 1];
373 or_mask = registers[i + 2];
374
375 if (and_mask == 0xffffffff) {
376 tmp = or_mask;
377 } else {
378 tmp = RREG32(reg);
379 tmp &= ~and_mask;
380 tmp |= or_mask;
381 }
382 WREG32(reg, tmp);
383 }
384}
385
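/*
 * Illustrative sketch (not from the original source): the register array is a
 * flat list of {offset, and_mask, or_mask} triplets, so array_size must be a
 * multiple of 3.  A hypothetical golden-register table and call might look
 * like:
 *
 *	static const u32 golden_settings_example[] = {
 *		mmEXAMPLE_REG, 0xffffffff, 0x00000001,
 *	};
 *
 *	amdgpu_program_register_sequence(adev, golden_settings_example,
 *					 ARRAY_SIZE(golden_settings_example));
 */
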
386void amdgpu_pci_config_reset(struct amdgpu_device *adev)
387{
388 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
389}
390
391/*
392 * GPU doorbell aperture helpers function.
393 */
394/**
395 * amdgpu_doorbell_init - Init doorbell driver information.
396 *
397 * @adev: amdgpu_device pointer
398 *
399 * Init doorbell driver information (CIK)
400 * Returns 0 on success, error on failure.
401 */
402static int amdgpu_doorbell_init(struct amdgpu_device *adev)
403{
705e519e
CK
404 /* No doorbell on SI hardware generation */
405 if (adev->asic_type < CHIP_BONAIRE) {
406 adev->doorbell.base = 0;
407 adev->doorbell.size = 0;
408 adev->doorbell.num_doorbells = 0;
409 adev->doorbell.ptr = NULL;
410 return 0;
411 }
412
d38ceaf9
AD
413 /* doorbell bar mapping */
414 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
415 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
416
edf600da 417 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
d38ceaf9
AD
418 AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
419 if (adev->doorbell.num_doorbells == 0)
420 return -EINVAL;
421
8972e5d2
CK
422 adev->doorbell.ptr = ioremap(adev->doorbell.base,
423 adev->doorbell.num_doorbells *
424 sizeof(u32));
425 if (adev->doorbell.ptr == NULL)
d38ceaf9 426 return -ENOMEM;
d38ceaf9
AD
427
428 return 0;
429}
430
431/**
432 * amdgpu_doorbell_fini - Tear down doorbell driver information.
433 *
434 * @adev: amdgpu_device pointer
435 *
436 * Tear down doorbell driver information (CIK)
437 */
438static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
439{
440 iounmap(adev->doorbell.ptr);
441 adev->doorbell.ptr = NULL;
442}
443
444/**
445 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
446 * setup amdkfd
447 *
448 * @adev: amdgpu_device pointer
449 * @aperture_base: output returning doorbell aperture base physical address
450 * @aperture_size: output returning doorbell aperture size in bytes
451 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
452 *
453 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
454 * takes doorbells required for its own rings and reports the setup to amdkfd.
455 * amdgpu reserved doorbells are at the start of the doorbell aperture.
456 */
457void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
458 phys_addr_t *aperture_base,
459 size_t *aperture_size,
460 size_t *start_offset)
461{
462 /*
463 * The first num_doorbells are used by amdgpu.
464 * amdkfd takes whatever's left in the aperture.
465 */
466 if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
467 *aperture_base = adev->doorbell.base;
468 *aperture_size = adev->doorbell.size;
469 *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
470 } else {
471 *aperture_base = 0;
472 *aperture_size = 0;
473 *start_offset = 0;
474 }
475}
476
/*
 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */
482
483/**
484 * amdgpu_wb_fini - Disable Writeback and free memory
485 *
486 * @adev: amdgpu_device pointer
487 *
488 * Disables Writeback and frees the Writeback memory (all asics).
489 * Used at driver shutdown.
490 */
491static void amdgpu_wb_fini(struct amdgpu_device *adev)
492{
493 if (adev->wb.wb_obj) {
a76ed485
AD
494 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
495 &adev->wb.gpu_addr,
496 (void **)&adev->wb.wb);
d38ceaf9
AD
497 adev->wb.wb_obj = NULL;
498 }
499}
500
/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an error on failure.
 */
510static int amdgpu_wb_init(struct amdgpu_device *adev)
511{
512 int r;
513
514 if (adev->wb.wb_obj == NULL) {
97407b63
AD
515 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
516 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
a76ed485
AD
517 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
518 &adev->wb.wb_obj, &adev->wb.gpu_addr,
519 (void **)&adev->wb.wb);
d38ceaf9
AD
520 if (r) {
521 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
522 return r;
523 }
d38ceaf9
AD
524
525 adev->wb.num_wb = AMDGPU_MAX_WB;
526 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
527
528 /* clear wb memory */
60a970a6 529 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
d38ceaf9
AD
530 }
531
532 return 0;
533}
534
/**
 * amdgpu_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb >> 3, adev->wb.used);
}
570
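/*
 * Illustrative sketch (assumption, not part of the original file): a ring or
 * IP block typically grabs a writeback slot at init time, points the hardware
 * at adev->wb.gpu_addr + (offset * 4), and releases the slot on teardown:
 *
 *	u32 wb_offset;
 *
 *	r = amdgpu_wb_get(adev, &wb_offset);
 *	if (r)
 *		return r;
 *	// ... hardware writes status into adev->wb.wb[wb_offset] ...
 *	amdgpu_wb_free(adev, wb_offset);
 */
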
/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is smaller than aperture size
 * (Novell bug 204882 along with lots of ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 along with lots
 * of ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gart_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = adev->mc.mc_mask - mc->vram_end;
	size_bf = mc->vram_start;
	if (size_bf > size_af) {
		if (mc->gart_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_bf;
		}
		mc->gart_start = 0;
	} else {
		if (mc->gart_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_af;
		}
		mc->gart_start = mc->vram_end + 1;
	}
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
656
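/*
 * Illustrative example (assumption, not part of the original file): with VRAM
 * placed at offset 0 and a 40-bit mc_mask, size_bf is 0 and size_af is the
 * rest of the address space, so the GART window ends up immediately after
 * VRAM (gart_start = vram_end + 1); only when VRAM sits at the top of the
 * address space would GTT be placed below it.
 */
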
657/*
658 * Firmware Reservation functions
659 */
660/**
661 * amdgpu_fw_reserve_vram_fini - free fw reserved vram
662 *
663 * @adev: amdgpu_device pointer
664 *
665 * free fw reserved vram if it has been reserved.
666 */
667void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
668{
669 amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
670 NULL, &adev->fw_vram_usage.va);
671}
672
673/**
674 * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
675 *
676 * @adev: amdgpu_device pointer
677 *
678 * create bo vram reservation from fw.
679 */
680int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
681{
682 int r = 0;
683 u64 gpu_addr;
684 u64 vram_size = adev->mc.visible_vram_size;
685
686 adev->fw_vram_usage.va = NULL;
687 adev->fw_vram_usage.reserved_bo = NULL;
688
689 if (adev->fw_vram_usage.size > 0 &&
690 adev->fw_vram_usage.size <= vram_size) {
691
692 r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
693 PAGE_SIZE, true, 0,
694 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
695 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
696 &adev->fw_vram_usage.reserved_bo);
697 if (r)
698 goto error_create;
699
700 r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
701 if (r)
702 goto error_reserve;
703 r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
704 AMDGPU_GEM_DOMAIN_VRAM,
705 adev->fw_vram_usage.start_offset,
706 (adev->fw_vram_usage.start_offset +
707 adev->fw_vram_usage.size), &gpu_addr);
708 if (r)
709 goto error_pin;
710 r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
711 &adev->fw_vram_usage.va);
712 if (r)
713 goto error_kmap;
714
715 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
716 }
717 return r;
718
719error_kmap:
720 amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
721error_pin:
722 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
723error_reserve:
724 amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
725error_create:
726 adev->fw_vram_usage.va = NULL;
727 adev->fw_vram_usage.reserved_bo = NULL;
728 return r;
729}
730
731
/*
 * GPU helper functions.
 */
/**
 * amdgpu_need_post - check if the hw needs to be posted or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if a post is needed after a hw reset.
 * Returns true if post is needed, false if not.
 */
bool amdgpu_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In the whole-GPU pass-through virtualization case,
		 * after a VM reboot some old SMC firmware still needs the
		 * driver to do a vPost, otherwise the GPU hangs.  SMC firmware
		 * versions above 22.15 don't have this flaw, so we force
		 * vPost for SMC versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}
788
d38ceaf9
AD
789/**
790 * amdgpu_dummy_page_init - init dummy page used by the driver
791 *
792 * @adev: amdgpu_device pointer
793 *
794 * Allocate the dummy page used by the driver (all asics).
795 * This dummy page is used by the driver as a filler for gart entries
796 * when pages are taken out of the GART
 * Returns 0 on success, -ENOMEM on failure.
798 */
799int amdgpu_dummy_page_init(struct amdgpu_device *adev)
800{
801 if (adev->dummy_page.page)
802 return 0;
803 adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
804 if (adev->dummy_page.page == NULL)
805 return -ENOMEM;
806 adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
807 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
808 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
809 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
810 __free_page(adev->dummy_page.page);
811 adev->dummy_page.page = NULL;
812 return -ENOMEM;
813 }
814 return 0;
815}
816
817/**
818 * amdgpu_dummy_page_fini - free dummy page used by the driver
819 *
820 * @adev: amdgpu_device pointer
821 *
822 * Frees the dummy page used by the driver (all asics).
823 */
824void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
825{
826 if (adev->dummy_page.page == NULL)
827 return;
828 pci_unmap_page(adev->pdev, adev->dummy_page.addr,
829 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
830 __free_page(adev->dummy_page.page);
831 adev->dummy_page.page = NULL;
832}
833
834
835/* ATOM accessor methods */
836/*
837 * ATOM is an interpreted byte code stored in tables in the vbios. The
838 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
840 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
841 * atombios.h, and atom.c
842 */
843
844/**
845 * cail_pll_read - read PLL register
846 *
847 * @info: atom card_info pointer
848 * @reg: PLL register offset
849 *
850 * Provides a PLL register accessor for the atom interpreter (r4xx+).
851 * Returns the value of the PLL register.
852 */
853static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
854{
855 return 0;
856}
857
858/**
859 * cail_pll_write - write PLL register
860 *
861 * @info: atom card_info pointer
862 * @reg: PLL register offset
863 * @val: value to write to the pll register
864 *
865 * Provides a PLL register accessor for the atom interpreter (r4xx+).
866 */
867static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
868{
869
870}
871
872/**
873 * cail_mc_read - read MC (Memory Controller) register
874 *
875 * @info: atom card_info pointer
876 * @reg: MC register offset
877 *
878 * Provides an MC register accessor for the atom interpreter (r4xx+).
879 * Returns the value of the MC register.
880 */
881static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
882{
883 return 0;
884}
885
886/**
887 * cail_mc_write - write MC (Memory Controller) register
888 *
889 * @info: atom card_info pointer
890 * @reg: MC register offset
891 * @val: value to write to the pll register
892 *
893 * Provides a MC register accessor for the atom interpreter (r4xx+).
894 */
895static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
896{
897
898}
899
900/**
901 * cail_reg_write - write MMIO register
902 *
903 * @info: atom card_info pointer
904 * @reg: MMIO register offset
905 * @val: value to write to the pll register
906 *
907 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
908 */
909static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
910{
911 struct amdgpu_device *adev = info->dev->dev_private;
912
913 WREG32(reg, val);
914}
915
916/**
917 * cail_reg_read - read MMIO register
918 *
919 * @info: atom card_info pointer
920 * @reg: MMIO register offset
921 *
922 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
923 * Returns the value of the MMIO register.
924 */
925static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
926{
927 struct amdgpu_device *adev = info->dev->dev_private;
928 uint32_t r;
929
930 r = RREG32(reg);
931 return r;
932}
933
934/**
935 * cail_ioreg_write - write IO register
936 *
937 * @info: atom card_info pointer
938 * @reg: IO register offset
939 * @val: value to write to the pll register
940 *
941 * Provides a IO register accessor for the atom interpreter (r4xx+).
942 */
943static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
944{
945 struct amdgpu_device *adev = info->dev->dev_private;
946
947 WREG32_IO(reg, val);
948}
949
950/**
951 * cail_ioreg_read - read IO register
952 *
953 * @info: atom card_info pointer
954 * @reg: IO register offset
955 *
956 * Provides an IO register accessor for the atom interpreter (r4xx+).
957 * Returns the value of the IO register.
958 */
959static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
960{
961 struct amdgpu_device *adev = info->dev->dev_private;
962 uint32_t r;
963
964 r = RREG32_IO(reg);
965 return r;
966}
967
5b41d94c
KR
968static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
969 struct device_attribute *attr,
970 char *buf)
971{
972 struct drm_device *ddev = dev_get_drvdata(dev);
973 struct amdgpu_device *adev = ddev->dev_private;
974 struct atom_context *ctx = adev->mode_info.atom_context;
975
976 return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
977}
978
979static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
980 NULL);
981
d38ceaf9
AD
982/**
983 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
984 *
985 * @adev: amdgpu_device pointer
986 *
987 * Frees the driver info and register access callbacks for the ATOM
988 * interpreter (r4xx+).
989 * Called at driver shutdown.
990 */
991static void amdgpu_atombios_fini(struct amdgpu_device *adev)
992{
89e0ec9f 993 if (adev->mode_info.atom_context) {
d38ceaf9 994 kfree(adev->mode_info.atom_context->scratch);
89e0ec9f
ML
995 kfree(adev->mode_info.atom_context->iio);
996 }
d38ceaf9
AD
997 kfree(adev->mode_info.atom_context);
998 adev->mode_info.atom_context = NULL;
999 kfree(adev->mode_info.atom_card_info);
1000 adev->mode_info.atom_card_info = NULL;
5b41d94c 1001 device_remove_file(adev->dev, &dev_attr_vbios_version);
d38ceaf9
AD
1002}
1003
1004/**
1005 * amdgpu_atombios_init - init the driver info and callbacks for atombios
1006 *
1007 * @adev: amdgpu_device pointer
1008 *
1009 * Initializes the driver info and register access callbacks for the
1010 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
1012 * Called at driver startup.
1013 */
1014static int amdgpu_atombios_init(struct amdgpu_device *adev)
1015{
1016 struct card_info *atom_card_info =
1017 kzalloc(sizeof(struct card_info), GFP_KERNEL);
5b41d94c 1018 int ret;
d38ceaf9
AD
1019
1020 if (!atom_card_info)
1021 return -ENOMEM;
1022
1023 adev->mode_info.atom_card_info = atom_card_info;
1024 atom_card_info->dev = adev->ddev;
1025 atom_card_info->reg_read = cail_reg_read;
1026 atom_card_info->reg_write = cail_reg_write;
1027 /* needed for iio ops */
1028 if (adev->rio_mem) {
1029 atom_card_info->ioreg_read = cail_ioreg_read;
1030 atom_card_info->ioreg_write = cail_ioreg_write;
1031 } else {
9953b72f 1032 DRM_DEBUG("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
d38ceaf9
AD
1033 atom_card_info->ioreg_read = cail_reg_read;
1034 atom_card_info->ioreg_write = cail_reg_write;
1035 }
1036 atom_card_info->mc_read = cail_mc_read;
1037 atom_card_info->mc_write = cail_mc_write;
1038 atom_card_info->pll_read = cail_pll_read;
1039 atom_card_info->pll_write = cail_pll_write;
1040
1041 adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
1042 if (!adev->mode_info.atom_context) {
1043 amdgpu_atombios_fini(adev);
1044 return -ENOMEM;
1045 }
1046
1047 mutex_init(&adev->mode_info.atom_context->mutex);
a5bde2f9
AD
1048 if (adev->is_atom_fw) {
1049 amdgpu_atomfirmware_scratch_regs_init(adev);
1050 amdgpu_atomfirmware_allocate_fb_scratch(adev);
1051 } else {
1052 amdgpu_atombios_scratch_regs_init(adev);
1053 amdgpu_atombios_allocate_fb_scratch(adev);
1054 }
5b41d94c
KR
1055
1056 ret = device_create_file(adev->dev, &dev_attr_vbios_version);
1057 if (ret) {
1058 DRM_ERROR("Failed to create device file for VBIOS version\n");
1059 return ret;
1060 }
1061
d38ceaf9
AD
1062 return 0;
1063}
1064
1065/* if we get transitioned to only one device, take VGA back */
1066/**
1067 * amdgpu_vga_set_decode - enable/disable vga decode
1068 *
1069 * @cookie: amdgpu_device pointer
1070 * @state: enable/disable vga decode
1071 *
1072 * Enable/disable vga decode (all asics).
1073 * Returns VGA resource flags.
1074 */
1075static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
1076{
1077 struct amdgpu_device *adev = cookie;
1078 amdgpu_asic_set_vga_state(adev, state);
1079 if (state)
1080 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1081 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1082 else
1083 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1084}
1085
bab4fee7 1086static void amdgpu_check_block_size(struct amdgpu_device *adev)
a1adf8be
CZ
1087{
1088 /* defines number of bits in page table versus page directory,
1089 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1090 * page table and the remaining bits are in the page directory */
bab4fee7
JZ
1091 if (amdgpu_vm_block_size == -1)
1092 return;
a1adf8be 1093
bab4fee7 1094 if (amdgpu_vm_block_size < 9) {
a1adf8be
CZ
1095 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1096 amdgpu_vm_block_size);
bab4fee7 1097 goto def_value;
a1adf8be
CZ
1098 }
1099
1100 if (amdgpu_vm_block_size > 24 ||
1101 (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
1102 dev_warn(adev->dev, "VM page table size (%d) too large\n",
1103 amdgpu_vm_block_size);
bab4fee7 1104 goto def_value;
a1adf8be 1105 }
bab4fee7
JZ
1106
1107 return;
1108
1109def_value:
1110 amdgpu_vm_block_size = -1;
a1adf8be
CZ
1111}
1112
83ca145d
ZJ
1113static void amdgpu_check_vm_size(struct amdgpu_device *adev)
1114{
64dab074
AD
1115 /* no need to check the default value */
1116 if (amdgpu_vm_size == -1)
1117 return;
1118
76117507 1119 if (!is_power_of_2(amdgpu_vm_size)) {
83ca145d
ZJ
1120 dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
1121 amdgpu_vm_size);
1122 goto def_value;
1123 }
1124
1125 if (amdgpu_vm_size < 1) {
1126 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1127 amdgpu_vm_size);
1128 goto def_value;
1129 }
1130
1131 /*
1132 * Max GPUVM size for Cayman, SI, CI VI are 40 bits.
1133 */
1134 if (amdgpu_vm_size > 1024) {
1135 dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
1136 amdgpu_vm_size);
1137 goto def_value;
1138 }
1139
1140 return;
1141
1142def_value:
bab4fee7 1143 amdgpu_vm_size = -1;
83ca145d
ZJ
1144}
1145
d38ceaf9
AD
1146/**
1147 * amdgpu_check_arguments - validate module params
1148 *
1149 * @adev: amdgpu_device pointer
1150 *
1151 * Validates certain module parameters and updates
1152 * the associated values used by the driver (all asics).
1153 */
1154static void amdgpu_check_arguments(struct amdgpu_device *adev)
1155{
5b011235
CZ
1156 if (amdgpu_sched_jobs < 4) {
1157 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1158 amdgpu_sched_jobs);
1159 amdgpu_sched_jobs = 4;
76117507 1160 } else if (!is_power_of_2(amdgpu_sched_jobs)){
5b011235
CZ
1161 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1162 amdgpu_sched_jobs);
1163 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1164 }
d38ceaf9 1165
83e74db6 1166 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
f9321cc4
CK
1167 /* gart size must be greater or equal to 32M */
1168 dev_warn(adev->dev, "gart size (%d) too small\n",
1169 amdgpu_gart_size);
83e74db6 1170 amdgpu_gart_size = -1;
d38ceaf9
AD
1171 }
1172
36d38372 1173 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
c4e1a13a 1174 /* gtt size must be greater or equal to 32M */
36d38372
CK
1175 dev_warn(adev->dev, "gtt size (%d) too small\n",
1176 amdgpu_gtt_size);
1177 amdgpu_gtt_size = -1;
d38ceaf9
AD
1178 }
1179
d07f14be
RH
1180 /* valid range is between 4 and 9 inclusive */
1181 if (amdgpu_vm_fragment_size != -1 &&
1182 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1183 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1184 amdgpu_vm_fragment_size = -1;
1185 }
1186
83ca145d 1187 amdgpu_check_vm_size(adev);
d38ceaf9 1188
bab4fee7 1189 amdgpu_check_block_size(adev);
6a7f76e7 1190
526bae37 1191 if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
76117507 1192 !is_power_of_2(amdgpu_vram_page_split))) {
6a7f76e7
CK
1193 dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
1194 amdgpu_vram_page_split);
1195 amdgpu_vram_page_split = 1024;
1196 }
d38ceaf9
AD
1197}
1198
1199/**
1200 * amdgpu_switcheroo_set_state - set switcheroo state
1201 *
1202 * @pdev: pci dev pointer
1694467b 1203 * @state: vga_switcheroo state
d38ceaf9
AD
1204 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asic before or after it is powered up using ACPI methods.
1207 */
1208static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1209{
1210 struct drm_device *dev = pci_get_drvdata(pdev);
1211
1212 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1213 return;
1214
1215 if (state == VGA_SWITCHEROO_ON) {
7ca85295 1216 pr_info("amdgpu: switched on\n");
d38ceaf9
AD
1217 /* don't suspend or resume card normally */
1218 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1219
810ddc3a 1220 amdgpu_device_resume(dev, true, true);
d38ceaf9 1221
d38ceaf9
AD
1222 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1223 drm_kms_helper_poll_enable(dev);
1224 } else {
7ca85295 1225 pr_info("amdgpu: switched off\n");
d38ceaf9
AD
1226 drm_kms_helper_poll_disable(dev);
1227 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
810ddc3a 1228 amdgpu_device_suspend(dev, true, true);
d38ceaf9
AD
1229 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1230 }
1231}
1232
1233/**
1234 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1235 *
1236 * @pdev: pci dev pointer
1237 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
1240 * Returns true if the state can be changed, false if not.
1241 */
1242static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1243{
1244 struct drm_device *dev = pci_get_drvdata(pdev);
1245
1246 /*
1247 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1248 * locking inversion with the driver load path. And the access here is
1249 * completely racy anyway. So don't bother with locking for now.
1250 */
1251 return dev->open_count == 0;
1252}
1253
1254static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1255 .set_gpu_state = amdgpu_switcheroo_set_state,
1256 .reprobe = NULL,
1257 .can_switch = amdgpu_switcheroo_can_switch,
1258};
1259
1260int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
5fc3aeeb 1261 enum amd_ip_block_type block_type,
1262 enum amd_clockgating_state state)
d38ceaf9
AD
1263{
1264 int i, r = 0;
1265
1266 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1267 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1268 continue;
c722865a
RZ
1269 if (adev->ip_blocks[i].version->type != block_type)
1270 continue;
1271 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1272 continue;
1273 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1274 (void *)adev, state);
1275 if (r)
1276 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1277 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
1278 }
1279 return r;
1280}
1281
1282int amdgpu_set_powergating_state(struct amdgpu_device *adev,
5fc3aeeb 1283 enum amd_ip_block_type block_type,
1284 enum amd_powergating_state state)
d38ceaf9
AD
1285{
1286 int i, r = 0;
1287
1288 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1289 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1290 continue;
c722865a
RZ
1291 if (adev->ip_blocks[i].version->type != block_type)
1292 continue;
1293 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1294 continue;
1295 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1296 (void *)adev, state);
1297 if (r)
1298 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1299 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
1300 }
1301 return r;
1302}
1303
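/*
 * Illustrative usage (assumption, not part of the original file): IP-agnostic
 * code can gate or ungate a single block through this helper, e.g. clock-gate
 * UVD while it is idle:
 *
 *	amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
 *				     AMD_CG_STATE_GATE);
 */
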
6cb2d4e4
HR
1304void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
1305{
1306 int i;
1307
1308 for (i = 0; i < adev->num_ip_blocks; i++) {
1309 if (!adev->ip_blocks[i].status.valid)
1310 continue;
1311 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1312 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1313 }
1314}
1315
5dbbb60b
AD
1316int amdgpu_wait_for_idle(struct amdgpu_device *adev,
1317 enum amd_ip_block_type block_type)
1318{
1319 int i, r;
1320
1321 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1322 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1323 continue;
a1255107
AD
1324 if (adev->ip_blocks[i].version->type == block_type) {
1325 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
5dbbb60b
AD
1326 if (r)
1327 return r;
1328 break;
1329 }
1330 }
1331 return 0;
1332
1333}
1334
1335bool amdgpu_is_idle(struct amdgpu_device *adev,
1336 enum amd_ip_block_type block_type)
1337{
1338 int i;
1339
1340 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1341 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1342 continue;
a1255107
AD
1343 if (adev->ip_blocks[i].version->type == block_type)
1344 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
5dbbb60b
AD
1345 }
1346 return true;
1347
1348}
1349
a1255107
AD
1350struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
1351 enum amd_ip_block_type type)
d38ceaf9
AD
1352{
1353 int i;
1354
1355 for (i = 0; i < adev->num_ip_blocks; i++)
a1255107 1356 if (adev->ip_blocks[i].version->type == type)
d38ceaf9
AD
1357 return &adev->ip_blocks[i];
1358
1359 return NULL;
1360}
1361
1362/**
1363 * amdgpu_ip_block_version_cmp
1364 *
1365 * @adev: amdgpu_device pointer
5fc3aeeb 1366 * @type: enum amd_ip_block_type
d38ceaf9
AD
1367 * @major: major version
1368 * @minor: minor version
1369 *
1370 * return 0 if equal or greater
1371 * return 1 if smaller or the ip_block doesn't exist
1372 */
1373int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
5fc3aeeb 1374 enum amd_ip_block_type type,
d38ceaf9
AD
1375 u32 major, u32 minor)
1376{
a1255107 1377 struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
d38ceaf9 1378
a1255107
AD
1379 if (ip_block && ((ip_block->version->major > major) ||
1380 ((ip_block->version->major == major) &&
1381 (ip_block->version->minor >= minor))))
d38ceaf9
AD
1382 return 0;
1383
1384 return 1;
1385}
1386
a1255107
AD
1387/**
1388 * amdgpu_ip_block_add
1389 *
1390 * @adev: amdgpu_device pointer
1391 * @ip_block_version: pointer to the IP to add
1392 *
1393 * Adds the IP block driver information to the collection of IPs
1394 * on the asic.
1395 */
1396int amdgpu_ip_block_add(struct amdgpu_device *adev,
1397 const struct amdgpu_ip_block_version *ip_block_version)
1398{
1399 if (!ip_block_version)
1400 return -EINVAL;
1401
a0bae357
HR
1402 DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
1403 ip_block_version->funcs->name);
1404
a1255107
AD
1405 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1406
1407 return 0;
1408}
1409
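/*
 * Illustrative sketch (assumption, not part of the original file): ASIC setup
 * code registers its IP blocks in order during its *_set_ip_blocks() routine;
 * the block name below is hypothetical:
 *
 *	r = amdgpu_ip_block_add(adev, &example_common_ip_block);
 *	if (r)
 *		return r;
 */
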
483ef985 1410static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
9accf2fd
ED
1411{
1412 adev->enable_virtual_display = false;
1413
1414 if (amdgpu_virtual_display) {
1415 struct drm_device *ddev = adev->ddev;
1416 const char *pci_address_name = pci_name(ddev->pdev);
0f66356d 1417 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
9accf2fd
ED
1418
1419 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1420 pciaddstr_tmp = pciaddstr;
0f66356d
ED
1421 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1422 pciaddname = strsep(&pciaddname_tmp, ",");
967de2a9
YT
1423 if (!strcmp("all", pciaddname)
1424 || !strcmp(pci_address_name, pciaddname)) {
0f66356d
ED
1425 long num_crtc;
1426 int res = -1;
1427
9accf2fd 1428 adev->enable_virtual_display = true;
0f66356d
ED
1429
1430 if (pciaddname_tmp)
1431 res = kstrtol(pciaddname_tmp, 10,
1432 &num_crtc);
1433
1434 if (!res) {
1435 if (num_crtc < 1)
1436 num_crtc = 1;
1437 if (num_crtc > 6)
1438 num_crtc = 6;
1439 adev->mode_info.num_crtc = num_crtc;
1440 } else {
1441 adev->mode_info.num_crtc = 1;
1442 }
9accf2fd
ED
1443 break;
1444 }
1445 }
1446
0f66356d
ED
1447 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1448 amdgpu_virtual_display, pci_address_name,
1449 adev->enable_virtual_display, adev->mode_info.num_crtc);
9accf2fd
ED
1450
1451 kfree(pciaddstr);
1452 }
1453}
1454
e2a75f88
AD
1455static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1456{
e2a75f88
AD
1457 const char *chip_name;
1458 char fw_name[30];
1459 int err;
1460 const struct gpu_info_firmware_header_v1_0 *hdr;
1461
ab4fe3e1
HR
1462 adev->firmware.gpu_info_fw = NULL;
1463
e2a75f88
AD
1464 switch (adev->asic_type) {
1465 case CHIP_TOPAZ:
1466 case CHIP_TONGA:
1467 case CHIP_FIJI:
1468 case CHIP_POLARIS11:
1469 case CHIP_POLARIS10:
1470 case CHIP_POLARIS12:
1471 case CHIP_CARRIZO:
1472 case CHIP_STONEY:
1473#ifdef CONFIG_DRM_AMDGPU_SI
1474 case CHIP_VERDE:
1475 case CHIP_TAHITI:
1476 case CHIP_PITCAIRN:
1477 case CHIP_OLAND:
1478 case CHIP_HAINAN:
1479#endif
1480#ifdef CONFIG_DRM_AMDGPU_CIK
1481 case CHIP_BONAIRE:
1482 case CHIP_HAWAII:
1483 case CHIP_KAVERI:
1484 case CHIP_KABINI:
1485 case CHIP_MULLINS:
1486#endif
1487 default:
1488 return 0;
1489 case CHIP_VEGA10:
1490 chip_name = "vega10";
1491 break;
2d2e5e7e
AD
1492 case CHIP_RAVEN:
1493 chip_name = "raven";
1494 break;
e2a75f88
AD
1495 }
1496
1497 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
ab4fe3e1 1498 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
e2a75f88
AD
1499 if (err) {
1500 dev_err(adev->dev,
1501 "Failed to load gpu_info firmware \"%s\"\n",
1502 fw_name);
1503 goto out;
1504 }
ab4fe3e1 1505 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
e2a75f88
AD
1506 if (err) {
1507 dev_err(adev->dev,
1508 "Failed to validate gpu_info firmware \"%s\"\n",
1509 fw_name);
1510 goto out;
1511 }
1512
ab4fe3e1 1513 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
e2a75f88
AD
1514 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1515
1516 switch (hdr->version_major) {
1517 case 1:
1518 {
1519 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
ab4fe3e1 1520 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
e2a75f88
AD
1521 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1522
b5ab16bf
AD
1523 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1524 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1525 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1526 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
e2a75f88 1527 adev->gfx.config.max_texture_channel_caches =
b5ab16bf
AD
1528 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1529 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1530 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1531 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1532 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
e2a75f88 1533 adev->gfx.config.double_offchip_lds_buf =
b5ab16bf
AD
1534 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1535 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
51fd0370
HZ
1536 adev->gfx.cu_info.max_waves_per_simd =
1537 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1538 adev->gfx.cu_info.max_scratch_slots_per_cu =
1539 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1540 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
e2a75f88
AD
1541 break;
1542 }
1543 default:
1544 dev_err(adev->dev,
1545 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1546 err = -EINVAL;
1547 goto out;
1548 }
1549out:
e2a75f88
AD
1550 return err;
1551}
1552
d38ceaf9
AD
1553static int amdgpu_early_init(struct amdgpu_device *adev)
1554{
aaa36a97 1555 int i, r;
d38ceaf9 1556
483ef985 1557 amdgpu_device_enable_virtual_display(adev);
a6be7570 1558
d38ceaf9 1559 switch (adev->asic_type) {
aaa36a97
AD
1560 case CHIP_TOPAZ:
1561 case CHIP_TONGA:
48299f95 1562 case CHIP_FIJI:
2cc0c0b5
FC
1563 case CHIP_POLARIS11:
1564 case CHIP_POLARIS10:
c4642a47 1565 case CHIP_POLARIS12:
aaa36a97 1566 case CHIP_CARRIZO:
39bb0c92
SL
1567 case CHIP_STONEY:
1568 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
aaa36a97
AD
1569 adev->family = AMDGPU_FAMILY_CZ;
1570 else
1571 adev->family = AMDGPU_FAMILY_VI;
1572
1573 r = vi_set_ip_blocks(adev);
1574 if (r)
1575 return r;
1576 break;
33f34802
KW
1577#ifdef CONFIG_DRM_AMDGPU_SI
1578 case CHIP_VERDE:
1579 case CHIP_TAHITI:
1580 case CHIP_PITCAIRN:
1581 case CHIP_OLAND:
1582 case CHIP_HAINAN:
295d0daf 1583 adev->family = AMDGPU_FAMILY_SI;
33f34802
KW
1584 r = si_set_ip_blocks(adev);
1585 if (r)
1586 return r;
1587 break;
1588#endif
a2e73f56
AD
1589#ifdef CONFIG_DRM_AMDGPU_CIK
1590 case CHIP_BONAIRE:
1591 case CHIP_HAWAII:
1592 case CHIP_KAVERI:
1593 case CHIP_KABINI:
1594 case CHIP_MULLINS:
1595 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1596 adev->family = AMDGPU_FAMILY_CI;
1597 else
1598 adev->family = AMDGPU_FAMILY_KV;
1599
1600 r = cik_set_ip_blocks(adev);
1601 if (r)
1602 return r;
1603 break;
1604#endif
2ca8a5d2
CZ
1605 case CHIP_VEGA10:
1606 case CHIP_RAVEN:
1607 if (adev->asic_type == CHIP_RAVEN)
1608 adev->family = AMDGPU_FAMILY_RV;
1609 else
1610 adev->family = AMDGPU_FAMILY_AI;
460826e6
KW
1611
1612 r = soc15_set_ip_blocks(adev);
1613 if (r)
1614 return r;
1615 break;
d38ceaf9
AD
1616 default:
1617 /* FIXME: not supported yet */
1618 return -EINVAL;
1619 }
1620
e2a75f88
AD
1621 r = amdgpu_device_parse_gpu_info_fw(adev);
1622 if (r)
1623 return r;
1624
3149d9da
XY
1625 if (amdgpu_sriov_vf(adev)) {
1626 r = amdgpu_virt_request_full_gpu(adev, true);
1627 if (r)
1628 return r;
1629 }
1630
d38ceaf9
AD
1631 for (i = 0; i < adev->num_ip_blocks; i++) {
1632 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
ed8cf00c
HR
1633 DRM_ERROR("disabled ip block: %d <%s>\n",
1634 i, adev->ip_blocks[i].version->funcs->name);
a1255107 1635 adev->ip_blocks[i].status.valid = false;
d38ceaf9 1636 } else {
a1255107
AD
1637 if (adev->ip_blocks[i].version->funcs->early_init) {
1638 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2c1a2784 1639 if (r == -ENOENT) {
a1255107 1640 adev->ip_blocks[i].status.valid = false;
2c1a2784 1641 } else if (r) {
a1255107
AD
1642 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1643 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1644 return r;
2c1a2784 1645 } else {
a1255107 1646 adev->ip_blocks[i].status.valid = true;
2c1a2784 1647 }
974e6b64 1648 } else {
a1255107 1649 adev->ip_blocks[i].status.valid = true;
d38ceaf9 1650 }
d38ceaf9
AD
1651 }
1652 }
1653
395d1fb9
NH
1654 adev->cg_flags &= amdgpu_cg_mask;
1655 adev->pg_flags &= amdgpu_pg_mask;
1656
d38ceaf9
AD
1657 return 0;
1658}
1659
1660static int amdgpu_init(struct amdgpu_device *adev)
1661{
1662 int i, r;
1663
1664 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1665 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1666 continue;
a1255107 1667 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2c1a2784 1668 if (r) {
a1255107
AD
1669 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1670 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1671 return r;
2c1a2784 1672 }
a1255107 1673 adev->ip_blocks[i].status.sw = true;
d38ceaf9 1674 /* need to do gmc hw init early so we can allocate gpu mem */
a1255107 1675 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
d38ceaf9 1676 r = amdgpu_vram_scratch_init(adev);
2c1a2784
AD
1677 if (r) {
1678 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
d38ceaf9 1679 return r;
2c1a2784 1680 }
a1255107 1681 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784
AD
1682 if (r) {
1683 DRM_ERROR("hw_init %d failed %d\n", i, r);
d38ceaf9 1684 return r;
2c1a2784 1685 }
d38ceaf9 1686 r = amdgpu_wb_init(adev);
2c1a2784
AD
1687 if (r) {
1688 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
d38ceaf9 1689 return r;
2c1a2784 1690 }
a1255107 1691 adev->ip_blocks[i].status.hw = true;
2493664f
ML
1692
1693 /* right after GMC hw init, we create CSA */
1694 if (amdgpu_sriov_vf(adev)) {
1695 r = amdgpu_allocate_static_csa(adev);
1696 if (r) {
1697 DRM_ERROR("allocate CSA failed %d\n", r);
1698 return r;
1699 }
1700 }
d38ceaf9
AD
1701 }
1702 }
1703
1704 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1705 if (!adev->ip_blocks[i].status.sw)
d38ceaf9
AD
1706 continue;
1707 /* gmc hw init is done early */
a1255107 1708 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
d38ceaf9 1709 continue;
a1255107 1710 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784 1711 if (r) {
a1255107
AD
1712 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1713 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1714 return r;
2c1a2784 1715 }
a1255107 1716 adev->ip_blocks[i].status.hw = true;
d38ceaf9
AD
1717 }
1718
1719 return 0;
1720}
1721
0c49e0b8
CZ
1722static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
1723{
1724 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1725}
1726
1727static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
1728{
1729 return !!memcmp(adev->gart.ptr, adev->reset_magic,
1730 AMDGPU_RESET_MAGIC_NUM);
1731}
1732
2dc80b00 1733static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
d38ceaf9
AD
1734{
1735 int i = 0, r;
1736
1737 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1738 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1739 continue;
4a446d55 1740 /* skip CG for VCE/UVD, it's handled specially */
a1255107
AD
1741 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1742 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
4a446d55 1743 /* enable clockgating to save power */
a1255107
AD
1744 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1745 AMD_CG_STATE_GATE);
4a446d55
AD
1746 if (r) {
1747 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
a1255107 1748 adev->ip_blocks[i].version->funcs->name, r);
4a446d55
AD
1749 return r;
1750 }
b0b00ff1 1751 }
d38ceaf9 1752 }
2dc80b00
S
1753 return 0;
1754}
1755
1756static int amdgpu_late_init(struct amdgpu_device *adev)
1757{
1758 int i = 0, r;
1759
1760 for (i = 0; i < adev->num_ip_blocks; i++) {
1761 if (!adev->ip_blocks[i].status.valid)
1762 continue;
1763 if (adev->ip_blocks[i].version->funcs->late_init) {
1764 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
1765 if (r) {
1766 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1767 adev->ip_blocks[i].version->funcs->name, r);
1768 return r;
1769 }
1770 adev->ip_blocks[i].status.late_initialized = true;
1771 }
1772 }
1773
1774 mod_delayed_work(system_wq, &adev->late_init_work,
1775 msecs_to_jiffies(AMDGPU_RESUME_MS));
d38ceaf9 1776
0c49e0b8 1777 amdgpu_fill_reset_magic(adev);
d38ceaf9
AD
1778
1779 return 0;
1780}
1781
1782static int amdgpu_fini(struct amdgpu_device *adev)
1783{
1784 int i, r;
1785
3e96dbfd
AD
1786 /* need to disable SMC first */
1787 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1788 if (!adev->ip_blocks[i].status.hw)
3e96dbfd 1789 continue;
a1255107 1790 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3e96dbfd 1791 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
a1255107
AD
1792 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1793 AMD_CG_STATE_UNGATE);
3e96dbfd
AD
1794 if (r) {
1795 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
a1255107 1796 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd
AD
1797 return r;
1798 }
a1255107 1799 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3e96dbfd
AD
1800 /* XXX handle errors */
1801 if (r) {
1802 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
a1255107 1803 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd 1804 }
a1255107 1805 adev->ip_blocks[i].status.hw = false;
3e96dbfd
AD
1806 break;
1807 }
1808 }
1809
d38ceaf9 1810 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1811 if (!adev->ip_blocks[i].status.hw)
d38ceaf9 1812 continue;
a1255107 1813 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
d38ceaf9
AD
1814 amdgpu_wb_fini(adev);
1815 amdgpu_vram_scratch_fini(adev);
1816 }
8201a67a
RZ
1817
1818 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1819 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1820 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1821 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1822 AMD_CG_STATE_UNGATE);
1823 if (r) {
1824 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1825 adev->ip_blocks[i].version->funcs->name, r);
1826 return r;
1827 }
2c1a2784 1828 }
8201a67a 1829
a1255107 1830 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
d38ceaf9 1831 /* XXX handle errors */
2c1a2784 1832 if (r) {
a1255107
AD
1833 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1834 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1835 }
8201a67a 1836
a1255107 1837 adev->ip_blocks[i].status.hw = false;
d38ceaf9
AD
1838 }
1839
1840 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1841 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 1842 continue;
a1255107 1843 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
d38ceaf9 1844 /* XXX handle errors */
2c1a2784 1845 if (r) {
a1255107
AD
1846 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1847 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1848 }
a1255107
AD
1849 adev->ip_blocks[i].status.sw = false;
1850 adev->ip_blocks[i].status.valid = false;
d38ceaf9
AD
1851 }
1852
a6dcfd9c 1853 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1854 if (!adev->ip_blocks[i].status.late_initialized)
8a2eef1d 1855 continue;
a1255107
AD
1856 if (adev->ip_blocks[i].version->funcs->late_fini)
1857 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1858 adev->ip_blocks[i].status.late_initialized = false;
a6dcfd9c
ML
1859 }
1860
030308fc 1861 if (amdgpu_sriov_vf(adev))
3149d9da 1862 amdgpu_virt_release_full_gpu(adev, false);
2493664f 1863
d38ceaf9
AD
1864 return 0;
1865}
1866
2dc80b00
S
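/*
 * Deferred clockgating: amdgpu_late_init() (re)schedules late_init_work with an
 * AMDGPU_RESUME_MS delay, so this handler enables clockgating a short while
 * after late init, presumably to let the ring/IB tests and the rest of init
 * run against ungated blocks first.
 */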
1867static void amdgpu_late_init_func_handler(struct work_struct *work)
1868{
1869 struct amdgpu_device *adev =
1870 container_of(work, struct amdgpu_device, late_init_work.work);
1871 amdgpu_late_set_cg_state(adev);
1872}
1873
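/*
 * Suspend walks the IP blocks in reverse init order; clocks are ungated
 * first (SMC before everything else) so that each block can be shut down
 * cleanly by its ->suspend() hook.
 */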
faefba95 1874int amdgpu_suspend(struct amdgpu_device *adev)
d38ceaf9
AD
1875{
1876 int i, r;
1877
e941ea99
XY
1878 if (amdgpu_sriov_vf(adev))
1879 amdgpu_virt_request_full_gpu(adev, false);
1880
c5a93a28
FC
1881 /* ungate SMC block first */
1882 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1883 AMD_CG_STATE_UNGATE);
1884 if (r) {
 1885 		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
1886 }
1887
d38ceaf9 1888 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1889 if (!adev->ip_blocks[i].status.valid)
d38ceaf9
AD
1890 continue;
1891 /* ungate blocks so that suspend can properly shut them down */
c5a93a28 1892 if (i != AMD_IP_BLOCK_TYPE_SMC) {
a1255107
AD
1893 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1894 AMD_CG_STATE_UNGATE);
c5a93a28 1895 if (r) {
a1255107
AD
1896 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1897 adev->ip_blocks[i].version->funcs->name, r);
c5a93a28 1898 }
2c1a2784 1899 }
d38ceaf9 1900 /* XXX handle errors */
a1255107 1901 r = adev->ip_blocks[i].version->funcs->suspend(adev);
d38ceaf9 1902 /* XXX handle errors */
2c1a2784 1903 if (r) {
a1255107
AD
1904 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1905 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1906 }
d38ceaf9
AD
1907 }
1908
e941ea99
XY
1909 if (amdgpu_sriov_vf(adev))
1910 amdgpu_virt_release_full_gpu(adev, false);
1911
d38ceaf9
AD
1912 return 0;
1913}
1914
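/*
 * SR-IOV re-init is split into two passes (see amdgpu_sriov_gpu_reset()):
 * the "early" pass below brings back GMC, COMMON and IH so that memory
 * access and interrupts work again, the GART is then recovered, and the
 * "late" pass re-inits the remaining blocks (SMC, PSP, DCE, GFX, SDMA,
 * UVD, VCE) on top of that.
 */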
e4f0fdcc 1915static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
a90ad3c2
ML
1916{
1917 int i, r;
1918
2cb681b6
ML
1919 static enum amd_ip_block_type ip_order[] = {
1920 AMD_IP_BLOCK_TYPE_GMC,
1921 AMD_IP_BLOCK_TYPE_COMMON,
2cb681b6
ML
1922 AMD_IP_BLOCK_TYPE_IH,
1923 };
a90ad3c2 1924
2cb681b6
ML
1925 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1926 int j;
1927 struct amdgpu_ip_block *block;
a90ad3c2 1928
2cb681b6
ML
1929 for (j = 0; j < adev->num_ip_blocks; j++) {
1930 block = &adev->ip_blocks[j];
1931
1932 if (block->version->type != ip_order[i] ||
1933 !block->status.valid)
1934 continue;
1935
1936 r = block->version->funcs->hw_init(adev);
 1937 			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
a90ad3c2
ML
1938 }
1939 }
1940
1941 return 0;
1942}
1943
e4f0fdcc 1944static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
a90ad3c2
ML
1945{
1946 int i, r;
1947
2cb681b6
ML
1948 static enum amd_ip_block_type ip_order[] = {
1949 AMD_IP_BLOCK_TYPE_SMC,
ef4c166d 1950 AMD_IP_BLOCK_TYPE_PSP,
2cb681b6
ML
1951 AMD_IP_BLOCK_TYPE_DCE,
1952 AMD_IP_BLOCK_TYPE_GFX,
1953 AMD_IP_BLOCK_TYPE_SDMA,
257deb8c
FM
1954 AMD_IP_BLOCK_TYPE_UVD,
1955 AMD_IP_BLOCK_TYPE_VCE
2cb681b6 1956 };
a90ad3c2 1957
2cb681b6
ML
1958 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1959 int j;
1960 struct amdgpu_ip_block *block;
a90ad3c2 1961
2cb681b6
ML
1962 for (j = 0; j < adev->num_ip_blocks; j++) {
1963 block = &adev->ip_blocks[j];
1964
1965 if (block->version->type != ip_order[i] ||
1966 !block->status.valid)
1967 continue;
1968
1969 r = block->version->funcs->hw_init(adev);
 1970 			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
a90ad3c2
ML
1971 }
1972 }
1973
1974 return 0;
1975}
1976
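/*
 * Resume is likewise split: phase 1 resumes only COMMON, GMC and IH so that
 * register access, VRAM/GART and interrupts are available, and phase 2 then
 * resumes every other IP block.
 */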
fcf0649f 1977static int amdgpu_resume_phase1(struct amdgpu_device *adev)
d38ceaf9
AD
1978{
1979 int i, r;
1980
a90ad3c2
ML
1981 for (i = 0; i < adev->num_ip_blocks; i++) {
1982 if (!adev->ip_blocks[i].status.valid)
1983 continue;
a90ad3c2
ML
1984 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1985 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
fcf0649f
CZ
1986 adev->ip_blocks[i].version->type ==
1987 AMD_IP_BLOCK_TYPE_IH) {
1988 r = adev->ip_blocks[i].version->funcs->resume(adev);
1989 if (r) {
1990 DRM_ERROR("resume of IP block <%s> failed %d\n",
1991 adev->ip_blocks[i].version->funcs->name, r);
1992 return r;
1993 }
a90ad3c2
ML
1994 }
1995 }
1996
1997 return 0;
1998}
1999
fcf0649f 2000static int amdgpu_resume_phase2(struct amdgpu_device *adev)
d38ceaf9
AD
2001{
2002 int i, r;
2003
2004 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2005 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 2006 continue;
fcf0649f
CZ
2007 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2008 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
 2009 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
2010 continue;
a1255107 2011 r = adev->ip_blocks[i].version->funcs->resume(adev);
2c1a2784 2012 if (r) {
a1255107
AD
2013 DRM_ERROR("resume of IP block <%s> failed %d\n",
2014 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 2015 return r;
2c1a2784 2016 }
d38ceaf9
AD
2017 }
2018
2019 return 0;
2020}
2021
fcf0649f
CZ
2022static int amdgpu_resume(struct amdgpu_device *adev)
2023{
2024 int r;
2025
2026 r = amdgpu_resume_phase1(adev);
2027 if (r)
2028 return r;
2029 r = amdgpu_resume_phase2(adev);
2030
2031 return r;
2032}
2033
4e99a44e 2034static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 2035{
6867e1b5
ML
2036 if (amdgpu_sriov_vf(adev)) {
2037 if (adev->is_atom_fw) {
2038 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2039 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2040 } else {
2041 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2042 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2043 }
2044
2045 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2046 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
a5bde2f9 2047 }
048765ad
AR
2048}
2049
4562236b
HW
2050bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2051{
2052 switch (asic_type) {
2053#if defined(CONFIG_DRM_AMD_DC)
2054 case CHIP_BONAIRE:
2055 case CHIP_HAWAII:
0d6fbccb 2056 case CHIP_KAVERI:
4562236b
HW
2057 case CHIP_CARRIZO:
2058 case CHIP_STONEY:
2059 case CHIP_POLARIS11:
2060 case CHIP_POLARIS10:
2c8ad2d5 2061 case CHIP_POLARIS12:
4562236b
HW
2062 case CHIP_TONGA:
2063 case CHIP_FIJI:
2064#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
2065 return amdgpu_dc != 0;
4562236b 2066#endif
17b7cf8c
AD
2067 case CHIP_KABINI:
2068 case CHIP_MULLINS:
2069 return amdgpu_dc > 0;
42f8ffa1
HW
2070 case CHIP_VEGA10:
2071#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
fd187853 2072 case CHIP_RAVEN:
42f8ffa1 2073#endif
fd187853 2074 return amdgpu_dc != 0;
4562236b
HW
2075#endif
2076 default:
2077 return false;
2078 }
2079}
2080
2081/**
2082 * amdgpu_device_has_dc_support - check if dc is supported
2083 *
 2084  * @adev: amdgpu_device pointer
2085 *
2086 * Returns true for supported, false for not supported
2087 */
2088bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2089{
2555039d
XY
2090 if (amdgpu_sriov_vf(adev))
2091 return false;
2092
4562236b
HW
2093 return amdgpu_device_asic_has_dc_support(adev->asic_type);
2094}
2095
d38ceaf9
AD
2096/**
2097 * amdgpu_device_init - initialize the driver
2098 *
2099 * @adev: amdgpu_device pointer
 2100  * @ddev: drm dev pointer
 2101  * @pdev: pci dev pointer
2102 * @flags: driver flags
2103 *
2104 * Initializes the driver info and hw (all asics).
2105 * Returns 0 for success or an error on failure.
2106 * Called at driver startup.
2107 */
2108int amdgpu_device_init(struct amdgpu_device *adev,
2109 struct drm_device *ddev,
2110 struct pci_dev *pdev,
2111 uint32_t flags)
2112{
2113 int r, i;
2114 bool runtime = false;
95844d20 2115 u32 max_MBps;
d38ceaf9
AD
2116
2117 adev->shutdown = false;
2118 adev->dev = &pdev->dev;
2119 adev->ddev = ddev;
2120 adev->pdev = pdev;
2121 adev->flags = flags;
2f7d10b3 2122 adev->asic_type = flags & AMD_ASIC_MASK;
d38ceaf9 2123 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
6f02a696 2124 adev->mc.gart_size = 512 * 1024 * 1024;
d38ceaf9
AD
2125 adev->accel_working = false;
2126 adev->num_rings = 0;
2127 adev->mman.buffer_funcs = NULL;
2128 adev->mman.buffer_funcs_ring = NULL;
2129 adev->vm_manager.vm_pte_funcs = NULL;
2d55e45a 2130 adev->vm_manager.vm_pte_num_rings = 0;
d38ceaf9 2131 adev->gart.gart_funcs = NULL;
f54d1867 2132 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
b8866c26 2133 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
d38ceaf9
AD
2134
2135 adev->smc_rreg = &amdgpu_invalid_rreg;
2136 adev->smc_wreg = &amdgpu_invalid_wreg;
2137 adev->pcie_rreg = &amdgpu_invalid_rreg;
2138 adev->pcie_wreg = &amdgpu_invalid_wreg;
36b9a952
HR
2139 adev->pciep_rreg = &amdgpu_invalid_rreg;
2140 adev->pciep_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
2141 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2142 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2143 adev->didt_rreg = &amdgpu_invalid_rreg;
2144 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
2145 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2146 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
2147 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2148 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2149
3e39ab90
AD
2150 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2151 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2152 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
2153
 2154 	/* mutex initialization is all done here so we
 2155 	 * can call these functions again later without locking issues */
d38ceaf9 2156 atomic_set(&adev->irq.ih.lock, 0);
0e5ca0d1 2157 mutex_init(&adev->firmware.mutex);
d38ceaf9
AD
2158 mutex_init(&adev->pm.mutex);
2159 mutex_init(&adev->gfx.gpu_clock_mutex);
2160 mutex_init(&adev->srbm_mutex);
b8866c26 2161 mutex_init(&adev->gfx.pipe_reserve_mutex);
d38ceaf9 2162 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9 2163 mutex_init(&adev->mn_lock);
e23b74aa 2164 mutex_init(&adev->virt.vf_errors.lock);
d38ceaf9
AD
2165 hash_init(adev->mn_hash);
2166
2167 amdgpu_check_arguments(adev);
2168
d38ceaf9
AD
2169 spin_lock_init(&adev->mmio_idx_lock);
2170 spin_lock_init(&adev->smc_idx_lock);
2171 spin_lock_init(&adev->pcie_idx_lock);
2172 spin_lock_init(&adev->uvd_ctx_idx_lock);
2173 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 2174 spin_lock_init(&adev->gc_cac_idx_lock);
16abb5d2 2175 spin_lock_init(&adev->se_cac_idx_lock);
d38ceaf9 2176 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 2177 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 2178
0c4e7fa5
CZ
2179 INIT_LIST_HEAD(&adev->shadow_list);
2180 mutex_init(&adev->shadow_list_lock);
2181
5c1354bd
CZ
2182 INIT_LIST_HEAD(&adev->gtt_list);
2183 spin_lock_init(&adev->gtt_list_lock);
2184
795f2813
AR
2185 INIT_LIST_HEAD(&adev->ring_lru_list);
2186 spin_lock_init(&adev->ring_lru_list_lock);
2187
2dc80b00
S
2188 INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);
2189
0fa49558
AX
2190 /* Registers mapping */
2191 /* TODO: block userspace mapping of io register */
da69c161
KW
2192 if (adev->asic_type >= CHIP_BONAIRE) {
2193 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2194 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2195 } else {
2196 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2197 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2198 }
d38ceaf9 2199
d38ceaf9
AD
2200 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2201 if (adev->rmmio == NULL) {
2202 return -ENOMEM;
2203 }
2204 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2205 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2206
705e519e
CK
2207 /* doorbell bar mapping */
2208 amdgpu_doorbell_init(adev);
d38ceaf9
AD
2209
2210 /* io port mapping */
2211 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2212 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2213 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2214 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2215 break;
2216 }
2217 }
2218 if (adev->rio_mem == NULL)
b64a18c5 2219 DRM_INFO("PCI I/O BAR is not found.\n");
d38ceaf9
AD
2220
2221 /* early init functions */
2222 r = amdgpu_early_init(adev);
2223 if (r)
2224 return r;
2225
 2226 	/* if we have more than one VGA card, then disable the amdgpu VGA resources */
2227 /* this will fail for cards that aren't VGA class devices, just
2228 * ignore it */
2229 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
2230
2231 if (amdgpu_runtime_pm == 1)
2232 runtime = true;
e9bef455 2233 if (amdgpu_device_is_px(ddev))
d38ceaf9 2234 runtime = true;
84c8b22e
LW
2235 if (!pci_is_thunderbolt_attached(adev->pdev))
2236 vga_switcheroo_register_client(adev->pdev,
2237 &amdgpu_switcheroo_ops, runtime);
d38ceaf9
AD
2238 if (runtime)
2239 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2240
2241 /* Read BIOS */
83ba126a
AD
2242 if (!amdgpu_get_bios(adev)) {
2243 r = -EINVAL;
2244 goto failed;
2245 }
f7e9e9fe 2246
d38ceaf9 2247 r = amdgpu_atombios_init(adev);
2c1a2784
AD
2248 if (r) {
2249 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
e23b74aa 2250 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
83ba126a 2251 goto failed;
2c1a2784 2252 }
d38ceaf9 2253
4e99a44e
ML
 2254 	/* detect whether we are running with an SR-IOV vBIOS */
2255 amdgpu_device_detect_sriov_bios(adev);
048765ad 2256
d38ceaf9 2257 /* Post card if necessary */
91fe77eb 2258 if (amdgpu_need_post(adev)) {
d38ceaf9 2259 if (!adev->bios) {
bec86378 2260 dev_err(adev->dev, "no vBIOS found\n");
83ba126a
AD
2261 r = -EINVAL;
2262 goto failed;
d38ceaf9 2263 }
bec86378 2264 DRM_INFO("GPU posting now...\n");
4e99a44e
ML
2265 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2266 if (r) {
2267 dev_err(adev->dev, "gpu post error!\n");
2268 goto failed;
2269 }
d38ceaf9
AD
2270 }
2271
88b64e95
AD
2272 if (adev->is_atom_fw) {
2273 /* Initialize clocks */
2274 r = amdgpu_atomfirmware_get_clock_info(adev);
2275 if (r) {
2276 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
e23b74aa 2277 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
88b64e95
AD
2278 goto failed;
2279 }
2280 } else {
a5bde2f9
AD
2281 /* Initialize clocks */
2282 r = amdgpu_atombios_get_clock_info(adev);
2283 if (r) {
2284 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
e23b74aa 2285 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
89041940 2286 goto failed;
a5bde2f9
AD
2287 }
2288 /* init i2c buses */
4562236b
HW
2289 if (!amdgpu_device_has_dc_support(adev))
2290 amdgpu_atombios_i2c_init(adev);
2c1a2784 2291 }
d38ceaf9
AD
2292
2293 /* Fence driver */
2294 r = amdgpu_fence_driver_init(adev);
2c1a2784
AD
2295 if (r) {
2296 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
e23b74aa 2297 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
83ba126a 2298 goto failed;
2c1a2784 2299 }
d38ceaf9
AD
2300
2301 /* init the mode config */
2302 drm_mode_config_init(adev->ddev);
2303
2304 r = amdgpu_init(adev);
2305 if (r) {
8840a387 2306 /* failed in exclusive mode due to timeout */
2307 if (amdgpu_sriov_vf(adev) &&
2308 !amdgpu_sriov_runtime(adev) &&
2309 amdgpu_virt_mmio_blocked(adev) &&
2310 !amdgpu_virt_wait_reset(adev)) {
2311 dev_err(adev->dev, "VF exclusive mode timeout\n");
2312 r = -EAGAIN;
2313 goto failed;
2314 }
2c1a2784 2315 dev_err(adev->dev, "amdgpu_init failed\n");
e23b74aa 2316 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
d38ceaf9 2317 amdgpu_fini(adev);
83ba126a 2318 goto failed;
d38ceaf9
AD
2319 }
2320
2321 adev->accel_working = true;
2322
e59c0205
AX
2323 amdgpu_vm_check_compute_bug(adev);
2324
95844d20
MO
2325 /* Initialize the buffer migration limit. */
2326 if (amdgpu_moverate >= 0)
2327 max_MBps = amdgpu_moverate;
2328 else
2329 max_MBps = 8; /* Allow 8 MB/s. */
2330 /* Get a log2 for easy divisions. */
2331 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2332
d38ceaf9
AD
2333 r = amdgpu_ib_pool_init(adev);
2334 if (r) {
2335 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
e23b74aa 2336 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
83ba126a 2337 goto failed;
d38ceaf9
AD
2338 }
2339
2340 r = amdgpu_ib_ring_tests(adev);
2341 if (r)
2342 DRM_ERROR("ib ring test failed (%d).\n", r);
2343
2dc8f81e
HC
2344 if (amdgpu_sriov_vf(adev))
2345 amdgpu_virt_init_data_exchange(adev);
2346
9bc92b9c
ML
2347 amdgpu_fbdev_init(adev);
2348
d2f52ac8
RZ
2349 r = amdgpu_pm_sysfs_init(adev);
2350 if (r)
2351 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2352
d38ceaf9 2353 r = amdgpu_gem_debugfs_init(adev);
3f14e623 2354 if (r)
d38ceaf9 2355 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
d38ceaf9
AD
2356
2357 r = amdgpu_debugfs_regs_init(adev);
3f14e623 2358 if (r)
d38ceaf9 2359 DRM_ERROR("registering register debugfs failed (%d).\n", r);
d38ceaf9 2360
4f0955fc
HR
2361 r = amdgpu_debugfs_test_ib_ring_init(adev);
2362 if (r)
2363 DRM_ERROR("registering register test ib ring debugfs failed (%d).\n", r);
2364
50ab2533 2365 r = amdgpu_debugfs_firmware_init(adev);
3f14e623 2366 if (r)
50ab2533 2367 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
50ab2533 2368
db95e218
KR
2369 r = amdgpu_debugfs_vbios_dump_init(adev);
2370 if (r)
2371 DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r);
2372
d38ceaf9
AD
2373 if ((amdgpu_testing & 1)) {
2374 if (adev->accel_working)
2375 amdgpu_test_moves(adev);
2376 else
2377 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2378 }
d38ceaf9
AD
2379 if (amdgpu_benchmarking) {
2380 if (adev->accel_working)
2381 amdgpu_benchmark(adev, amdgpu_benchmarking);
2382 else
2383 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2384 }
2385
2386 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2387 * explicit gating rather than handling it automatically.
2388 */
2389 r = amdgpu_late_init(adev);
2c1a2784
AD
2390 if (r) {
2391 dev_err(adev->dev, "amdgpu_late_init failed\n");
e23b74aa 2392 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
83ba126a 2393 goto failed;
2c1a2784 2394 }
d38ceaf9
AD
2395
2396 return 0;
83ba126a
AD
2397
2398failed:
89041940 2399 amdgpu_vf_error_trans_all(adev);
83ba126a
AD
2400 if (runtime)
2401 vga_switcheroo_fini_domain_pm_ops(adev->dev);
8840a387 2402
83ba126a 2403 return r;
d38ceaf9
AD
2404}
2405
d38ceaf9
AD
2406/**
2407 * amdgpu_device_fini - tear down the driver
2408 *
2409 * @adev: amdgpu_device pointer
2410 *
2411 * Tear down the driver info (all asics).
2412 * Called at driver shutdown.
2413 */
2414void amdgpu_device_fini(struct amdgpu_device *adev)
2415{
2416 int r;
2417
2418 DRM_INFO("amdgpu: finishing device.\n");
2419 adev->shutdown = true;
db2c2a97
PD
2420 if (adev->mode_info.mode_config_initialized)
2421 drm_crtc_force_disable_all(adev->ddev);
d38ceaf9
AD
2422 /* evict vram memory */
2423 amdgpu_bo_evict_vram(adev);
2424 amdgpu_ib_pool_fini(adev);
a05502e5 2425 amdgpu_fw_reserve_vram_fini(adev);
d38ceaf9
AD
2426 amdgpu_fence_driver_fini(adev);
2427 amdgpu_fbdev_fini(adev);
2428 r = amdgpu_fini(adev);
ab4fe3e1
HR
2429 if (adev->firmware.gpu_info_fw) {
2430 release_firmware(adev->firmware.gpu_info_fw);
2431 adev->firmware.gpu_info_fw = NULL;
2432 }
d38ceaf9 2433 adev->accel_working = false;
2dc80b00 2434 cancel_delayed_work_sync(&adev->late_init_work);
d38ceaf9 2435 /* free i2c buses */
4562236b
HW
2436 if (!amdgpu_device_has_dc_support(adev))
2437 amdgpu_i2c_fini(adev);
d38ceaf9
AD
2438 amdgpu_atombios_fini(adev);
2439 kfree(adev->bios);
2440 adev->bios = NULL;
84c8b22e
LW
2441 if (!pci_is_thunderbolt_attached(adev->pdev))
2442 vga_switcheroo_unregister_client(adev->pdev);
83ba126a
AD
2443 if (adev->flags & AMD_IS_PX)
2444 vga_switcheroo_fini_domain_pm_ops(adev->dev);
d38ceaf9
AD
2445 vga_client_register(adev->pdev, NULL, NULL, NULL);
2446 if (adev->rio_mem)
2447 pci_iounmap(adev->pdev, adev->rio_mem);
2448 adev->rio_mem = NULL;
2449 iounmap(adev->rmmio);
2450 adev->rmmio = NULL;
705e519e 2451 amdgpu_doorbell_fini(adev);
d2f52ac8 2452 amdgpu_pm_sysfs_fini(adev);
d38ceaf9 2453 amdgpu_debugfs_regs_cleanup(adev);
d38ceaf9
AD
2454}
2455
2456
2457/*
2458 * Suspend & resume.
2459 */
2460/**
810ddc3a 2461 * amdgpu_device_suspend - initiate device suspend
d38ceaf9
AD
2462 *
 2463  * @dev: drm dev pointer
2464 * @state: suspend state
2465 *
2466 * Puts the hw in the suspend state (all asics).
2467 * Returns 0 for success or an error on failure.
2468 * Called at driver suspend.
2469 */
810ddc3a 2470int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
d38ceaf9
AD
2471{
2472 struct amdgpu_device *adev;
2473 struct drm_crtc *crtc;
2474 struct drm_connector *connector;
5ceb54c6 2475 int r;
d38ceaf9
AD
2476
2477 if (dev == NULL || dev->dev_private == NULL) {
2478 return -ENODEV;
2479 }
2480
2481 adev = dev->dev_private;
2482
2483 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2484 return 0;
2485
2486 drm_kms_helper_poll_disable(dev);
2487
4562236b
HW
2488 if (!amdgpu_device_has_dc_support(adev)) {
2489 /* turn off display hw */
2490 drm_modeset_lock_all(dev);
2491 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2492 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2493 }
2494 drm_modeset_unlock_all(dev);
d38ceaf9
AD
2495 }
2496
ba997709
YZ
2497 amdgpu_amdkfd_suspend(adev);
2498
756e6880 2499 /* unpin the front buffers and cursors */
d38ceaf9 2500 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
756e6880 2501 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
d38ceaf9
AD
2502 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2503 struct amdgpu_bo *robj;
2504
756e6880
AD
2505 if (amdgpu_crtc->cursor_bo) {
2506 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2507 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2508 if (r == 0) {
2509 amdgpu_bo_unpin(aobj);
2510 amdgpu_bo_unreserve(aobj);
2511 }
2512 }
2513
d38ceaf9
AD
2514 if (rfb == NULL || rfb->obj == NULL) {
2515 continue;
2516 }
2517 robj = gem_to_amdgpu_bo(rfb->obj);
2518 /* don't unpin kernel fb objects */
2519 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
7a6901d7 2520 r = amdgpu_bo_reserve(robj, true);
d38ceaf9
AD
2521 if (r == 0) {
2522 amdgpu_bo_unpin(robj);
2523 amdgpu_bo_unreserve(robj);
2524 }
2525 }
2526 }
2527 /* evict vram memory */
2528 amdgpu_bo_evict_vram(adev);
2529
5ceb54c6 2530 amdgpu_fence_driver_suspend(adev);
d38ceaf9
AD
2531
2532 r = amdgpu_suspend(adev);
2533
a0a71e49
AD
2534 /* evict remaining vram memory
2535 * This second call to evict vram is to evict the gart page table
2536 * using the CPU.
2537 */
d38ceaf9
AD
2538 amdgpu_bo_evict_vram(adev);
2539
d05da0e2 2540 amdgpu_atombios_scratch_regs_save(adev);
d38ceaf9
AD
2541 pci_save_state(dev->pdev);
2542 if (suspend) {
2543 /* Shut down the device */
2544 pci_disable_device(dev->pdev);
2545 pci_set_power_state(dev->pdev, PCI_D3hot);
74b0b157 2546 } else {
2547 r = amdgpu_asic_reset(adev);
2548 if (r)
2549 DRM_ERROR("amdgpu asic reset failed\n");
d38ceaf9
AD
2550 }
2551
2552 if (fbcon) {
2553 console_lock();
2554 amdgpu_fbdev_set_suspend(adev, 1);
2555 console_unlock();
2556 }
2557 return 0;
2558}
2559
2560/**
810ddc3a 2561 * amdgpu_device_resume - initiate device resume
d38ceaf9
AD
2562 *
 2563  * @dev: drm dev pointer
2564 *
2565 * Bring the hw back to operating state (all asics).
2566 * Returns 0 for success or an error on failure.
2567 * Called at driver resume.
2568 */
810ddc3a 2569int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
d38ceaf9
AD
2570{
2571 struct drm_connector *connector;
2572 struct amdgpu_device *adev = dev->dev_private;
756e6880 2573 struct drm_crtc *crtc;
03161a6e 2574 int r = 0;
d38ceaf9
AD
2575
2576 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2577 return 0;
2578
74b0b157 2579 if (fbcon)
d38ceaf9 2580 console_lock();
74b0b157 2581
d38ceaf9
AD
2582 if (resume) {
2583 pci_set_power_state(dev->pdev, PCI_D0);
2584 pci_restore_state(dev->pdev);
74b0b157 2585 r = pci_enable_device(dev->pdev);
03161a6e
HR
2586 if (r)
2587 goto unlock;
d38ceaf9 2588 }
d05da0e2 2589 amdgpu_atombios_scratch_regs_restore(adev);
d38ceaf9
AD
2590
2591 /* post card */
c836fec5 2592 if (amdgpu_need_post(adev)) {
74b0b157 2593 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2594 if (r)
2595 DRM_ERROR("amdgpu asic init failed\n");
2596 }
d38ceaf9
AD
2597
2598 r = amdgpu_resume(adev);
e6707218 2599 if (r) {
ca198528 2600 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
03161a6e 2601 goto unlock;
e6707218 2602 }
5ceb54c6
AD
2603 amdgpu_fence_driver_resume(adev);
2604
ca198528
FC
2605 if (resume) {
2606 r = amdgpu_ib_ring_tests(adev);
2607 if (r)
2608 DRM_ERROR("ib ring test failed (%d).\n", r);
2609 }
d38ceaf9
AD
2610
2611 r = amdgpu_late_init(adev);
03161a6e
HR
2612 if (r)
2613 goto unlock;
d38ceaf9 2614
756e6880
AD
2615 /* pin cursors */
2616 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2617 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2618
2619 if (amdgpu_crtc->cursor_bo) {
2620 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2621 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2622 if (r == 0) {
2623 r = amdgpu_bo_pin(aobj,
2624 AMDGPU_GEM_DOMAIN_VRAM,
2625 &amdgpu_crtc->cursor_addr);
2626 if (r != 0)
2627 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2628 amdgpu_bo_unreserve(aobj);
2629 }
2630 }
2631 }
ba997709
YZ
2632 r = amdgpu_amdkfd_resume(adev);
2633 if (r)
2634 return r;
756e6880 2635
d38ceaf9
AD
2636 /* blat the mode back in */
2637 if (fbcon) {
4562236b
HW
2638 if (!amdgpu_device_has_dc_support(adev)) {
2639 /* pre DCE11 */
2640 drm_helper_resume_force_mode(dev);
2641
2642 /* turn on display hw */
2643 drm_modeset_lock_all(dev);
2644 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2645 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2646 }
2647 drm_modeset_unlock_all(dev);
2648 } else {
2649 /*
2650 * There is no equivalent atomic helper to turn on
 2651 			 * the display, so we define our own function for this;
 2652 			 * once suspend/resume is supported by the atomic
 2653 			 * framework this will be reworked
2654 */
2655 amdgpu_dm_display_resume(adev);
d38ceaf9
AD
2656 }
2657 }
2658
2659 drm_kms_helper_poll_enable(dev);
23a1a9e5
L
2660
2661 /*
2662 * Most of the connector probing functions try to acquire runtime pm
2663 * refs to ensure that the GPU is powered on when connector polling is
2664 * performed. Since we're calling this from a runtime PM callback,
2665 * trying to acquire rpm refs will cause us to deadlock.
2666 *
2667 * Since we're guaranteed to be holding the rpm lock, it's safe to
2668 * temporarily disable the rpm helpers so this doesn't deadlock us.
2669 */
2670#ifdef CONFIG_PM
2671 dev->dev->power.disable_depth++;
2672#endif
4562236b
HW
2673 if (!amdgpu_device_has_dc_support(adev))
2674 drm_helper_hpd_irq_event(dev);
2675 else
2676 drm_kms_helper_hotplug_event(dev);
23a1a9e5
L
2677#ifdef CONFIG_PM
2678 dev->dev->power.disable_depth--;
2679#endif
d38ceaf9 2680
03161a6e 2681 if (fbcon)
d38ceaf9 2682 amdgpu_fbdev_set_suspend(adev, 0);
03161a6e
HR
2683
2684unlock:
2685 if (fbcon)
d38ceaf9 2686 console_unlock();
d38ceaf9 2687
03161a6e 2688 return r;
d38ceaf9
AD
2689}
2690
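/*
 * Poll every IP block's ->check_soft_reset() hook and latch the per-block
 * hang status; on SR-IOV VFs this simply reports a hang so the reset path
 * is never skipped.
 */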
63fbf42f
CZ
2691static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2692{
2693 int i;
2694 bool asic_hang = false;
2695
f993d628
ML
2696 if (amdgpu_sriov_vf(adev))
2697 return true;
2698
63fbf42f 2699 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2700 if (!adev->ip_blocks[i].status.valid)
63fbf42f 2701 continue;
a1255107
AD
2702 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2703 adev->ip_blocks[i].status.hang =
2704 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2705 if (adev->ip_blocks[i].status.hang) {
2706 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
63fbf42f
CZ
2707 asic_hang = true;
2708 }
2709 }
2710 return asic_hang;
2711}
2712
4d446656 2713static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
d31a501e
CZ
2714{
2715 int i, r = 0;
2716
2717 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2718 if (!adev->ip_blocks[i].status.valid)
d31a501e 2719 continue;
a1255107
AD
2720 if (adev->ip_blocks[i].status.hang &&
2721 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2722 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
d31a501e
CZ
2723 if (r)
2724 return r;
2725 }
2726 }
2727
2728 return 0;
2729}
2730
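/*
 * Some hangs cannot be recovered with per-block soft resets: if GMC, SMC,
 * ACP, DCE or PSP is flagged as hung, escalate to a full ASIC reset.
 */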
35d782fe
CZ
2731static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2732{
da146d3b
AD
2733 int i;
2734
2735 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2736 if (!adev->ip_blocks[i].status.valid)
da146d3b 2737 continue;
a1255107
AD
2738 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2739 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2740 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
98512bb8
KW
2741 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
2742 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
a1255107 2743 if (adev->ip_blocks[i].status.hang) {
da146d3b
AD
 2744 				DRM_INFO("Some blocks need a full reset!\n");
2745 return true;
2746 }
2747 }
35d782fe
CZ
2748 }
2749 return false;
2750}
2751
2752static int amdgpu_soft_reset(struct amdgpu_device *adev)
2753{
2754 int i, r = 0;
2755
2756 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2757 if (!adev->ip_blocks[i].status.valid)
35d782fe 2758 continue;
a1255107
AD
2759 if (adev->ip_blocks[i].status.hang &&
2760 adev->ip_blocks[i].version->funcs->soft_reset) {
2761 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
35d782fe
CZ
2762 if (r)
2763 return r;
2764 }
2765 }
2766
2767 return 0;
2768}
2769
2770static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2771{
2772 int i, r = 0;
2773
2774 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2775 if (!adev->ip_blocks[i].status.valid)
35d782fe 2776 continue;
a1255107
AD
2777 if (adev->ip_blocks[i].status.hang &&
2778 adev->ip_blocks[i].version->funcs->post_soft_reset)
2779 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
35d782fe
CZ
2780 if (r)
2781 return r;
2782 }
2783
2784 return 0;
2785}
2786
3ad81f16
CZ
2787bool amdgpu_need_backup(struct amdgpu_device *adev)
2788{
2789 if (adev->flags & AMD_IS_APU)
2790 return false;
2791
2792 return amdgpu_lockup_timeout > 0 ? true : false;
2793}
2794
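/*
 * After a full reset VRAM contents may be gone; buffers on adev->shadow_list
 * keep a shadow copy that survives the reset, and this helper copies the
 * shadow back into the original BO when that BO still resides in VRAM.
 */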
53cdccd5
CZ
2795static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2796 struct amdgpu_ring *ring,
2797 struct amdgpu_bo *bo,
f54d1867 2798 struct dma_fence **fence)
53cdccd5
CZ
2799{
2800 uint32_t domain;
2801 int r;
2802
23d2e504
RH
2803 if (!bo->shadow)
2804 return 0;
2805
1d284797 2806 r = amdgpu_bo_reserve(bo, true);
23d2e504
RH
2807 if (r)
2808 return r;
2809 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2810 /* if bo has been evicted, then no need to recover */
2811 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
82521316
RH
2812 r = amdgpu_bo_validate(bo->shadow);
2813 if (r) {
2814 DRM_ERROR("bo validate failed!\n");
2815 goto err;
2816 }
2817
23d2e504 2818 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
53cdccd5 2819 NULL, fence, true);
23d2e504
RH
2820 if (r) {
2821 DRM_ERROR("recover page table failed!\n");
2822 goto err;
2823 }
2824 }
53cdccd5 2825err:
23d2e504
RH
2826 amdgpu_bo_unreserve(bo);
2827 return r;
53cdccd5
CZ
2828}
2829
a90ad3c2
ML
2830/**
2831 * amdgpu_sriov_gpu_reset - reset the asic
2832 *
2833 * @adev: amdgpu device pointer
7225f873 2834 * @job: which job trigger hang
a90ad3c2
ML
2835 *
2836 * Attempt the reset the GPU if it has hung (all asics).
2837 * for SRIOV case.
2838 * Returns 0 for success or an error on failure.
2839 */
7225f873 2840int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
a90ad3c2 2841{
65781c78 2842 int i, j, r = 0;
a90ad3c2
ML
2843 int resched;
2844 struct amdgpu_bo *bo, *tmp;
2845 struct amdgpu_ring *ring;
2846 struct dma_fence *fence = NULL, *next = NULL;
2847
147b5983 2848 mutex_lock(&adev->virt.lock_reset);
a90ad3c2 2849 atomic_inc(&adev->gpu_reset_counter);
3224a12b 2850 adev->in_sriov_reset = true;
a90ad3c2
ML
2851
2852 /* block TTM */
2853 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2854
65781c78
ML
 2855 	/* start from the ring that triggered the GPU hang */
2856 j = job ? job->ring->idx : 0;
a90ad3c2 2857
65781c78
ML
2858 /* block scheduler */
2859 for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2860 ring = adev->rings[i % AMDGPU_MAX_RINGS];
a90ad3c2
ML
2861 if (!ring || !ring->sched.thread)
2862 continue;
2863
2864 kthread_park(ring->sched.thread);
65781c78
ML
2865
2866 if (job && j != i)
2867 continue;
2868
4f059ecd 2869 		/* last chance to check whether the job was already removed from the mirror list,
65781c78 2870 		 * since we have already paid the cost of kthread_park */
4f059ecd 2871 if (job && list_empty(&job->base.node)) {
65781c78
ML
2872 kthread_unpark(ring->sched.thread);
2873 goto give_up_reset;
2874 }
2875
2876 if (amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
2877 amd_sched_job_kickout(&job->base);
2878
2879 /* only do job_reset on the hang ring if @job not NULL */
a8a51a70 2880 amd_sched_hw_job_reset(&ring->sched, NULL);
a90ad3c2 2881
65781c78 2882 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2f9d4084 2883 amdgpu_fence_driver_force_completion(ring);
65781c78 2884 }
a90ad3c2
ML
2885
2886 /* request to take full control of GPU before re-initialization */
7225f873 2887 if (job)
a90ad3c2
ML
2888 amdgpu_virt_reset_gpu(adev);
2889 else
2890 amdgpu_virt_request_full_gpu(adev, true);
2891
2892
2893 /* Resume IP prior to SMC */
e4f0fdcc 2894 amdgpu_sriov_reinit_early(adev);
a90ad3c2
ML
2895
 2896 	/* we need to recover the GART before resuming SMC/CP/SDMA */
2897 amdgpu_ttm_recover_gart(adev);
2898
2899 /* now we are okay to resume SMC/CP/SDMA */
e4f0fdcc 2900 amdgpu_sriov_reinit_late(adev);
a90ad3c2
ML
2901
2902 amdgpu_irq_gpu_reset_resume_helper(adev);
2903
2904 if (amdgpu_ib_ring_tests(adev))
2905 dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
2906
2907 /* release full control of GPU after ib test */
2908 amdgpu_virt_release_full_gpu(adev, true);
2909
2910 DRM_INFO("recover vram bo from shadow\n");
2911
2912 ring = adev->mman.buffer_funcs_ring;
2913 mutex_lock(&adev->shadow_list_lock);
2914 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
236763d3 2915 next = NULL;
a90ad3c2
ML
2916 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2917 if (fence) {
2918 r = dma_fence_wait(fence, false);
2919 if (r) {
2920 WARN(r, "recovery from shadow isn't completed\n");
2921 break;
2922 }
2923 }
2924
2925 dma_fence_put(fence);
2926 fence = next;
2927 }
2928 mutex_unlock(&adev->shadow_list_lock);
2929
2930 if (fence) {
2931 r = dma_fence_wait(fence, false);
2932 if (r)
2933 WARN(r, "recovery from shadow isn't completed\n");
2934 }
2935 dma_fence_put(fence);
2936
65781c78
ML
2937 for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2938 ring = adev->rings[i % AMDGPU_MAX_RINGS];
a90ad3c2
ML
2939 if (!ring || !ring->sched.thread)
2940 continue;
2941
65781c78
ML
2942 if (job && j != i) {
2943 kthread_unpark(ring->sched.thread);
2944 continue;
2945 }
2946
a90ad3c2
ML
2947 amd_sched_job_recovery(&ring->sched);
2948 kthread_unpark(ring->sched.thread);
2949 }
2950
2951 drm_helper_resume_force_mode(adev->ddev);
65781c78 2952give_up_reset:
a90ad3c2
ML
2953 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2954 if (r) {
2955 /* bad news, how to tell it to userspace ? */
2956 dev_info(adev->dev, "GPU reset failed\n");
65781c78
ML
2957 } else {
 2958 		dev_info(adev->dev, "GPU reset succeeded!\n");
a90ad3c2
ML
2959 }
2960
3224a12b 2961 adev->in_sriov_reset = false;
147b5983 2962 mutex_unlock(&adev->virt.lock_reset);
a90ad3c2
ML
2963 return r;
2964}
2965
d38ceaf9
AD
2966/**
2967 * amdgpu_gpu_reset - reset the asic
2968 *
2969 * @adev: amdgpu device pointer
2970 *
2971 * Attempt the reset the GPU if it has hung (all asics).
2972 * Returns 0 for success or an error on failure.
2973 */
2974int amdgpu_gpu_reset(struct amdgpu_device *adev)
2975{
4562236b 2976 struct drm_atomic_state *state = NULL;
d38ceaf9
AD
2977 int i, r;
2978 int resched;
0c49e0b8 2979 bool need_full_reset, vram_lost = false;
fb140b29 2980
63fbf42f
CZ
2981 if (!amdgpu_check_soft_reset(adev)) {
2982 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2983 return 0;
2984 }
d38ceaf9 2985
d94aed5a 2986 atomic_inc(&adev->gpu_reset_counter);
d38ceaf9 2987
a3c47d6b
CZ
2988 /* block TTM */
2989 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
4562236b
HW
2990 /* store modesetting */
2991 if (amdgpu_device_has_dc_support(adev))
2992 state = drm_atomic_helper_suspend(adev->ddev);
a3c47d6b 2993
0875dc9e
CZ
2994 /* block scheduler */
2995 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2996 struct amdgpu_ring *ring = adev->rings[i];
2997
51687759 2998 if (!ring || !ring->sched.thread)
0875dc9e
CZ
2999 continue;
3000 kthread_park(ring->sched.thread);
a8a51a70 3001 amd_sched_hw_job_reset(&ring->sched, NULL);
2f9d4084
ML
3002 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3003 amdgpu_fence_driver_force_completion(ring);
0875dc9e 3004 }
d38ceaf9 3005
35d782fe 3006 need_full_reset = amdgpu_need_full_reset(adev);
d38ceaf9 3007
35d782fe
CZ
3008 if (!need_full_reset) {
3009 amdgpu_pre_soft_reset(adev);
3010 r = amdgpu_soft_reset(adev);
3011 amdgpu_post_soft_reset(adev);
3012 if (r || amdgpu_check_soft_reset(adev)) {
3013 DRM_INFO("soft reset failed, will fallback to full reset!\n");
3014 need_full_reset = true;
3015 }
f1aa7e08
CZ
3016 }
3017
35d782fe 3018 if (need_full_reset) {
35d782fe 3019 r = amdgpu_suspend(adev);
bfa99269 3020
35d782fe 3021retry:
d05da0e2 3022 amdgpu_atombios_scratch_regs_save(adev);
35d782fe 3023 r = amdgpu_asic_reset(adev);
d05da0e2 3024 amdgpu_atombios_scratch_regs_restore(adev);
35d782fe
CZ
3025 /* post card */
3026 amdgpu_atom_asic_init(adev->mode_info.atom_context);
3027
3028 if (!r) {
3029 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
fcf0649f
CZ
3030 r = amdgpu_resume_phase1(adev);
3031 if (r)
3032 goto out;
0c49e0b8 3033 vram_lost = amdgpu_check_vram_lost(adev);
f1892138 3034 if (vram_lost) {
0c49e0b8 3035 DRM_ERROR("VRAM is lost!\n");
f1892138
CZ
3036 atomic_inc(&adev->vram_lost_counter);
3037 }
fcf0649f
CZ
3038 r = amdgpu_ttm_recover_gart(adev);
3039 if (r)
3040 goto out;
3041 r = amdgpu_resume_phase2(adev);
3042 if (r)
3043 goto out;
0c49e0b8
CZ
3044 if (vram_lost)
3045 amdgpu_fill_reset_magic(adev);
35d782fe 3046 }
d38ceaf9 3047 }
fcf0649f 3048out:
d38ceaf9 3049 if (!r) {
e72cfd58 3050 amdgpu_irq_gpu_reset_resume_helper(adev);
1f465087
CZ
3051 r = amdgpu_ib_ring_tests(adev);
3052 if (r) {
3053 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
40019dc4 3054 r = amdgpu_suspend(adev);
53cdccd5 3055 need_full_reset = true;
40019dc4 3056 goto retry;
1f465087 3057 }
53cdccd5
CZ
3058 /**
 3059 		 * recover VM page tables, since we cannot depend on VRAM being
 3060 		 * consistent after a full GPU reset.
3061 */
3062 if (need_full_reset && amdgpu_need_backup(adev)) {
3063 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
3064 struct amdgpu_bo *bo, *tmp;
f54d1867 3065 struct dma_fence *fence = NULL, *next = NULL;
53cdccd5
CZ
3066
3067 DRM_INFO("recover vram bo from shadow\n");
3068 mutex_lock(&adev->shadow_list_lock);
3069 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
236763d3 3070 next = NULL;
53cdccd5
CZ
3071 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
3072 if (fence) {
f54d1867 3073 r = dma_fence_wait(fence, false);
53cdccd5 3074 if (r) {
1d7b17b0 3075 WARN(r, "recovery from shadow isn't completed\n");
53cdccd5
CZ
3076 break;
3077 }
3078 }
1f465087 3079
f54d1867 3080 dma_fence_put(fence);
53cdccd5
CZ
3081 fence = next;
3082 }
3083 mutex_unlock(&adev->shadow_list_lock);
3084 if (fence) {
f54d1867 3085 r = dma_fence_wait(fence, false);
53cdccd5 3086 if (r)
1d7b17b0 3087 WARN(r, "recovery from shadow isn't completed\n");
53cdccd5 3088 }
f54d1867 3089 dma_fence_put(fence);
53cdccd5 3090 }
d38ceaf9
AD
3091 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3092 struct amdgpu_ring *ring = adev->rings[i];
51687759
CZ
3093
3094 if (!ring || !ring->sched.thread)
d38ceaf9 3095 continue;
53cdccd5 3096
aa1c8900 3097 amd_sched_job_recovery(&ring->sched);
0875dc9e 3098 kthread_unpark(ring->sched.thread);
d38ceaf9 3099 }
d38ceaf9 3100 } else {
2200edac 3101 dev_err(adev->dev, "asic resume failed (%d).\n", r);
d38ceaf9 3102 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
51687759 3103 if (adev->rings[i] && adev->rings[i]->sched.thread) {
0875dc9e 3104 kthread_unpark(adev->rings[i]->sched.thread);
0875dc9e 3105 }
d38ceaf9
AD
3106 }
3107 }
3108
4562236b
HW
3109 if (amdgpu_device_has_dc_support(adev)) {
3110 r = drm_atomic_helper_resume(adev->ddev, state);
3111 amdgpu_dm_display_resume(adev);
3112 } else
3113 drm_helper_resume_force_mode(adev->ddev);
d38ceaf9
AD
3114
3115 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
89041940 3116 if (r) {
d38ceaf9
AD
 3117 		/* bad news, how do we tell userspace? */
3118 dev_info(adev->dev, "GPU reset failed\n");
89041940
GW
3119 }
3120 else {
6643be65 3121 		dev_info(adev->dev, "GPU reset succeeded!\n");
89041940 3122 }
d38ceaf9 3123
89041940 3124 amdgpu_vf_error_trans_all(adev);
d38ceaf9
AD
3125 return r;
3126}
3127
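/*
 * Determine the supported PCIe gen and link-width masks: module parameters
 * override everything, devices on a root bus (including APUs) fall back to
 * the defaults, and otherwise the caps are queried through the DRM PCIe
 * helpers and translated into CAIL_* mask bits.
 */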
d0dd7f0c
AD
3128void amdgpu_get_pcie_info(struct amdgpu_device *adev)
3129{
3130 u32 mask;
3131 int ret;
3132
cd474ba0
AD
3133 if (amdgpu_pcie_gen_cap)
3134 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 3135
cd474ba0
AD
3136 if (amdgpu_pcie_lane_cap)
3137 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 3138
cd474ba0
AD
3139 /* covers APUs as well */
3140 if (pci_is_root_bus(adev->pdev->bus)) {
3141 if (adev->pm.pcie_gen_mask == 0)
3142 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3143 if (adev->pm.pcie_mlw_mask == 0)
3144 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 3145 return;
cd474ba0 3146 }
d0dd7f0c 3147
cd474ba0
AD
3148 if (adev->pm.pcie_gen_mask == 0) {
3149 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
3150 if (!ret) {
3151 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3152 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3153 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3154
3155 if (mask & DRM_PCIE_SPEED_25)
3156 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
3157 if (mask & DRM_PCIE_SPEED_50)
3158 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
3159 if (mask & DRM_PCIE_SPEED_80)
3160 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
3161 } else {
3162 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3163 }
3164 }
3165 if (adev->pm.pcie_mlw_mask == 0) {
3166 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
3167 if (!ret) {
3168 switch (mask) {
3169 case 32:
3170 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
3171 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3172 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3173 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3174 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3175 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3176 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3177 break;
3178 case 16:
3179 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3180 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3181 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3182 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3183 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3184 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3185 break;
3186 case 12:
3187 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3188 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3189 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3190 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3191 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3192 break;
3193 case 8:
3194 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3195 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3196 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3197 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3198 break;
3199 case 4:
3200 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3201 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3202 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3203 break;
3204 case 2:
3205 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3206 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3207 break;
3208 case 1:
3209 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
3210 break;
3211 default:
3212 break;
3213 }
3214 } else {
3215 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c
AD
3216 }
3217 }
3218}
d38ceaf9
AD
3219
3220/*
3221 * Debugfs
3222 */
3223int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
06ab6832 3224 const struct drm_info_list *files,
d38ceaf9
AD
3225 unsigned nfiles)
3226{
3227 unsigned i;
3228
3229 for (i = 0; i < adev->debugfs_count; i++) {
3230 if (adev->debugfs[i].files == files) {
3231 /* Already registered */
3232 return 0;
3233 }
3234 }
3235
3236 i = adev->debugfs_count + 1;
3237 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
3238 DRM_ERROR("Reached maximum number of debugfs components.\n");
3239 DRM_ERROR("Report so we increase "
3240 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
3241 return -EINVAL;
3242 }
3243 adev->debugfs[adev->debugfs_count].files = files;
3244 adev->debugfs[adev->debugfs_count].num_files = nfiles;
3245 adev->debugfs_count = i;
3246#if defined(CONFIG_DEBUG_FS)
d38ceaf9
AD
3247 drm_debugfs_create_files(files, nfiles,
3248 adev->ddev->primary->debugfs_root,
3249 adev->ddev->primary);
3250#endif
3251 return 0;
3252}
3253
d38ceaf9
AD
3254#if defined(CONFIG_DEBUG_FS)
3255
3256static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
3257 size_t size, loff_t *pos)
3258{
45063097 3259 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
3260 ssize_t result = 0;
3261 int r;
bd12267d 3262 bool pm_pg_lock, use_bank;
56628159 3263 unsigned instance_bank, sh_bank, se_bank;
d38ceaf9
AD
3264
3265 if (size & 0x3 || *pos & 0x3)
3266 return -EINVAL;
3267
bd12267d
TSD
3268 /* are we reading registers for which a PG lock is necessary? */
3269 pm_pg_lock = (*pos >> 23) & 1;
3270
56628159 3271 if (*pos & (1ULL << 62)) {
0b968650
TSD
3272 se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
3273 sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
3274 instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
32977f93
TSD
3275
3276 if (se_bank == 0x3FF)
3277 se_bank = 0xFFFFFFFF;
3278 if (sh_bank == 0x3FF)
3279 sh_bank = 0xFFFFFFFF;
3280 if (instance_bank == 0x3FF)
3281 instance_bank = 0xFFFFFFFF;
56628159 3282 use_bank = 1;
56628159
TSD
3283 } else {
3284 use_bank = 0;
3285 }
3286
801a6aa9 3287 *pos &= (1UL << 22) - 1;
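	/*
	 * Illustrative sketch (not part of the driver): user space selects
	 * banked access by packing the target into the file offset before a
	 * pread()/pwrite() on the regs debugfs file, matching the decode
	 * above. The field positions and names below are an editor's reading
	 * of the code, e.g.:
	 *
	 *   uint64_t off = (1ULL << 62)               // banked access
	 *                | ((uint64_t)se   << 24)     // bits 24-33: SE   (0x3FF = all)
	 *                | ((uint64_t)sh   << 34)     // bits 34-43: SH   (0x3FF = all)
	 *                | ((uint64_t)inst << 44)     // bits 44-53: instance (0x3FF = all)
	 *                | (reg_byte_offset & ((1ULL << 22) - 1)); // bits 0-21
	 *   pread(fd, &val, 4, off);                  // reads one 32-bit register
	 *
	 * Bit 23 additionally takes adev->pm.mutex around the access.
	 */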
bd12267d 3288
56628159 3289 if (use_bank) {
32977f93
TSD
3290 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3291 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
56628159
TSD
3292 return -EINVAL;
3293 mutex_lock(&adev->grbm_idx_mutex);
3294 amdgpu_gfx_select_se_sh(adev, se_bank,
3295 sh_bank, instance_bank);
3296 }
3297
bd12267d
TSD
3298 if (pm_pg_lock)
3299 mutex_lock(&adev->pm.mutex);
3300
d38ceaf9
AD
3301 while (size) {
3302 uint32_t value;
3303
3304 if (*pos > adev->rmmio_size)
56628159 3305 goto end;
d38ceaf9
AD
3306
3307 value = RREG32(*pos >> 2);
3308 r = put_user(value, (uint32_t *)buf);
56628159
TSD
3309 if (r) {
3310 result = r;
3311 goto end;
3312 }
d38ceaf9
AD
3313
3314 result += 4;
3315 buf += 4;
3316 *pos += 4;
3317 size -= 4;
3318 }
3319
56628159
TSD
3320end:
3321 if (use_bank) {
3322 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3323 mutex_unlock(&adev->grbm_idx_mutex);
3324 }
3325
bd12267d
TSD
3326 if (pm_pg_lock)
3327 mutex_unlock(&adev->pm.mutex);
3328
d38ceaf9
AD
3329 return result;
3330}
3331
3332static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
3333 size_t size, loff_t *pos)
3334{
45063097 3335 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
3336 ssize_t result = 0;
3337 int r;
394fdde2
TSD
3338 bool pm_pg_lock, use_bank;
3339 unsigned instance_bank, sh_bank, se_bank;
d38ceaf9
AD
3340
3341 if (size & 0x3 || *pos & 0x3)
3342 return -EINVAL;
3343
394fdde2
TSD
 3344 	/* are we writing registers for which a PG lock is necessary? */
3345 pm_pg_lock = (*pos >> 23) & 1;
3346
3347 if (*pos & (1ULL << 62)) {
0b968650
TSD
3348 se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
3349 sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
3350 instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
394fdde2
TSD
3351
3352 if (se_bank == 0x3FF)
3353 se_bank = 0xFFFFFFFF;
3354 if (sh_bank == 0x3FF)
3355 sh_bank = 0xFFFFFFFF;
3356 if (instance_bank == 0x3FF)
3357 instance_bank = 0xFFFFFFFF;
3358 use_bank = 1;
3359 } else {
3360 use_bank = 0;
3361 }
3362
801a6aa9 3363 *pos &= (1UL << 22) - 1;
394fdde2
TSD
3364
3365 if (use_bank) {
3366 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3367 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
3368 return -EINVAL;
3369 mutex_lock(&adev->grbm_idx_mutex);
3370 amdgpu_gfx_select_se_sh(adev, se_bank,
3371 sh_bank, instance_bank);
3372 }
3373
3374 if (pm_pg_lock)
3375 mutex_lock(&adev->pm.mutex);
3376
d38ceaf9
AD
3377 while (size) {
3378 uint32_t value;
3379
3380 if (*pos > adev->rmmio_size)
3381 return result;
3382
3383 r = get_user(value, (uint32_t *)buf);
3384 if (r)
3385 return r;
3386
3387 WREG32(*pos >> 2, value);
3388
3389 result += 4;
3390 buf += 4;
3391 *pos += 4;
3392 size -= 4;
3393 }
3394
394fdde2
TSD
3395 if (use_bank) {
3396 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3397 mutex_unlock(&adev->grbm_idx_mutex);
3398 }
3399
3400 if (pm_pg_lock)
3401 mutex_unlock(&adev->pm.mutex);
3402
d38ceaf9
AD
3403 return result;
3404}
3405
adcec288
TSD
3406static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
3407 size_t size, loff_t *pos)
3408{
45063097 3409 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3410 ssize_t result = 0;
3411 int r;
3412
3413 if (size & 0x3 || *pos & 0x3)
3414 return -EINVAL;
3415
3416 while (size) {
3417 uint32_t value;
3418
3419 value = RREG32_PCIE(*pos >> 2);
3420 r = put_user(value, (uint32_t *)buf);
3421 if (r)
3422 return r;
3423
3424 result += 4;
3425 buf += 4;
3426 *pos += 4;
3427 size -= 4;
3428 }
3429
3430 return result;
3431}
3432
3433static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
3434 size_t size, loff_t *pos)
3435{
45063097 3436 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3437 ssize_t result = 0;
3438 int r;
3439
3440 if (size & 0x3 || *pos & 0x3)
3441 return -EINVAL;
3442
3443 while (size) {
3444 uint32_t value;
3445
3446 r = get_user(value, (uint32_t *)buf);
3447 if (r)
3448 return r;
3449
3450 WREG32_PCIE(*pos >> 2, value);
3451
3452 result += 4;
3453 buf += 4;
3454 *pos += 4;
3455 size -= 4;
3456 }
3457
3458 return result;
3459}
3460
3461static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
3462 size_t size, loff_t *pos)
3463{
45063097 3464 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3465 ssize_t result = 0;
3466 int r;
3467
3468 if (size & 0x3 || *pos & 0x3)
3469 return -EINVAL;
3470
3471 while (size) {
3472 uint32_t value;
3473
3474 value = RREG32_DIDT(*pos >> 2);
3475 r = put_user(value, (uint32_t *)buf);
3476 if (r)
3477 return r;
3478
3479 result += 4;
3480 buf += 4;
3481 *pos += 4;
3482 size -= 4;
3483 }
3484
3485 return result;
3486}
3487
3488static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
3489 size_t size, loff_t *pos)
3490{
45063097 3491 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3492 ssize_t result = 0;
3493 int r;
3494
3495 if (size & 0x3 || *pos & 0x3)
3496 return -EINVAL;
3497
3498 while (size) {
3499 uint32_t value;
3500
3501 r = get_user(value, (uint32_t *)buf);
3502 if (r)
3503 return r;
3504
3505 WREG32_DIDT(*pos >> 2, value);
3506
3507 result += 4;
3508 buf += 4;
3509 *pos += 4;
3510 size -= 4;
3511 }
3512
3513 return result;
3514}
3515
3516static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
3517 size_t size, loff_t *pos)
3518{
45063097 3519 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3520 ssize_t result = 0;
3521 int r;
3522
3523 if (size & 0x3 || *pos & 0x3)
3524 return -EINVAL;
3525
3526 while (size) {
3527 uint32_t value;
3528
6fc0deaf 3529 value = RREG32_SMC(*pos);
adcec288
TSD
3530 r = put_user(value, (uint32_t *)buf);
3531 if (r)
3532 return r;
3533
3534 result += 4;
3535 buf += 4;
3536 *pos += 4;
3537 size -= 4;
3538 }
3539
3540 return result;
3541}
3542
3543static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
3544 size_t size, loff_t *pos)
3545{
45063097 3546 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3547 ssize_t result = 0;
3548 int r;
3549
3550 if (size & 0x3 || *pos & 0x3)
3551 return -EINVAL;
3552
3553 while (size) {
3554 uint32_t value;
3555
3556 r = get_user(value, (uint32_t *)buf);
3557 if (r)
3558 return r;
3559
6fc0deaf 3560 WREG32_SMC(*pos, value);
adcec288
TSD
3561
3562 result += 4;
3563 buf += 4;
3564 *pos += 4;
3565 size -= 4;
3566 }
3567
3568 return result;
3569}
3570
1e051413
TSD
3571static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
3572 size_t size, loff_t *pos)
3573{
45063097 3574 struct amdgpu_device *adev = file_inode(f)->i_private;
1e051413
TSD
3575 ssize_t result = 0;
3576 int r;
3577 uint32_t *config, no_regs = 0;
3578
3579 if (size & 0x3 || *pos & 0x3)
3580 return -EINVAL;
3581
ecab7668 3582 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
1e051413
TSD
3583 if (!config)
3584 return -ENOMEM;
3585
3586 /* version, increment each time something is added */
9a999359 3587 config[no_regs++] = 3;
1e051413
TSD
3588 config[no_regs++] = adev->gfx.config.max_shader_engines;
3589 config[no_regs++] = adev->gfx.config.max_tile_pipes;
3590 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
3591 config[no_regs++] = adev->gfx.config.max_sh_per_se;
3592 config[no_regs++] = adev->gfx.config.max_backends_per_se;
3593 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
3594 config[no_regs++] = adev->gfx.config.max_gprs;
3595 config[no_regs++] = adev->gfx.config.max_gs_threads;
3596 config[no_regs++] = adev->gfx.config.max_hw_contexts;
3597 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
3598 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
3599 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
3600 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
3601 config[no_regs++] = adev->gfx.config.num_tile_pipes;
3602 config[no_regs++] = adev->gfx.config.backend_enable_mask;
3603 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
3604 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
3605 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
3606 config[no_regs++] = adev->gfx.config.num_gpus;
3607 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
3608 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
3609 config[no_regs++] = adev->gfx.config.gb_addr_config;
3610 config[no_regs++] = adev->gfx.config.num_rbs;
3611
89a8f309
TSD
3612 /* rev==1 */
3613 config[no_regs++] = adev->rev_id;
3614 config[no_regs++] = adev->pg_flags;
3615 config[no_regs++] = adev->cg_flags;
3616
e9f11dc8
TSD
3617 /* rev==2 */
3618 config[no_regs++] = adev->family;
3619 config[no_regs++] = adev->external_rev_id;
3620
9a999359
TSD
3621 /* rev==3 */
3622 config[no_regs++] = adev->pdev->device;
3623 config[no_regs++] = adev->pdev->revision;
3624 config[no_regs++] = adev->pdev->subsystem_device;
3625 config[no_regs++] = adev->pdev->subsystem_vendor;
3626
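	/*
	 * Everything above is exposed to user space as a flat array of
	 * dwords: config[0] is the layout version (3 here) and no_regs is
	 * the number of valid entries, so the copy loop below hits EOF once
	 * *pos reaches no_regs * 4 bytes.
	 */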
1e051413
TSD
3627 while (size && (*pos < no_regs * 4)) {
3628 uint32_t value;
3629
3630 value = config[*pos >> 2];
3631 r = put_user(value, (uint32_t *)buf);
3632 if (r) {
3633 kfree(config);
3634 return r;
3635 }
3636
3637 result += 4;
3638 buf += 4;
3639 *pos += 4;
3640 size -= 4;
3641 }
3642
3643 kfree(config);
3644 return result;
3645}
3646
f2cdaf20
TSD
3647static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
3648 size_t size, loff_t *pos)
3649{
45063097 3650 struct amdgpu_device *adev = file_inode(f)->i_private;
9f8df7d7
TSD
3651 int idx, x, outsize, r, valuesize;
3652 uint32_t values[16];
f2cdaf20 3653
9f8df7d7 3654 if (size & 3 || *pos & 0x3)
f2cdaf20
TSD
3655 return -EINVAL;
3656
3cbc614f
SP
3657 if (amdgpu_dpm == 0)
3658 return -EINVAL;
3659
f2cdaf20
TSD
3660 /* convert offset to sensor number */
3661 idx = *pos >> 2;
3662
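	/*
	 * The index computed above is handed straight to the powerplay
	 * read_sensor callback, so it is interpreted as a sensor id
	 * (enum amd_pp_sensors in this tree); a sensor may return more than
	 * one dword and the callback may update valuesize accordingly.
	 */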
9f8df7d7 3663 valuesize = sizeof(values);
f2cdaf20 3664 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
cd4d7464 3665 r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
f2cdaf20
TSD
3666 else
3667 return -EINVAL;
3668
9f8df7d7
TSD
3669 if (size > valuesize)
3670 return -EINVAL;
3671
3672 outsize = 0;
3673 x = 0;
3674 if (!r) {
3675 while (size) {
3676 r = put_user(values[x++], (int32_t *)buf);
3677 buf += 4;
3678 size -= 4;
3679 outsize += 4;
3680 }
3681 }
f2cdaf20 3682
9f8df7d7 3683 return !r ? outsize : r;
f2cdaf20 3684}
1e051413 3685
273d7aa1
TSD
3686static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
3687 size_t size, loff_t *pos)
3688{
3689 struct amdgpu_device *adev = f->f_inode->i_private;
3690 int r, x;
 3691	ssize_t result = 0;
472259f0 3692 uint32_t offset, se, sh, cu, wave, simd, data[32];
273d7aa1
TSD
3693
3694 if (size & 3 || *pos & 3)
3695 return -EINVAL;
3696
3697 /* decode offset */
0b968650
TSD
3698 offset = (*pos & GENMASK_ULL(6, 0));
3699 se = (*pos & GENMASK_ULL(14, 7)) >> 7;
3700 sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
3701 cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
3702 wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
3703 simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
273d7aa1
TSD
3704
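	/*
	 * In other words, the 64-bit file offset packs the wave selector:
	 *   bits  0..6   dword-aligned byte offset into the wave data
	 *   bits  7..14  shader engine (se)
	 *   bits 15..22  shader array (sh)
	 *   bits 23..30  compute unit (cu)
	 *   bits 31..36  wave id
	 *   bits 37..44  simd id
	 * User space is expected to pack these fields into *pos (e.g. via
	 * lseek) before reading.
	 */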
3705 /* switch to the specific se/sh/cu */
3706 mutex_lock(&adev->grbm_idx_mutex);
3707 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3708
3709 x = 0;
472259f0
TSD
3710 if (adev->gfx.funcs->read_wave_data)
3711 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
273d7aa1
TSD
3712
3713 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3714 mutex_unlock(&adev->grbm_idx_mutex);
3715
5ecfb3b8
TSD
3716 if (!x)
3717 return -EINVAL;
3718
472259f0 3719 while (size && (offset < x * 4)) {
273d7aa1
TSD
3720 uint32_t value;
3721
472259f0 3722 value = data[offset >> 2];
273d7aa1
TSD
3723 r = put_user(value, (uint32_t *)buf);
3724 if (r)
3725 return r;
3726
3727 result += 4;
3728 buf += 4;
472259f0 3729 offset += 4;
273d7aa1
TSD
3730 size -= 4;
3731 }
3732
3733 return result;
3734}
3735
c5a60ce8
TSD
3736static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
3737 size_t size, loff_t *pos)
3738{
3739 struct amdgpu_device *adev = f->f_inode->i_private;
3740 int r;
3741 ssize_t result = 0;
3742 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
3743
3744 if (size & 3 || *pos & 3)
3745 return -EINVAL;
3746
3747 /* decode offset */
0b968650
TSD
3748 offset = *pos & GENMASK_ULL(11, 0);
3749 se = (*pos & GENMASK_ULL(19, 12)) >> 12;
3750 sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
3751 cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
3752 wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
3753 simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
3754 thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
3755 bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
c5a60ce8
TSD
3756
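	/*
	 * Same scheme as the wave file, with a wider offset field and two
	 * extra selectors:
	 *   bits  0..11  offset into the GPR data
	 *   bits 12..19  se,   bits 20..27  sh,   bits 28..35  cu
	 *   bits 36..43  wave, bits 44..51  simd, bits 52..59  thread
	 *   bits 60..61  bank: 0 selects VGPRs, anything else SGPRs
	 */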
3757 data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
3758 if (!data)
3759 return -ENOMEM;
3760
3761 /* switch to the specific se/sh/cu */
3762 mutex_lock(&adev->grbm_idx_mutex);
3763 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3764
3765 if (bank == 0) {
3766 if (adev->gfx.funcs->read_wave_vgprs)
3767 adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
3768 } else {
3769 if (adev->gfx.funcs->read_wave_sgprs)
3770 adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
3771 }
3772
3773 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3774 mutex_unlock(&adev->grbm_idx_mutex);
3775
3776 while (size) {
3777 uint32_t value;
3778
3779 value = data[offset++];
3780 r = put_user(value, (uint32_t *)buf);
3781 if (r) {
3782 result = r;
3783 goto err;
3784 }
3785
3786 result += 4;
3787 buf += 4;
3788 size -= 4;
3789 }
3790
3791err:
3792 kfree(data);
3793 return result;
3794}
3795
d38ceaf9
AD
3796static const struct file_operations amdgpu_debugfs_regs_fops = {
3797 .owner = THIS_MODULE,
3798 .read = amdgpu_debugfs_regs_read,
3799 .write = amdgpu_debugfs_regs_write,
3800 .llseek = default_llseek
3801};
adcec288
TSD
3802static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3803 .owner = THIS_MODULE,
3804 .read = amdgpu_debugfs_regs_didt_read,
3805 .write = amdgpu_debugfs_regs_didt_write,
3806 .llseek = default_llseek
3807};
3808static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3809 .owner = THIS_MODULE,
3810 .read = amdgpu_debugfs_regs_pcie_read,
3811 .write = amdgpu_debugfs_regs_pcie_write,
3812 .llseek = default_llseek
3813};
3814static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3815 .owner = THIS_MODULE,
3816 .read = amdgpu_debugfs_regs_smc_read,
3817 .write = amdgpu_debugfs_regs_smc_write,
3818 .llseek = default_llseek
3819};
3820
1e051413
TSD
3821static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3822 .owner = THIS_MODULE,
3823 .read = amdgpu_debugfs_gca_config_read,
3824 .llseek = default_llseek
3825};
3826
f2cdaf20
TSD
3827static const struct file_operations amdgpu_debugfs_sensors_fops = {
3828 .owner = THIS_MODULE,
3829 .read = amdgpu_debugfs_sensor_read,
3830 .llseek = default_llseek
3831};
3832
273d7aa1
TSD
3833static const struct file_operations amdgpu_debugfs_wave_fops = {
3834 .owner = THIS_MODULE,
3835 .read = amdgpu_debugfs_wave_read,
3836 .llseek = default_llseek
3837};
c5a60ce8
TSD
3838static const struct file_operations amdgpu_debugfs_gpr_fops = {
3839 .owner = THIS_MODULE,
3840 .read = amdgpu_debugfs_gpr_read,
3841 .llseek = default_llseek
3842};
273d7aa1 3843
adcec288
TSD
3844static const struct file_operations *debugfs_regs[] = {
3845 &amdgpu_debugfs_regs_fops,
3846 &amdgpu_debugfs_regs_didt_fops,
3847 &amdgpu_debugfs_regs_pcie_fops,
3848 &amdgpu_debugfs_regs_smc_fops,
1e051413 3849 &amdgpu_debugfs_gca_config_fops,
f2cdaf20 3850 &amdgpu_debugfs_sensors_fops,
273d7aa1 3851 &amdgpu_debugfs_wave_fops,
c5a60ce8 3852 &amdgpu_debugfs_gpr_fops,
adcec288
TSD
3853};
3854
3855static const char *debugfs_regs_names[] = {
3856 "amdgpu_regs",
3857 "amdgpu_regs_didt",
3858 "amdgpu_regs_pcie",
3859 "amdgpu_regs_smc",
1e051413 3860 "amdgpu_gca_config",
f2cdaf20 3861 "amdgpu_sensors",
273d7aa1 3862 "amdgpu_wave",
c5a60ce8 3863 "amdgpu_gpr",
adcec288 3864};
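/*
 * Illustrative example only (the DRI instance number and register offset
 * are assumptions, not part of this file): with debugfs mounted, a single
 * MMIO register can be read through the first entry above, e.g.
 *
 *   dd if=/sys/kernel/debug/dri/0/amdgpu_regs bs=4 count=1 \
 *      skip=$((0x8010 / 4)) | xxd
 *
 * which transfers one dword from byte offset 0x8010.
 */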
d38ceaf9
AD
3865
3866static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3867{
3868 struct drm_minor *minor = adev->ddev->primary;
3869 struct dentry *ent, *root = minor->debugfs_root;
adcec288
TSD
3870 unsigned i, j;
3871
3872 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3873 ent = debugfs_create_file(debugfs_regs_names[i],
3874 S_IFREG | S_IRUGO, root,
3875 adev, debugfs_regs[i]);
3876 if (IS_ERR(ent)) {
3877 for (j = 0; j < i; j++) {
 3878				debugfs_remove(adev->debugfs_regs[j]);
 3879				adev->debugfs_regs[j] = NULL;
3880 }
3881 return PTR_ERR(ent);
3882 }
d38ceaf9 3883
adcec288
TSD
3884 if (!i)
3885 i_size_write(ent->d_inode, adev->rmmio_size);
3886 adev->debugfs_regs[i] = ent;
3887 }
d38ceaf9
AD
3888
3889 return 0;
3890}
3891
3892static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
3893{
adcec288
TSD
3894 unsigned i;
3895
3896 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3897 if (adev->debugfs_regs[i]) {
3898 debugfs_remove(adev->debugfs_regs[i]);
3899 adev->debugfs_regs[i] = NULL;
3900 }
3901 }
d38ceaf9
AD
3902}
3903
4f0955fc
HR
3904static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
3905{
3906 struct drm_info_node *node = (struct drm_info_node *) m->private;
3907 struct drm_device *dev = node->minor->dev;
3908 struct amdgpu_device *adev = dev->dev_private;
3909 int r = 0, i;
3910
 3911	/* hold the scheduler by parking all ring kthreads */
3912 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3913 struct amdgpu_ring *ring = adev->rings[i];
3914
3915 if (!ring || !ring->sched.thread)
3916 continue;
3917 kthread_park(ring->sched.thread);
3918 }
3919
3920 seq_printf(m, "run ib test:\n");
3921 r = amdgpu_ib_ring_tests(adev);
3922 if (r)
3923 seq_printf(m, "ib ring tests failed (%d).\n", r);
3924 else
3925 seq_printf(m, "ib ring tests passed.\n");
3926
 3927	/* let the scheduler run again by unparking the ring kthreads */
3928 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3929 struct amdgpu_ring *ring = adev->rings[i];
3930
3931 if (!ring || !ring->sched.thread)
3932 continue;
3933 kthread_unpark(ring->sched.thread);
3934 }
3935
3936 return 0;
3937}
3938
3939static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = {
3940 {"amdgpu_test_ib", &amdgpu_debugfs_test_ib}
3941};
3942
3943static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
3944{
3945 return amdgpu_debugfs_add_files(adev,
3946 amdgpu_debugfs_test_ib_ring_list, 1);
3947}
3948
d38ceaf9
AD
3949int amdgpu_debugfs_init(struct drm_minor *minor)
3950{
3951 return 0;
3952}
db95e218
KR
3953
3954static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
3955{
3956 struct drm_info_node *node = (struct drm_info_node *) m->private;
3957 struct drm_device *dev = node->minor->dev;
3958 struct amdgpu_device *adev = dev->dev_private;
3959
3960 seq_write(m, adev->bios, adev->bios_size);
3961 return 0;
3962}
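/*
 * amdgpu_debugfs_get_vbios_dump() simply streams the raw video BIOS image
 * cached at init time (adev->bios / adev->bios_size), so the amdgpu_vbios
 * node registered below can be saved with a plain read of the file.
 */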
3963
db95e218
KR
3964static const struct drm_info_list amdgpu_vbios_dump_list[] = {
3965 {"amdgpu_vbios",
3966 amdgpu_debugfs_get_vbios_dump,
3967 0, NULL},
3968};
3969
db95e218
KR
3970static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
3971{
3972 return amdgpu_debugfs_add_files(adev,
3973 amdgpu_vbios_dump_list, 1);
3974}
7cebc728 3975#else
27bad5b9 3976static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
4f0955fc
HR
3977{
3978 return 0;
3979}
7cebc728
AK
3980static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3981{
3982 return 0;
3983}
db95e218
KR
3984static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
3985{
3986 return 0;
3987}
7cebc728 3988static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
d38ceaf9 3989#endif