drm/amdgpu: refine SR-IOV firmware VRAM reservation to protect data
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
0875dc9e 28#include <linux/kthread.h>
29#include <linux/console.h>
30#include <linux/slab.h>
31#include <linux/debugfs.h>
32#include <drm/drmP.h>
33#include <drm/drm_crtc_helper.h>
4562236b 34#include <drm/drm_atomic_helper.h>
35#include <drm/amdgpu_drm.h>
36#include <linux/vgaarb.h>
37#include <linux/vga_switcheroo.h>
38#include <linux/efi.h>
39#include "amdgpu.h"
f4b373f4 40#include "amdgpu_trace.h"
41#include "amdgpu_i2c.h"
42#include "atom.h"
43#include "amdgpu_atombios.h"
a5bde2f9 44#include "amdgpu_atomfirmware.h"
d0dd7f0c 45#include "amd_pcie.h"
46#ifdef CONFIG_DRM_AMDGPU_SI
47#include "si.h"
48#endif
49#ifdef CONFIG_DRM_AMDGPU_CIK
50#include "cik.h"
51#endif
aaa36a97 52#include "vi.h"
460826e6 53#include "soc15.h"
d38ceaf9 54#include "bif/bif_4_1_d.h"
9accf2fd 55#include <linux/pci.h>
bec86378 56#include <linux/firmware.h>
89041940 57#include "amdgpu_vf_error.h"
d38ceaf9 58
ba997709 59#include "amdgpu_amdkfd.h"
d2f52ac8 60#include "amdgpu_pm.h"
d38ceaf9 61
e2a75f88 62MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
2d2e5e7e 63MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
e2a75f88 64
65#define AMDGPU_RESUME_MS 2000
66
67static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
68static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
4f0955fc 69static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
db95e218 70static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev);
71
72static const char *amdgpu_asic_name[] = {
73 "TAHITI",
74 "PITCAIRN",
75 "VERDE",
76 "OLAND",
77 "HAINAN",
78 "BONAIRE",
79 "KAVERI",
80 "KABINI",
81 "HAWAII",
82 "MULLINS",
83 "TOPAZ",
84 "TONGA",
48299f95 85 "FIJI",
d38ceaf9 86 "CARRIZO",
139f4917 87 "STONEY",
88 "POLARIS10",
89 "POLARIS11",
c4642a47 90 "POLARIS12",
d4196f01 91 "VEGA10",
2ca8a5d2 92 "RAVEN",
93 "LAST",
94};
95
96bool amdgpu_device_is_px(struct drm_device *dev)
97{
98 struct amdgpu_device *adev = dev->dev_private;
99
2f7d10b3 100 if (adev->flags & AMD_IS_PX)
101 return true;
102 return false;
103}
104
105/*
106 * MMIO register access helper functions.
107 */
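/*
 * Overview of the helpers below: register offsets that land inside the
 * mapped MMIO BAR ((reg * 4) < rmmio_size) are accessed directly with
 * readl()/writel(); larger offsets go through the indirect
 * mmMM_INDEX/mmMM_DATA register pair under mmio_idx_lock.  When running
 * as an SR-IOV VF at runtime, accesses are routed through the KIQ
 * helpers instead, unless AMDGPU_REGS_NO_KIQ is set in acc_flags.
 */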
108uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
15d72fd7 109 uint32_t acc_flags)
d38ceaf9 110{
111 uint32_t ret;
112
43ca8efa 113 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
bc992ba5 114 return amdgpu_virt_kiq_rreg(adev, reg);
bc992ba5 115
15d72fd7 116 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
f4b373f4 117 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
118 else {
119 unsigned long flags;
120
121 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
122 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
123 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
124 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
d38ceaf9 125 }
126 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
127 return ret;
128}
129
130void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
15d72fd7 131 uint32_t acc_flags)
d38ceaf9 132{
f4b373f4 133 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
4e99a44e 134
135 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
136 adev->last_mm_index = v;
137 }
138
43ca8efa 139 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
bc992ba5 140 return amdgpu_virt_kiq_wreg(adev, reg, v);
bc992ba5 141
15d72fd7 142 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
143 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
144 else {
145 unsigned long flags;
146
147 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
148 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
149 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
150 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
151 }
152
153 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
154 udelay(500);
155 }
156}
157
158u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
159{
160 if ((reg * 4) < adev->rio_mem_size)
161 return ioread32(adev->rio_mem + (reg * 4));
162 else {
163 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
164 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
165 }
166}
167
168void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
169{
170 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
171 adev->last_mm_index = v;
172 }
173
174 if ((reg * 4) < adev->rio_mem_size)
175 iowrite32(v, adev->rio_mem + (reg * 4));
176 else {
177 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
178 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
179 }
180
181 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
182 udelay(500);
183 }
184}
185
186/**
187 * amdgpu_mm_rdoorbell - read a doorbell dword
188 *
189 * @adev: amdgpu_device pointer
190 * @index: doorbell index
191 *
192 * Returns the value in the doorbell aperture at the
193 * requested doorbell index (CIK).
194 */
195u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
196{
197 if (index < adev->doorbell.num_doorbells) {
198 return readl(adev->doorbell.ptr + index);
199 } else {
200 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
201 return 0;
202 }
203}
204
205/**
206 * amdgpu_mm_wdoorbell - write a doorbell dword
207 *
208 * @adev: amdgpu_device pointer
209 * @index: doorbell index
210 * @v: value to write
211 *
212 * Writes @v to the doorbell aperture at the
213 * requested doorbell index (CIK).
214 */
215void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
216{
217 if (index < adev->doorbell.num_doorbells) {
218 writel(v, adev->doorbell.ptr + index);
219 } else {
220 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
221 }
222}
223
224/**
225 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
226 *
227 * @adev: amdgpu_device pointer
228 * @index: doorbell index
229 *
230 * Returns the value in the doorbell aperture at the
231 * requested doorbell index (VEGA10+).
232 */
233u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
234{
235 if (index < adev->doorbell.num_doorbells) {
236 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
237 } else {
238 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
239 return 0;
240 }
241}
242
243/**
244 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
245 *
246 * @adev: amdgpu_device pointer
247 * @index: doorbell index
248 * @v: value to write
249 *
250 * Writes @v to the doorbell aperture at the
251 * requested doorbell index (VEGA10+).
252 */
253void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
254{
255 if (index < adev->doorbell.num_doorbells) {
256 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
257 } else {
258 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
259 }
260}
261
262/**
263 * amdgpu_invalid_rreg - dummy reg read function
264 *
265 * @adev: amdgpu device pointer
266 * @reg: offset of register
267 *
268 * Dummy register read function. Used for register blocks
269 * that certain asics don't have (all asics).
270 * Returns the value in the register.
271 */
272static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
273{
274 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
275 BUG();
276 return 0;
277}
278
279/**
280 * amdgpu_invalid_wreg - dummy reg write function
281 *
282 * @adev: amdgpu device pointer
283 * @reg: offset of register
284 * @v: value to write to the register
285 *
286 * Dummy register write function. Used for register blocks
287 * that certain asics don't have (all asics).
288 */
289static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
290{
291 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
292 reg, v);
293 BUG();
294}
295
296/**
297 * amdgpu_block_invalid_rreg - dummy reg read function
298 *
299 * @adev: amdgpu device pointer
300 * @block: offset of instance
301 * @reg: offset of register
302 *
303 * Dummy register read function. Used for register blocks
304 * that certain asics don't have (all asics).
305 * Returns the value in the register.
306 */
307static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
308 uint32_t block, uint32_t reg)
309{
310 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
311 reg, block);
312 BUG();
313 return 0;
314}
315
316/**
317 * amdgpu_block_invalid_wreg - dummy reg write function
318 *
319 * @adev: amdgpu device pointer
320 * @block: offset of instance
321 * @reg: offset of register
322 * @v: value to write to the register
323 *
324 * Dummy register write function. Used for register blocks
325 * that certain asics don't have (all asics).
326 */
327static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
328 uint32_t block,
329 uint32_t reg, uint32_t v)
330{
331 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
332 reg, block, v);
333 BUG();
334}
335
336static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
337{
338 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
339 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
340 &adev->vram_scratch.robj,
341 &adev->vram_scratch.gpu_addr,
342 (void **)&adev->vram_scratch.ptr);
343}
344
345static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
346{
078af1a3 347 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
348}
349
350/**
351 * amdgpu_program_register_sequence - program an array of registers.
352 *
353 * @adev: amdgpu_device pointer
354 * @registers: pointer to the register array
355 * @array_size: size of the register array
356 *
357 * Programs an array of registers with AND and OR masks.
358 * This is a helper for setting golden registers.
359 */
360void amdgpu_program_register_sequence(struct amdgpu_device *adev,
361 const u32 *registers,
362 const u32 array_size)
363{
364 u32 tmp, reg, and_mask, or_mask;
365 int i;
366
367 if (array_size % 3)
368 return;
369
370 for (i = 0; i < array_size; i +=3) {
371 reg = registers[i + 0];
372 and_mask = registers[i + 1];
373 or_mask = registers[i + 2];
374
375 if (and_mask == 0xffffffff) {
376 tmp = or_mask;
377 } else {
378 tmp = RREG32(reg);
379 tmp &= ~and_mask;
380 tmp |= or_mask;
381 }
382 WREG32(reg, tmp);
383 }
384}
385
386void amdgpu_pci_config_reset(struct amdgpu_device *adev)
387{
388 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
389}
390
391/*
392 * GPU doorbell aperture helpers function.
393 */
394/**
395 * amdgpu_doorbell_init - Init doorbell driver information.
396 *
397 * @adev: amdgpu_device pointer
398 *
399 * Init doorbell driver information (CIK)
400 * Returns 0 on success, error on failure.
401 */
402static int amdgpu_doorbell_init(struct amdgpu_device *adev)
403{
404 /* No doorbell on SI hardware generation */
405 if (adev->asic_type < CHIP_BONAIRE) {
406 adev->doorbell.base = 0;
407 adev->doorbell.size = 0;
408 adev->doorbell.num_doorbells = 0;
409 adev->doorbell.ptr = NULL;
410 return 0;
411 }
412
413 /* doorbell bar mapping */
414 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
415 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
416
edf600da 417 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
418 AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
419 if (adev->doorbell.num_doorbells == 0)
420 return -EINVAL;
421
422 adev->doorbell.ptr = ioremap(adev->doorbell.base,
423 adev->doorbell.num_doorbells *
424 sizeof(u32));
425 if (adev->doorbell.ptr == NULL)
d38ceaf9 426 return -ENOMEM;
427
428 return 0;
429}
430
431/**
432 * amdgpu_doorbell_fini - Tear down doorbell driver information.
433 *
434 * @adev: amdgpu_device pointer
435 *
436 * Tear down doorbell driver information (CIK)
437 */
438static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
439{
440 iounmap(adev->doorbell.ptr);
441 adev->doorbell.ptr = NULL;
442}
443
444/**
445 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
446 * setup amdkfd
447 *
448 * @adev: amdgpu_device pointer
449 * @aperture_base: output returning doorbell aperture base physical address
450 * @aperture_size: output returning doorbell aperture size in bytes
451 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
452 *
453 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
454 * takes doorbells required for its own rings and reports the setup to amdkfd.
455 * amdgpu reserved doorbells are at the start of the doorbell aperture.
456 */
457void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
458 phys_addr_t *aperture_base,
459 size_t *aperture_size,
460 size_t *start_offset)
461{
462 /*
463 * The first num_doorbells are used by amdgpu.
464 * amdkfd takes whatever's left in the aperture.
465 */
466 if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
467 *aperture_base = adev->doorbell.base;
468 *aperture_size = adev->doorbell.size;
469 *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
470 } else {
471 *aperture_base = 0;
472 *aperture_size = 0;
473 *start_offset = 0;
474 }
475}
476
477/*
478 * amdgpu_wb_*()
455a7bc2 479 * Writeback is the method by which the GPU updates special pages in memory
ea81a173 480 * with the status of certain GPU events (fences, ring pointers, etc.).
481 */
482
483/**
484 * amdgpu_wb_fini - Disable Writeback and free memory
485 *
486 * @adev: amdgpu_device pointer
487 *
488 * Disables Writeback and frees the Writeback memory (all asics).
489 * Used at driver shutdown.
490 */
491static void amdgpu_wb_fini(struct amdgpu_device *adev)
492{
493 if (adev->wb.wb_obj) {
494 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
495 &adev->wb.gpu_addr,
496 (void **)&adev->wb.wb);
497 adev->wb.wb_obj = NULL;
498 }
499}
500
501/**
502 * amdgpu_wb_init- Init Writeback driver info and allocate memory
503 *
504 * @adev: amdgpu_device pointer
505 *
455a7bc2 506 * Initializes writeback and allocates writeback memory (all asics).
507 * Used at driver startup.
508 * Returns 0 on success or a negative error code on failure.
509 */
510static int amdgpu_wb_init(struct amdgpu_device *adev)
511{
512 int r;
513
514 if (adev->wb.wb_obj == NULL) {
515 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
516 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
517 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
518 &adev->wb.wb_obj, &adev->wb.gpu_addr,
519 (void **)&adev->wb.wb);
520 if (r) {
521 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
522 return r;
523 }
524
525 adev->wb.num_wb = AMDGPU_MAX_WB;
526 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
527
528 /* clear wb memory */
60a970a6 529 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
530 }
531
532 return 0;
533}
534
535/**
536 * amdgpu_wb_get - Allocate a wb entry
537 *
538 * @adev: amdgpu_device pointer
539 * @wb: wb index
540 *
541 * Allocate a wb slot for use by the driver (all asics).
542 * Returns 0 on success or -EINVAL on failure.
543 */
544int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
545{
546 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
d38ceaf9 547
97407b63 548 if (offset < adev->wb.num_wb) {
7014285a 549 __set_bit(offset, adev->wb.used);
63ae07ca 550 *wb = offset << 3; /* convert to dw offset */
551 return 0;
552 } else {
553 return -EINVAL;
554 }
555}
556
557/**
558 * amdgpu_wb_free - Free a wb entry
559 *
560 * @adev: amdgpu_device pointer
561 * @wb: wb index
562 *
563 * Free a wb slot allocated for use by the driver (all asics)
564 */
565void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
566{
567 if (wb < adev->wb.num_wb)
63ae07ca 568 __clear_bit(wb >> 3, adev->wb.used);
569}
570
571/**
572 * amdgpu_vram_location - try to find VRAM location
573 * @adev: amdgpu device structure holding all necessary information
574 * @mc: memory controller structure holding memory information
575 * @base: base address at which to put VRAM
576 *
455a7bc2 577 * Function will try to place VRAM at the base address provided
578 * as parameter (which is so far either the PCI aperture address or,
579 * for IGPs, the TOM base address).
580 *
581 * If there is not enough space to fit the non-visible VRAM in the 32-bit
582 * address space then we limit the VRAM size to the aperture.
583 *
584 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
585 * this shouldn't be a problem as we are using the PCI aperture as a reference.
586 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
587 * not IGP.
588 *
589 * Note: we use mc_vram_size as on some boards we need to program the MC to
590 * cover the whole aperture even if the VRAM size is smaller than the aperture
591 * size (Novell bug 204882, along with lots of Ubuntu ones).
592 *
593 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
594 * we are not in the case where real_vram_size is smaller than mc_vram_size
595 * (i.e. not affected by the bogus hw of Novell bug 204882 and the related
596 * Ubuntu ones).
597 *
598 * Note: IGP TOM addr should be the same as the aperture addr, we don't
455a7bc2 599 * explicitly check for that though.
600 *
601 * FIXME: when reducing VRAM size align new size on power of 2.
602 */
603void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
604{
605 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
606
607 mc->vram_start = base;
608 if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
609 dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
610 mc->real_vram_size = mc->aper_size;
611 mc->mc_vram_size = mc->aper_size;
612 }
613 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
614 if (limit && limit < mc->real_vram_size)
615 mc->real_vram_size = limit;
616 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
617 mc->mc_vram_size >> 20, mc->vram_start,
618 mc->vram_end, mc->real_vram_size >> 20);
619}
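/*
 * Illustrative example (made-up numbers): with base = 0xF400000000 and
 * mc_vram_size = 8 GiB, vram_start = 0xF400000000 and
 * vram_end = 0xF5FFFFFFFF.  If amdgpu_vram_limit is set to 4096 (MiB),
 * real_vram_size is clamped to 4 GiB while mc_vram_size still covers the
 * full 8 GiB so the MC can be programmed over the whole aperture.
 */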
620
621/**
6f02a696 622 * amdgpu_gart_location - try to find GTT location
623 * @adev: amdgpu device structure holding all necessary information
624 * @mc: memory controller structure holding memory information
625 *
626 * Function will try to place GTT before or after VRAM.
627 *
628 * If the GTT size is bigger than the space left then we adjust the GTT size.
629 * Thus this function will never fail.
630 *
631 * FIXME: when reducing GTT size align new size on power of 2.
632 */
6f02a696 633void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
634{
635 u64 size_af, size_bf;
636
637 size_af = adev->mc.mc_mask - mc->vram_end;
638 size_bf = mc->vram_start;
d38ceaf9 639 if (size_bf > size_af) {
6f02a696 640 if (mc->gart_size > size_bf) {
d38ceaf9 641 dev_warn(adev->dev, "limiting GTT\n");
6f02a696 642 mc->gart_size = size_bf;
d38ceaf9 643 }
6f02a696 644 mc->gart_start = 0;
d38ceaf9 645 } else {
6f02a696 646 if (mc->gart_size > size_af) {
d38ceaf9 647 dev_warn(adev->dev, "limiting GTT\n");
6f02a696 648 mc->gart_size = size_af;
d38ceaf9 649 }
6f02a696 650 mc->gart_start = mc->vram_end + 1;
d38ceaf9 651 }
6f02a696 652 mc->gart_end = mc->gart_start + mc->gart_size - 1;
d38ceaf9 653 dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
6f02a696 654 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
655}
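/*
 * In short: the GART aperture goes into whichever gap of the MC address
 * space (below vram_start or above vram_end) is larger, and gart_size is
 * shrunk to fit if necessary, which is why this placement cannot fail.
 */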
656
657/*
658 * Firmware Reservation functions
659 */
660/**
661 * amdgpu_fw_reserve_vram_fini - free fw reserved vram
662 *
663 * @adev: amdgpu_device pointer
664 *
665 * Free the firmware reserved VRAM if it has been reserved.
666 */
667void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
668{
669 amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
670 NULL, &adev->fw_vram_usage.va);
671}
672
673/**
674 * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
675 *
676 * @adev: amdgpu_device pointer
677 *
678 * Create a BO VRAM reservation at the offset and size requested by the firmware.
679 */
680int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
681{
682 int r = 0;
3c738893 683 int i;
684 u64 gpu_addr;
685 u64 vram_size = adev->mc.visible_vram_size;
686 u64 offset = adev->fw_vram_usage.start_offset;
687 u64 size = adev->fw_vram_usage.size;
688 struct amdgpu_bo *bo;
689
690 adev->fw_vram_usage.va = NULL;
691 adev->fw_vram_usage.reserved_bo = NULL;
692
693 if (adev->fw_vram_usage.size > 0 &&
694 adev->fw_vram_usage.size <= vram_size) {
695
696 r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
3c738893 697 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
698 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
699 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
700 &adev->fw_vram_usage.reserved_bo);
701 if (r)
702 goto error_create;
703
704 r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
705 if (r)
706 goto error_reserve;
707
708 /* remove the original mem node and create a new one at the
709 * request position
710 */
711 bo = adev->fw_vram_usage.reserved_bo;
712 offset = ALIGN(offset, PAGE_SIZE);
713 for (i = 0; i < bo->placement.num_placement; ++i) {
714 bo->placements[i].fpfn = offset >> PAGE_SHIFT;
715 bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
716 }
717
718 ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
719 r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem,
720 false, false);
721 if (r)
722 goto error_pin;
723
724 r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
725 AMDGPU_GEM_DOMAIN_VRAM,
726 adev->fw_vram_usage.start_offset,
727 (adev->fw_vram_usage.start_offset +
728 adev->fw_vram_usage.size), &gpu_addr);
729 if (r)
730 goto error_pin;
731 r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
732 &adev->fw_vram_usage.va);
733 if (r)
734 goto error_kmap;
735
736 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
737 }
738 return r;
739
740error_kmap:
741 amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
742error_pin:
743 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
744error_reserve:
745 amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
746error_create:
747 adev->fw_vram_usage.va = NULL;
748 adev->fw_vram_usage.reserved_bo = NULL;
749 return r;
750}
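/*
 * Rough flow of the reservation above: create a CPU-accessible, contiguous
 * VRAM BO of the firmware-requested size, rewrite its placements so that
 * fpfn/lpfn cover exactly the firmware-requested offset, move it there via
 * ttm_bo_mem_space(), then pin and kmap it so the reserved region stays
 * protected and CPU-visible for the lifetime of the driver.
 */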
751
752
753/*
754 * GPU helper functions.
755 */
756/**
c836fec5 757 * amdgpu_need_post - check if the hw needs post or not
758 *
759 * @adev: amdgpu_device pointer
760 *
761 * Check if the asic has been initialized (all asics) at driver startup,
762 * or whether a post is needed because a hw reset was performed.
763 * Returns true if a post is needed, false if not.
d38ceaf9 764 */
c836fec5 765bool amdgpu_need_post(struct amdgpu_device *adev)
766{
767 uint32_t reg;
768
769 if (amdgpu_sriov_vf(adev))
770 return false;
771
772 if (amdgpu_passthrough(adev)) {
773 /* for FIJI: In the whole-GPU pass-through virtualization case, after a VM
774 * reboot some old SMC firmware still needs the driver to do a vPost,
775 * otherwise the GPU hangs.  SMC firmware above version 22.15 doesn't have
776 * this flaw, so we force a vPost for SMC versions below 22.15.
777 */
778 if (adev->asic_type == CHIP_FIJI) {
779 int err;
780 uint32_t fw_ver;
781 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
782 /* force vPost if an error occurred */
783 if (err)
784 return true;
785
786 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
787 if (fw_ver < 0x00160e00)
788 return true;
bec86378 789 }
bec86378 790 }
91fe77eb 791
792 if (adev->has_hw_reset) {
793 adev->has_hw_reset = false;
794 return true;
795 }
796
797 /* bios scratch used on CIK+ */
798 if (adev->asic_type >= CHIP_BONAIRE)
799 return amdgpu_atombios_scratch_need_asic_init(adev);
800
801 /* check MEM_SIZE for older asics */
802 reg = amdgpu_asic_get_config_memsize(adev);
803
804 if ((reg != 0) && (reg != 0xffffffff))
805 return false;
806
807 return true;
808}
809
810/**
811 * amdgpu_dummy_page_init - init dummy page used by the driver
812 *
813 * @adev: amdgpu_device pointer
814 *
815 * Allocate the dummy page used by the driver (all asics).
816 * This dummy page is used by the driver as a filler for gart entries
817 * when pages are taken out of the GART
818 * Returns 0 on success, -ENOMEM on failure.
819 */
820int amdgpu_dummy_page_init(struct amdgpu_device *adev)
821{
822 if (adev->dummy_page.page)
823 return 0;
824 adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
825 if (adev->dummy_page.page == NULL)
826 return -ENOMEM;
827 adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
828 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
829 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
830 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
831 __free_page(adev->dummy_page.page);
832 adev->dummy_page.page = NULL;
833 return -ENOMEM;
834 }
835 return 0;
836}
837
838/**
839 * amdgpu_dummy_page_fini - free dummy page used by the driver
840 *
841 * @adev: amdgpu_device pointer
842 *
843 * Frees the dummy page used by the driver (all asics).
844 */
845void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
846{
847 if (adev->dummy_page.page == NULL)
848 return;
849 pci_unmap_page(adev->pdev, adev->dummy_page.addr,
850 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
851 __free_page(adev->dummy_page.page);
852 adev->dummy_page.page = NULL;
853}
854
855
856/* ATOM accessor methods */
857/*
858 * ATOM is an interpreted byte code stored in tables in the vbios. The
859 * driver registers callbacks to access registers and the interpreter
860 * in the driver parses the tables and executes them to program specific
861 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
862 * atombios.h, and atom.c
863 */
864
865/**
866 * cail_pll_read - read PLL register
867 *
868 * @info: atom card_info pointer
869 * @reg: PLL register offset
870 *
871 * Provides a PLL register accessor for the atom interpreter (r4xx+).
872 * Returns the value of the PLL register.
873 */
874static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
875{
876 return 0;
877}
878
879/**
880 * cail_pll_write - write PLL register
881 *
882 * @info: atom card_info pointer
883 * @reg: PLL register offset
884 * @val: value to write to the pll register
885 *
886 * Provides a PLL register accessor for the atom interpreter (r4xx+).
887 */
888static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
889{
890
891}
892
893/**
894 * cail_mc_read - read MC (Memory Controller) register
895 *
896 * @info: atom card_info pointer
897 * @reg: MC register offset
898 *
899 * Provides an MC register accessor for the atom interpreter (r4xx+).
900 * Returns the value of the MC register.
901 */
902static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
903{
904 return 0;
905}
906
907/**
908 * cail_mc_write - write MC (Memory Controller) register
909 *
910 * @info: atom card_info pointer
911 * @reg: MC register offset
912 * @val: value to write to the MC register
913 *
914 * Provides an MC register accessor for the atom interpreter (r4xx+).
915 */
916static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
917{
918
919}
920
921/**
922 * cail_reg_write - write MMIO register
923 *
924 * @info: atom card_info pointer
925 * @reg: MMIO register offset
926 * @val: value to write to the MMIO register
927 *
928 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
929 */
930static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
931{
932 struct amdgpu_device *adev = info->dev->dev_private;
933
934 WREG32(reg, val);
935}
936
937/**
938 * cail_reg_read - read MMIO register
939 *
940 * @info: atom card_info pointer
941 * @reg: MMIO register offset
942 *
943 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
944 * Returns the value of the MMIO register.
945 */
946static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
947{
948 struct amdgpu_device *adev = info->dev->dev_private;
949 uint32_t r;
950
951 r = RREG32(reg);
952 return r;
953}
954
955/**
956 * cail_ioreg_write - write IO register
957 *
958 * @info: atom card_info pointer
959 * @reg: IO register offset
960 * @val: value to write to the IO register
961 *
962 * Provides an IO register accessor for the atom interpreter (r4xx+).
963 */
964static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
965{
966 struct amdgpu_device *adev = info->dev->dev_private;
967
968 WREG32_IO(reg, val);
969}
970
971/**
972 * cail_ioreg_read - read IO register
973 *
974 * @info: atom card_info pointer
975 * @reg: IO register offset
976 *
977 * Provides an IO register accessor for the atom interpreter (r4xx+).
978 * Returns the value of the IO register.
979 */
980static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
981{
982 struct amdgpu_device *adev = info->dev->dev_private;
983 uint32_t r;
984
985 r = RREG32_IO(reg);
986 return r;
987}
988
989static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
990 struct device_attribute *attr,
991 char *buf)
992{
993 struct drm_device *ddev = dev_get_drvdata(dev);
994 struct amdgpu_device *adev = ddev->dev_private;
995 struct atom_context *ctx = adev->mode_info.atom_context;
996
997 return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
998}
999
1000static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
1001 NULL);
1002
1003/**
1004 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
1005 *
1006 * @adev: amdgpu_device pointer
1007 *
1008 * Frees the driver info and register access callbacks for the ATOM
1009 * interpreter (r4xx+).
1010 * Called at driver shutdown.
1011 */
1012static void amdgpu_atombios_fini(struct amdgpu_device *adev)
1013{
89e0ec9f 1014 if (adev->mode_info.atom_context) {
d38ceaf9 1015 kfree(adev->mode_info.atom_context->scratch);
1016 kfree(adev->mode_info.atom_context->iio);
1017 }
1018 kfree(adev->mode_info.atom_context);
1019 adev->mode_info.atom_context = NULL;
1020 kfree(adev->mode_info.atom_card_info);
1021 adev->mode_info.atom_card_info = NULL;
5b41d94c 1022 device_remove_file(adev->dev, &dev_attr_vbios_version);
1023}
1024
1025/**
1026 * amdgpu_atombios_init - init the driver info and callbacks for atombios
1027 *
1028 * @adev: amdgpu_device pointer
1029 *
1030 * Initializes the driver info and register access callbacks for the
1031 * ATOM interpreter (r4xx+).
1032 * Returns 0 on success, -ENOMEM on failure.
1033 * Called at driver startup.
1034 */
1035static int amdgpu_atombios_init(struct amdgpu_device *adev)
1036{
1037 struct card_info *atom_card_info =
1038 kzalloc(sizeof(struct card_info), GFP_KERNEL);
5b41d94c 1039 int ret;
1040
1041 if (!atom_card_info)
1042 return -ENOMEM;
1043
1044 adev->mode_info.atom_card_info = atom_card_info;
1045 atom_card_info->dev = adev->ddev;
1046 atom_card_info->reg_read = cail_reg_read;
1047 atom_card_info->reg_write = cail_reg_write;
1048 /* needed for iio ops */
1049 if (adev->rio_mem) {
1050 atom_card_info->ioreg_read = cail_ioreg_read;
1051 atom_card_info->ioreg_write = cail_ioreg_write;
1052 } else {
9953b72f 1053 DRM_DEBUG("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
1054 atom_card_info->ioreg_read = cail_reg_read;
1055 atom_card_info->ioreg_write = cail_reg_write;
1056 }
1057 atom_card_info->mc_read = cail_mc_read;
1058 atom_card_info->mc_write = cail_mc_write;
1059 atom_card_info->pll_read = cail_pll_read;
1060 atom_card_info->pll_write = cail_pll_write;
1061
1062 adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
1063 if (!adev->mode_info.atom_context) {
1064 amdgpu_atombios_fini(adev);
1065 return -ENOMEM;
1066 }
1067
1068 mutex_init(&adev->mode_info.atom_context->mutex);
1069 if (adev->is_atom_fw) {
1070 amdgpu_atomfirmware_scratch_regs_init(adev);
1071 amdgpu_atomfirmware_allocate_fb_scratch(adev);
1072 } else {
1073 amdgpu_atombios_scratch_regs_init(adev);
1074 amdgpu_atombios_allocate_fb_scratch(adev);
1075 }
1076
1077 ret = device_create_file(adev->dev, &dev_attr_vbios_version);
1078 if (ret) {
1079 DRM_ERROR("Failed to create device file for VBIOS version\n");
1080 return ret;
1081 }
1082
1083 return 0;
1084}
1085
1086/* if we get transitioned to only one device, take VGA back */
1087/**
1088 * amdgpu_vga_set_decode - enable/disable vga decode
1089 *
1090 * @cookie: amdgpu_device pointer
1091 * @state: enable/disable vga decode
1092 *
1093 * Enable/disable vga decode (all asics).
1094 * Returns VGA resource flags.
1095 */
1096static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
1097{
1098 struct amdgpu_device *adev = cookie;
1099 amdgpu_asic_set_vga_state(adev, state);
1100 if (state)
1101 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1102 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1103 else
1104 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1105}
1106
bab4fee7 1107static void amdgpu_check_block_size(struct amdgpu_device *adev)
1108{
1109 /* defines number of bits in page table versus page directory,
1110 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1111 * page table and the remaining bits are in the page directory */
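	/* e.g. with 4 KB pages, amdgpu_vm_block_size = 9 means each last-level
	 * page table covers 512 entries * 4 KB = 2 MB of address space. */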
1112 if (amdgpu_vm_block_size == -1)
1113 return;
a1adf8be 1114
bab4fee7 1115 if (amdgpu_vm_block_size < 9) {
1116 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1117 amdgpu_vm_block_size);
bab4fee7 1118 goto def_value;
1119 }
1120
1121 if (amdgpu_vm_block_size > 24 ||
1122 (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
1123 dev_warn(adev->dev, "VM page table size (%d) too large\n",
1124 amdgpu_vm_block_size);
bab4fee7 1125 goto def_value;
a1adf8be 1126 }
1127
1128 return;
1129
1130def_value:
1131 amdgpu_vm_block_size = -1;
1132}
1133
1134static void amdgpu_check_vm_size(struct amdgpu_device *adev)
1135{
1136 /* no need to check the default value */
1137 if (amdgpu_vm_size == -1)
1138 return;
1139
76117507 1140 if (!is_power_of_2(amdgpu_vm_size)) {
1141 dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
1142 amdgpu_vm_size);
1143 goto def_value;
1144 }
1145
1146 if (amdgpu_vm_size < 1) {
1147 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1148 amdgpu_vm_size);
1149 goto def_value;
1150 }
1151
1152 /*
1153 * Max GPUVM size for Cayman, SI, CI and VI is 40 bits.
1154 */
1155 if (amdgpu_vm_size > 1024) {
1156 dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
1157 amdgpu_vm_size);
1158 goto def_value;
1159 }
1160
1161 return;
1162
1163def_value:
bab4fee7 1164 amdgpu_vm_size = -1;
1165}
1166
1167/**
1168 * amdgpu_check_arguments - validate module params
1169 *
1170 * @adev: amdgpu_device pointer
1171 *
1172 * Validates certain module parameters and updates
1173 * the associated values used by the driver (all asics).
1174 */
1175static void amdgpu_check_arguments(struct amdgpu_device *adev)
1176{
1177 if (amdgpu_sched_jobs < 4) {
1178 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1179 amdgpu_sched_jobs);
1180 amdgpu_sched_jobs = 4;
76117507 1181 } else if (!is_power_of_2(amdgpu_sched_jobs)){
1182 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1183 amdgpu_sched_jobs);
1184 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1185 }
d38ceaf9 1186
83e74db6 1187 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1188 /* gart size must be greater or equal to 32M */
1189 dev_warn(adev->dev, "gart size (%d) too small\n",
1190 amdgpu_gart_size);
83e74db6 1191 amdgpu_gart_size = -1;
1192 }
1193
36d38372 1194 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
c4e1a13a 1195 /* gtt size must be greater or equal to 32M */
1196 dev_warn(adev->dev, "gtt size (%d) too small\n",
1197 amdgpu_gtt_size);
1198 amdgpu_gtt_size = -1;
1199 }
1200
1201 /* valid range is between 4 and 9 inclusive */
1202 if (amdgpu_vm_fragment_size != -1 &&
1203 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1204 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1205 amdgpu_vm_fragment_size = -1;
1206 }
1207
83ca145d 1208 amdgpu_check_vm_size(adev);
d38ceaf9 1209
bab4fee7 1210 amdgpu_check_block_size(adev);
6a7f76e7 1211
526bae37 1212 if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
76117507 1213 !is_power_of_2(amdgpu_vram_page_split))) {
1214 dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
1215 amdgpu_vram_page_split);
1216 amdgpu_vram_page_split = 1024;
1217 }
1218}
1219
1220/**
1221 * amdgpu_switcheroo_set_state - set switcheroo state
1222 *
1223 * @pdev: pci dev pointer
1694467b 1224 * @state: vga_switcheroo state
1225 *
1226 * Callback for the switcheroo driver. Suspends or resumes the
1227 * asic before or after it is powered up using ACPI methods.
1228 */
1229static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1230{
1231 struct drm_device *dev = pci_get_drvdata(pdev);
1232
1233 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1234 return;
1235
1236 if (state == VGA_SWITCHEROO_ON) {
7ca85295 1237 pr_info("amdgpu: switched on\n");
1238 /* don't suspend or resume card normally */
1239 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1240
810ddc3a 1241 amdgpu_device_resume(dev, true, true);
d38ceaf9 1242
1243 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1244 drm_kms_helper_poll_enable(dev);
1245 } else {
7ca85295 1246 pr_info("amdgpu: switched off\n");
1247 drm_kms_helper_poll_disable(dev);
1248 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
810ddc3a 1249 amdgpu_device_suspend(dev, true, true);
1250 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1251 }
1252}
1253
1254/**
1255 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1256 *
1257 * @pdev: pci dev pointer
1258 *
1259 * Callback for the switcheroo driver. Check if the switcheroo
1260 * state can be changed.
1261 * Returns true if the state can be changed, false if not.
1262 */
1263static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1264{
1265 struct drm_device *dev = pci_get_drvdata(pdev);
1266
1267 /*
1268 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1269 * locking inversion with the driver load path. And the access here is
1270 * completely racy anyway. So don't bother with locking for now.
1271 */
1272 return dev->open_count == 0;
1273}
1274
1275static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1276 .set_gpu_state = amdgpu_switcheroo_set_state,
1277 .reprobe = NULL,
1278 .can_switch = amdgpu_switcheroo_can_switch,
1279};
1280
1281int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
5fc3aeeb 1282 enum amd_ip_block_type block_type,
1283 enum amd_clockgating_state state)
1284{
1285 int i, r = 0;
1286
1287 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1288 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1289 continue;
1290 if (adev->ip_blocks[i].version->type != block_type)
1291 continue;
1292 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1293 continue;
1294 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1295 (void *)adev, state);
1296 if (r)
1297 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1298 adev->ip_blocks[i].version->funcs->name, r);
1299 }
1300 return r;
1301}
1302
1303int amdgpu_set_powergating_state(struct amdgpu_device *adev,
5fc3aeeb 1304 enum amd_ip_block_type block_type,
1305 enum amd_powergating_state state)
1306{
1307 int i, r = 0;
1308
1309 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1310 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1311 continue;
1312 if (adev->ip_blocks[i].version->type != block_type)
1313 continue;
1314 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1315 continue;
1316 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1317 (void *)adev, state);
1318 if (r)
1319 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1320 adev->ip_blocks[i].version->funcs->name, r);
1321 }
1322 return r;
1323}
1324
1325void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
1326{
1327 int i;
1328
1329 for (i = 0; i < adev->num_ip_blocks; i++) {
1330 if (!adev->ip_blocks[i].status.valid)
1331 continue;
1332 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1333 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1334 }
1335}
1336
1337int amdgpu_wait_for_idle(struct amdgpu_device *adev,
1338 enum amd_ip_block_type block_type)
1339{
1340 int i, r;
1341
1342 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1343 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1344 continue;
1345 if (adev->ip_blocks[i].version->type == block_type) {
1346 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1347 if (r)
1348 return r;
1349 break;
1350 }
1351 }
1352 return 0;
1353
1354}
1355
1356bool amdgpu_is_idle(struct amdgpu_device *adev,
1357 enum amd_ip_block_type block_type)
1358{
1359 int i;
1360
1361 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1362 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1363 continue;
1364 if (adev->ip_blocks[i].version->type == block_type)
1365 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1366 }
1367 return true;
1368
1369}
1370
1371struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
1372 enum amd_ip_block_type type)
1373{
1374 int i;
1375
1376 for (i = 0; i < adev->num_ip_blocks; i++)
a1255107 1377 if (adev->ip_blocks[i].version->type == type)
1378 return &adev->ip_blocks[i];
1379
1380 return NULL;
1381}
1382
1383/**
1384 * amdgpu_ip_block_version_cmp
1385 *
1386 * @adev: amdgpu_device pointer
5fc3aeeb 1387 * @type: enum amd_ip_block_type
1388 * @major: major version
1389 * @minor: minor version
1390 *
1391 * return 0 if equal or greater
1392 * return 1 if smaller or the ip_block doesn't exist
1393 */
1394int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
5fc3aeeb 1395 enum amd_ip_block_type type,
1396 u32 major, u32 minor)
1397{
a1255107 1398 struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
d38ceaf9 1399
1400 if (ip_block && ((ip_block->version->major > major) ||
1401 ((ip_block->version->major == major) &&
1402 (ip_block->version->minor >= minor))))
1403 return 0;
1404
1405 return 1;
1406}
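/*
 * Usage sketch: a caller can gate behaviour on an IP version, e.g.
 * amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC, 7, 0) == 0
 * means an SMC block of version 7.0 or newer is present on this asic.
 */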
1407
1408/**
1409 * amdgpu_ip_block_add
1410 *
1411 * @adev: amdgpu_device pointer
1412 * @ip_block_version: pointer to the IP to add
1413 *
1414 * Adds the IP block driver information to the collection of IPs
1415 * on the asic.
1416 */
1417int amdgpu_ip_block_add(struct amdgpu_device *adev,
1418 const struct amdgpu_ip_block_version *ip_block_version)
1419{
1420 if (!ip_block_version)
1421 return -EINVAL;
1422
1423 DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
1424 ip_block_version->funcs->name);
1425
1426 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1427
1428 return 0;
1429}
1430
483ef985 1431static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1432{
1433 adev->enable_virtual_display = false;
1434
1435 if (amdgpu_virtual_display) {
1436 struct drm_device *ddev = adev->ddev;
1437 const char *pci_address_name = pci_name(ddev->pdev);
0f66356d 1438 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
9accf2fd
ED
1439
1440 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1441 pciaddstr_tmp = pciaddstr;
1442 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1443 pciaddname = strsep(&pciaddname_tmp, ",");
1444 if (!strcmp("all", pciaddname)
1445 || !strcmp(pci_address_name, pciaddname)) {
1446 long num_crtc;
1447 int res = -1;
1448
9accf2fd 1449 adev->enable_virtual_display = true;
1450
1451 if (pciaddname_tmp)
1452 res = kstrtol(pciaddname_tmp, 10,
1453 &num_crtc);
1454
1455 if (!res) {
1456 if (num_crtc < 1)
1457 num_crtc = 1;
1458 if (num_crtc > 6)
1459 num_crtc = 6;
1460 adev->mode_info.num_crtc = num_crtc;
1461 } else {
1462 adev->mode_info.num_crtc = 1;
1463 }
1464 break;
1465 }
1466 }
1467
1468 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1469 amdgpu_virtual_display, pci_address_name,
1470 adev->enable_virtual_display, adev->mode_info.num_crtc);
1471
1472 kfree(pciaddstr);
1473 }
1474}
1475
1476static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1477{
1478 const char *chip_name;
1479 char fw_name[30];
1480 int err;
1481 const struct gpu_info_firmware_header_v1_0 *hdr;
1482
1483 adev->firmware.gpu_info_fw = NULL;
1484
1485 switch (adev->asic_type) {
1486 case CHIP_TOPAZ:
1487 case CHIP_TONGA:
1488 case CHIP_FIJI:
1489 case CHIP_POLARIS11:
1490 case CHIP_POLARIS10:
1491 case CHIP_POLARIS12:
1492 case CHIP_CARRIZO:
1493 case CHIP_STONEY:
1494#ifdef CONFIG_DRM_AMDGPU_SI
1495 case CHIP_VERDE:
1496 case CHIP_TAHITI:
1497 case CHIP_PITCAIRN:
1498 case CHIP_OLAND:
1499 case CHIP_HAINAN:
1500#endif
1501#ifdef CONFIG_DRM_AMDGPU_CIK
1502 case CHIP_BONAIRE:
1503 case CHIP_HAWAII:
1504 case CHIP_KAVERI:
1505 case CHIP_KABINI:
1506 case CHIP_MULLINS:
1507#endif
1508 default:
1509 return 0;
1510 case CHIP_VEGA10:
1511 chip_name = "vega10";
1512 break;
1513 case CHIP_RAVEN:
1514 chip_name = "raven";
1515 break;
1516 }
1517
1518 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
ab4fe3e1 1519 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1520 if (err) {
1521 dev_err(adev->dev,
1522 "Failed to load gpu_info firmware \"%s\"\n",
1523 fw_name);
1524 goto out;
1525 }
ab4fe3e1 1526 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1527 if (err) {
1528 dev_err(adev->dev,
1529 "Failed to validate gpu_info firmware \"%s\"\n",
1530 fw_name);
1531 goto out;
1532 }
1533
ab4fe3e1 1534 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1535 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1536
1537 switch (hdr->version_major) {
1538 case 1:
1539 {
1540 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
ab4fe3e1 1541 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1542 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1543
1544 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1545 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1546 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1547 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
e2a75f88 1548 adev->gfx.config.max_texture_channel_caches =
1549 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1550 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1551 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1552 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1553 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
e2a75f88 1554 adev->gfx.config.double_offchip_lds_buf =
b5ab16bf
AD
1555 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1556 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1557 adev->gfx.cu_info.max_waves_per_simd =
1558 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1559 adev->gfx.cu_info.max_scratch_slots_per_cu =
1560 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1561 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1562 break;
1563 }
1564 default:
1565 dev_err(adev->dev,
1566 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1567 err = -EINVAL;
1568 goto out;
1569 }
1570out:
1571 return err;
1572}
1573
1574static int amdgpu_early_init(struct amdgpu_device *adev)
1575{
aaa36a97 1576 int i, r;
d38ceaf9 1577
483ef985 1578 amdgpu_device_enable_virtual_display(adev);
a6be7570 1579
d38ceaf9 1580 switch (adev->asic_type) {
1581 case CHIP_TOPAZ:
1582 case CHIP_TONGA:
48299f95 1583 case CHIP_FIJI:
1584 case CHIP_POLARIS11:
1585 case CHIP_POLARIS10:
c4642a47 1586 case CHIP_POLARIS12:
aaa36a97 1587 case CHIP_CARRIZO:
1588 case CHIP_STONEY:
1589 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
1590 adev->family = AMDGPU_FAMILY_CZ;
1591 else
1592 adev->family = AMDGPU_FAMILY_VI;
1593
1594 r = vi_set_ip_blocks(adev);
1595 if (r)
1596 return r;
1597 break;
1598#ifdef CONFIG_DRM_AMDGPU_SI
1599 case CHIP_VERDE:
1600 case CHIP_TAHITI:
1601 case CHIP_PITCAIRN:
1602 case CHIP_OLAND:
1603 case CHIP_HAINAN:
295d0daf 1604 adev->family = AMDGPU_FAMILY_SI;
1605 r = si_set_ip_blocks(adev);
1606 if (r)
1607 return r;
1608 break;
1609#endif
1610#ifdef CONFIG_DRM_AMDGPU_CIK
1611 case CHIP_BONAIRE:
1612 case CHIP_HAWAII:
1613 case CHIP_KAVERI:
1614 case CHIP_KABINI:
1615 case CHIP_MULLINS:
1616 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1617 adev->family = AMDGPU_FAMILY_CI;
1618 else
1619 adev->family = AMDGPU_FAMILY_KV;
1620
1621 r = cik_set_ip_blocks(adev);
1622 if (r)
1623 return r;
1624 break;
1625#endif
1626 case CHIP_VEGA10:
1627 case CHIP_RAVEN:
1628 if (adev->asic_type == CHIP_RAVEN)
1629 adev->family = AMDGPU_FAMILY_RV;
1630 else
1631 adev->family = AMDGPU_FAMILY_AI;
1632
1633 r = soc15_set_ip_blocks(adev);
1634 if (r)
1635 return r;
1636 break;
1637 default:
1638 /* FIXME: not supported yet */
1639 return -EINVAL;
1640 }
1641
1642 r = amdgpu_device_parse_gpu_info_fw(adev);
1643 if (r)
1644 return r;
1645
1646 if (amdgpu_sriov_vf(adev)) {
1647 r = amdgpu_virt_request_full_gpu(adev, true);
1648 if (r)
5ffa61c1 1649 return -EAGAIN;
1650 }
1651
1652 for (i = 0; i < adev->num_ip_blocks; i++) {
1653 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1654 DRM_ERROR("disabled ip block: %d <%s>\n",
1655 i, adev->ip_blocks[i].version->funcs->name);
a1255107 1656 adev->ip_blocks[i].status.valid = false;
d38ceaf9 1657 } else {
1658 if (adev->ip_blocks[i].version->funcs->early_init) {
1659 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2c1a2784 1660 if (r == -ENOENT) {
a1255107 1661 adev->ip_blocks[i].status.valid = false;
2c1a2784 1662 } else if (r) {
1663 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1664 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1665 return r;
2c1a2784 1666 } else {
a1255107 1667 adev->ip_blocks[i].status.valid = true;
2c1a2784 1668 }
974e6b64 1669 } else {
a1255107 1670 adev->ip_blocks[i].status.valid = true;
d38ceaf9 1671 }
1672 }
1673 }
1674
1675 adev->cg_flags &= amdgpu_cg_mask;
1676 adev->pg_flags &= amdgpu_pg_mask;
1677
1678 return 0;
1679}
1680
1681static int amdgpu_init(struct amdgpu_device *adev)
1682{
1683 int i, r;
1684
1685 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1686 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1687 continue;
a1255107 1688 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2c1a2784 1689 if (r) {
1690 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1691 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1692 return r;
2c1a2784 1693 }
a1255107 1694 adev->ip_blocks[i].status.sw = true;
d38ceaf9 1695 /* need to do gmc hw init early so we can allocate gpu mem */
a1255107 1696 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
d38ceaf9 1697 r = amdgpu_vram_scratch_init(adev);
1698 if (r) {
1699 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
d38ceaf9 1700 return r;
2c1a2784 1701 }
a1255107 1702 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1703 if (r) {
1704 DRM_ERROR("hw_init %d failed %d\n", i, r);
d38ceaf9 1705 return r;
2c1a2784 1706 }
d38ceaf9 1707 r = amdgpu_wb_init(adev);
1708 if (r) {
1709 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
d38ceaf9 1710 return r;
2c1a2784 1711 }
a1255107 1712 adev->ip_blocks[i].status.hw = true;
1713
1714 /* right after GMC hw init, we create CSA */
1715 if (amdgpu_sriov_vf(adev)) {
1716 r = amdgpu_allocate_static_csa(adev);
1717 if (r) {
1718 DRM_ERROR("allocate CSA failed %d\n", r);
1719 return r;
1720 }
1721 }
1722 }
1723 }
1724
1725 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1726 if (!adev->ip_blocks[i].status.sw)
1727 continue;
1728 /* gmc hw init is done early */
a1255107 1729 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
d38ceaf9 1730 continue;
a1255107 1731 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784 1732 if (r) {
1733 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1734 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1735 return r;
2c1a2784 1736 }
a1255107 1737 adev->ip_blocks[i].status.hw = true;
1738 }
1739
1740 return 0;
1741}
1742
1743static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
1744{
1745 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1746}
1747
1748static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
1749{
1750 return !!memcmp(adev->gart.ptr, adev->reset_magic,
1751 AMDGPU_RESET_MAGIC_NUM);
1752}
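/*
 * The reset magic is a snapshot of the first AMDGPU_RESET_MAGIC_NUM bytes
 * of the GART table, taken by amdgpu_fill_reset_magic() during late init;
 * after a GPU reset, amdgpu_check_vram_lost() compares the live GART
 * contents against that snapshot and treats a mismatch as an indication
 * that VRAM contents were lost across the reset.
 */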
1753
2dc80b00 1754static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
1755{
1756 int i = 0, r;
1757
1758 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1759 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1760 continue;
4a446d55 1761 /* skip CG for VCE/UVD, it's handled specially */
1762 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1763 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
4a446d55 1764 /* enable clockgating to save power */
1765 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1766 AMD_CG_STATE_GATE);
1767 if (r) {
1768 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
a1255107 1769 adev->ip_blocks[i].version->funcs->name, r);
1770 return r;
1771 }
b0b00ff1 1772 }
d38ceaf9 1773 }
1774 return 0;
1775}
1776
1777static int amdgpu_late_init(struct amdgpu_device *adev)
1778{
1779 int i = 0, r;
1780
1781 for (i = 0; i < adev->num_ip_blocks; i++) {
1782 if (!adev->ip_blocks[i].status.valid)
1783 continue;
1784 if (adev->ip_blocks[i].version->funcs->late_init) {
1785 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
1786 if (r) {
1787 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1788 adev->ip_blocks[i].version->funcs->name, r);
1789 return r;
1790 }
1791 adev->ip_blocks[i].status.late_initialized = true;
1792 }
1793 }
1794
1795 mod_delayed_work(system_wq, &adev->late_init_work,
1796 msecs_to_jiffies(AMDGPU_RESUME_MS));
d38ceaf9 1797
0c49e0b8 1798 amdgpu_fill_reset_magic(adev);
d38ceaf9
AD
1799
1800 return 0;
1801}
1802
1803static int amdgpu_fini(struct amdgpu_device *adev)
1804{
1805 int i, r;
1806
3e96dbfd
AD
1807 /* need to disable SMC first */
1808 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1809 if (!adev->ip_blocks[i].status.hw)
3e96dbfd 1810 continue;
a1255107 1811 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3e96dbfd 1812 /* ungate blocks before hw fini so that we can shut down the blocks safely */
a1255107
AD
1813 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1814 AMD_CG_STATE_UNGATE);
3e96dbfd
AD
1815 if (r) {
1816 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
a1255107 1817 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd
AD
1818 return r;
1819 }
a1255107 1820 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3e96dbfd
AD
1821 /* XXX handle errors */
1822 if (r) {
1823 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
a1255107 1824 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd 1825 }
a1255107 1826 adev->ip_blocks[i].status.hw = false;
3e96dbfd
AD
1827 break;
1828 }
1829 }
1830
d38ceaf9 1831 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1832 if (!adev->ip_blocks[i].status.hw)
d38ceaf9 1833 continue;
a1255107 1834 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
d38ceaf9
AD
1835 amdgpu_wb_fini(adev);
1836 amdgpu_vram_scratch_fini(adev);
1837 }
8201a67a
RZ
1838
1839 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1840 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1841 /* ungate blocks before hw fini so that we can shut down the blocks safely */
1842 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1843 AMD_CG_STATE_UNGATE);
1844 if (r) {
1845 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1846 adev->ip_blocks[i].version->funcs->name, r);
1847 return r;
1848 }
2c1a2784 1849 }
8201a67a 1850
a1255107 1851 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
d38ceaf9 1852 /* XXX handle errors */
2c1a2784 1853 if (r) {
a1255107
AD
1854 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1855 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1856 }
8201a67a 1857
a1255107 1858 adev->ip_blocks[i].status.hw = false;
d38ceaf9
AD
1859 }
1860
1861 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1862 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 1863 continue;
a1255107 1864 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
d38ceaf9 1865 /* XXX handle errors */
2c1a2784 1866 if (r) {
a1255107
AD
1867 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1868 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1869 }
a1255107
AD
1870 adev->ip_blocks[i].status.sw = false;
1871 adev->ip_blocks[i].status.valid = false;
d38ceaf9
AD
1872 }
1873
a6dcfd9c 1874 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1875 if (!adev->ip_blocks[i].status.late_initialized)
8a2eef1d 1876 continue;
a1255107
AD
1877 if (adev->ip_blocks[i].version->funcs->late_fini)
1878 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1879 adev->ip_blocks[i].status.late_initialized = false;
a6dcfd9c
ML
1880 }
1881
030308fc 1882 if (amdgpu_sriov_vf(adev))
3149d9da 1883 amdgpu_virt_release_full_gpu(adev, false);
2493664f 1884
d38ceaf9
AD
1885 return 0;
1886}
1887
2dc80b00
S
1888static void amdgpu_late_init_func_handler(struct work_struct *work)
1889{
1890 struct amdgpu_device *adev =
1891 container_of(work, struct amdgpu_device, late_init_work.work);
1892 amdgpu_late_set_cg_state(adev);
1893}
1894
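/*
 * Illustrative sketch, not driver code: the deferred clock-gating above is
 * built from three pieces spread across this file -- INIT_DELAYED_WORK() at
 * device init, mod_delayed_work() in amdgpu_late_init(), and
 * cancel_delayed_work_sync() in amdgpu_device_fini(). The same pattern is
 * shown here as a standalone module with hypothetical names.
 */
#if 0	/* standalone example, not part of amdgpu */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work demo_work;

static void demo_work_handler(struct work_struct *work)
{
	pr_info("deferred step runs roughly two seconds after scheduling\n");
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&demo_work, demo_work_handler);
	/* mirror amdgpu_late_init(): push the expensive step off by 2000 ms */
	mod_delayed_work(system_wq, &demo_work, msecs_to_jiffies(2000));
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
#endif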
faefba95 1895int amdgpu_suspend(struct amdgpu_device *adev)
d38ceaf9
AD
1896{
1897 int i, r;
1898
e941ea99
XY
1899 if (amdgpu_sriov_vf(adev))
1900 amdgpu_virt_request_full_gpu(adev, false);
1901
c5a93a28
FC
1902 /* ungate SMC block first */
1903 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1904 AMD_CG_STATE_UNGATE);
1905 if (r) {
1906 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
1907 }
1908
d38ceaf9 1909 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1910 if (!adev->ip_blocks[i].status.valid)
d38ceaf9
AD
1911 continue;
1912 /* ungate blocks so that suspend can properly shut them down */
c5a93a28 1913 if (i != AMD_IP_BLOCK_TYPE_SMC) {
a1255107
AD
1914 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1915 AMD_CG_STATE_UNGATE);
c5a93a28 1916 if (r) {
a1255107
AD
1917 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1918 adev->ip_blocks[i].version->funcs->name, r);
c5a93a28 1919 }
2c1a2784 1920 }
d38ceaf9 1921 /* XXX handle errors */
a1255107 1922 r = adev->ip_blocks[i].version->funcs->suspend(adev);
d38ceaf9 1923 /* XXX handle errors */
2c1a2784 1924 if (r) {
a1255107
AD
1925 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1926 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1927 }
d38ceaf9
AD
1928 }
1929
e941ea99
XY
1930 if (amdgpu_sriov_vf(adev))
1931 amdgpu_virt_release_full_gpu(adev, false);
1932
d38ceaf9
AD
1933 return 0;
1934}
1935
e4f0fdcc 1936static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
a90ad3c2
ML
1937{
1938 int i, r;
1939
2cb681b6
ML
1940 static enum amd_ip_block_type ip_order[] = {
1941 AMD_IP_BLOCK_TYPE_GMC,
1942 AMD_IP_BLOCK_TYPE_COMMON,
2cb681b6
ML
1943 AMD_IP_BLOCK_TYPE_IH,
1944 };
a90ad3c2 1945
2cb681b6
ML
1946 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1947 int j;
1948 struct amdgpu_ip_block *block;
a90ad3c2 1949
2cb681b6
ML
1950 for (j = 0; j < adev->num_ip_blocks; j++) {
1951 block = &adev->ip_blocks[j];
1952
1953 if (block->version->type != ip_order[i] ||
1954 !block->status.valid)
1955 continue;
1956
1957 r = block->version->funcs->hw_init(adev);
1958 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
a90ad3c2
ML
1959 }
1960 }
1961
1962 return 0;
1963}
1964
e4f0fdcc 1965static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
a90ad3c2
ML
1966{
1967 int i, r;
1968
2cb681b6
ML
1969 static enum amd_ip_block_type ip_order[] = {
1970 AMD_IP_BLOCK_TYPE_SMC,
ef4c166d 1971 AMD_IP_BLOCK_TYPE_PSP,
2cb681b6
ML
1972 AMD_IP_BLOCK_TYPE_DCE,
1973 AMD_IP_BLOCK_TYPE_GFX,
1974 AMD_IP_BLOCK_TYPE_SDMA,
257deb8c
FM
1975 AMD_IP_BLOCK_TYPE_UVD,
1976 AMD_IP_BLOCK_TYPE_VCE
2cb681b6 1977 };
a90ad3c2 1978
2cb681b6
ML
1979 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1980 int j;
1981 struct amdgpu_ip_block *block;
a90ad3c2 1982
2cb681b6
ML
1983 for (j = 0; j < adev->num_ip_blocks; j++) {
1984 block = &adev->ip_blocks[j];
1985
1986 if (block->version->type != ip_order[i] ||
1987 !block->status.valid)
1988 continue;
1989
1990 r = block->version->funcs->hw_init(adev);
1991 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
a90ad3c2
ML
1992 }
1993 }
1994
1995 return 0;
1996}
1997
fcf0649f 1998static int amdgpu_resume_phase1(struct amdgpu_device *adev)
d38ceaf9
AD
1999{
2000 int i, r;
2001
a90ad3c2
ML
2002 for (i = 0; i < adev->num_ip_blocks; i++) {
2003 if (!adev->ip_blocks[i].status.valid)
2004 continue;
a90ad3c2
ML
2005 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2006 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
fcf0649f
CZ
2007 adev->ip_blocks[i].version->type ==
2008 AMD_IP_BLOCK_TYPE_IH) {
2009 r = adev->ip_blocks[i].version->funcs->resume(adev);
2010 if (r) {
2011 DRM_ERROR("resume of IP block <%s> failed %d\n",
2012 adev->ip_blocks[i].version->funcs->name, r);
2013 return r;
2014 }
a90ad3c2
ML
2015 }
2016 }
2017
2018 return 0;
2019}
2020
fcf0649f 2021static int amdgpu_resume_phase2(struct amdgpu_device *adev)
d38ceaf9
AD
2022{
2023 int i, r;
2024
2025 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2026 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 2027 continue;
fcf0649f
CZ
2028 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2029 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2030 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
2031 continue;
a1255107 2032 r = adev->ip_blocks[i].version->funcs->resume(adev);
2c1a2784 2033 if (r) {
a1255107
AD
2034 DRM_ERROR("resume of IP block <%s> failed %d\n",
2035 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 2036 return r;
2c1a2784 2037 }
d38ceaf9
AD
2038 }
2039
2040 return 0;
2041}
2042
fcf0649f
CZ
2043static int amdgpu_resume(struct amdgpu_device *adev)
2044{
2045 int r;
2046
2047 r = amdgpu_resume_phase1(adev);
2048 if (r)
2049 return r;
2050 r = amdgpu_resume_phase2(adev);
2051
2052 return r;
2053}
2054
4e99a44e 2055static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 2056{
6867e1b5
ML
2057 if (amdgpu_sriov_vf(adev)) {
2058 if (adev->is_atom_fw) {
2059 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2060 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2061 } else {
2062 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2063 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2064 }
2065
2066 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2067 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
a5bde2f9 2068 }
048765ad
AR
2069}
2070
4562236b
HW
2071bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2072{
2073 switch (asic_type) {
2074#if defined(CONFIG_DRM_AMD_DC)
2075 case CHIP_BONAIRE:
2076 case CHIP_HAWAII:
0d6fbccb 2077 case CHIP_KAVERI:
4562236b
HW
2078 case CHIP_CARRIZO:
2079 case CHIP_STONEY:
2080 case CHIP_POLARIS11:
2081 case CHIP_POLARIS10:
2c8ad2d5 2082 case CHIP_POLARIS12:
4562236b
HW
2083 case CHIP_TONGA:
2084 case CHIP_FIJI:
2085#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
2086 return amdgpu_dc != 0;
4562236b 2087#endif
17b7cf8c
AD
2088 case CHIP_KABINI:
2089 case CHIP_MULLINS:
2090 return amdgpu_dc > 0;
42f8ffa1
HW
2091 case CHIP_VEGA10:
2092#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
fd187853 2093 case CHIP_RAVEN:
42f8ffa1 2094#endif
fd187853 2095 return amdgpu_dc != 0;
4562236b
HW
2096#endif
2097 default:
2098 return false;
2099 }
2100}
2101
2102/**
2103 * amdgpu_device_has_dc_support - check if dc is supported
2104 *
2105 * @adev: amdgpu_device pointer
2106 *
2107 * Returns true for supported, false for not supported
2108 */
2109bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2110{
2555039d
XY
2111 if (amdgpu_sriov_vf(adev))
2112 return false;
2113
4562236b
HW
2114 return amdgpu_device_asic_has_dc_support(adev->asic_type);
2115}
2116
d38ceaf9
AD
2117/**
2118 * amdgpu_device_init - initialize the driver
2119 *
2120 * @adev: amdgpu_device pointer
2121 * @ddev: drm dev pointer
2122 * @pdev: pci dev pointer
2123 * @flags: driver flags
2124 *
2125 * Initializes the driver info and hw (all asics).
2126 * Returns 0 for success or an error on failure.
2127 * Called at driver startup.
2128 */
2129int amdgpu_device_init(struct amdgpu_device *adev,
2130 struct drm_device *ddev,
2131 struct pci_dev *pdev,
2132 uint32_t flags)
2133{
2134 int r, i;
2135 bool runtime = false;
95844d20 2136 u32 max_MBps;
d38ceaf9
AD
2137
2138 adev->shutdown = false;
2139 adev->dev = &pdev->dev;
2140 adev->ddev = ddev;
2141 adev->pdev = pdev;
2142 adev->flags = flags;
2f7d10b3 2143 adev->asic_type = flags & AMD_ASIC_MASK;
d38ceaf9 2144 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
6f02a696 2145 adev->mc.gart_size = 512 * 1024 * 1024;
d38ceaf9
AD
2146 adev->accel_working = false;
2147 adev->num_rings = 0;
2148 adev->mman.buffer_funcs = NULL;
2149 adev->mman.buffer_funcs_ring = NULL;
2150 adev->vm_manager.vm_pte_funcs = NULL;
2d55e45a 2151 adev->vm_manager.vm_pte_num_rings = 0;
d38ceaf9 2152 adev->gart.gart_funcs = NULL;
f54d1867 2153 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
b8866c26 2154 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
d38ceaf9
AD
2155
2156 adev->smc_rreg = &amdgpu_invalid_rreg;
2157 adev->smc_wreg = &amdgpu_invalid_wreg;
2158 adev->pcie_rreg = &amdgpu_invalid_rreg;
2159 adev->pcie_wreg = &amdgpu_invalid_wreg;
36b9a952
HR
2160 adev->pciep_rreg = &amdgpu_invalid_rreg;
2161 adev->pciep_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
2162 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2163 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2164 adev->didt_rreg = &amdgpu_invalid_rreg;
2165 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
2166 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2167 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
2168 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2169 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2170
3e39ab90
AD
2171 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2172 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2173 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
2174
2175 /* mutex initialization is all done here so we
2176 * can recall functions without locking issues */
d38ceaf9 2177 atomic_set(&adev->irq.ih.lock, 0);
0e5ca0d1 2178 mutex_init(&adev->firmware.mutex);
d38ceaf9
AD
2179 mutex_init(&adev->pm.mutex);
2180 mutex_init(&adev->gfx.gpu_clock_mutex);
2181 mutex_init(&adev->srbm_mutex);
b8866c26 2182 mutex_init(&adev->gfx.pipe_reserve_mutex);
d38ceaf9 2183 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9 2184 mutex_init(&adev->mn_lock);
e23b74aa 2185 mutex_init(&adev->virt.vf_errors.lock);
d38ceaf9 2186 hash_init(adev->mn_hash);
13a752e3 2187 mutex_init(&adev->lock_reset);
d38ceaf9
AD
2188
2189 amdgpu_check_arguments(adev);
2190
d38ceaf9
AD
2191 spin_lock_init(&adev->mmio_idx_lock);
2192 spin_lock_init(&adev->smc_idx_lock);
2193 spin_lock_init(&adev->pcie_idx_lock);
2194 spin_lock_init(&adev->uvd_ctx_idx_lock);
2195 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 2196 spin_lock_init(&adev->gc_cac_idx_lock);
16abb5d2 2197 spin_lock_init(&adev->se_cac_idx_lock);
d38ceaf9 2198 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 2199 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 2200
0c4e7fa5
CZ
2201 INIT_LIST_HEAD(&adev->shadow_list);
2202 mutex_init(&adev->shadow_list_lock);
2203
795f2813
AR
2204 INIT_LIST_HEAD(&adev->ring_lru_list);
2205 spin_lock_init(&adev->ring_lru_list_lock);
2206
2dc80b00
S
2207 INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);
2208
0fa49558
AX
2209 /* Registers mapping */
2210 /* TODO: block userspace mapping of io register */
da69c161
KW
2211 if (adev->asic_type >= CHIP_BONAIRE) {
2212 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2213 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2214 } else {
2215 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2216 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2217 }
d38ceaf9 2218
d38ceaf9
AD
2219 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2220 if (adev->rmmio == NULL) {
2221 return -ENOMEM;
2222 }
2223 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2224 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2225
705e519e
CK
2226 /* doorbell bar mapping */
2227 amdgpu_doorbell_init(adev);
d38ceaf9
AD
2228
2229 /* io port mapping */
2230 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2231 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2232 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2233 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2234 break;
2235 }
2236 }
2237 if (adev->rio_mem == NULL)
b64a18c5 2238 DRM_INFO("PCI I/O BAR not found.\n");
d38ceaf9
AD
2239
2240 /* early init functions */
2241 r = amdgpu_early_init(adev);
2242 if (r)
2243 return r;
2244
2245 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2246 /* this will fail for cards that aren't VGA class devices, just
2247 * ignore it */
2248 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
2249
2250 if (amdgpu_runtime_pm == 1)
2251 runtime = true;
e9bef455 2252 if (amdgpu_device_is_px(ddev))
d38ceaf9 2253 runtime = true;
84c8b22e
LW
2254 if (!pci_is_thunderbolt_attached(adev->pdev))
2255 vga_switcheroo_register_client(adev->pdev,
2256 &amdgpu_switcheroo_ops, runtime);
d38ceaf9
AD
2257 if (runtime)
2258 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2259
2260 /* Read BIOS */
83ba126a
AD
2261 if (!amdgpu_get_bios(adev)) {
2262 r = -EINVAL;
2263 goto failed;
2264 }
f7e9e9fe 2265
d38ceaf9 2266 r = amdgpu_atombios_init(adev);
2c1a2784
AD
2267 if (r) {
2268 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
e23b74aa 2269 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
83ba126a 2270 goto failed;
2c1a2784 2271 }
d38ceaf9 2272
4e99a44e
ML
2273 /* detect whether we are running with an SR-IOV vBIOS */
2274 amdgpu_device_detect_sriov_bios(adev);
048765ad 2275
d38ceaf9 2276 /* Post card if necessary */
91fe77eb 2277 if (amdgpu_need_post(adev)) {
d38ceaf9 2278 if (!adev->bios) {
bec86378 2279 dev_err(adev->dev, "no vBIOS found\n");
83ba126a
AD
2280 r = -EINVAL;
2281 goto failed;
d38ceaf9 2282 }
bec86378 2283 DRM_INFO("GPU posting now...\n");
4e99a44e
ML
2284 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2285 if (r) {
2286 dev_err(adev->dev, "gpu post error!\n");
2287 goto failed;
2288 }
d38ceaf9
AD
2289 }
2290
88b64e95
AD
2291 if (adev->is_atom_fw) {
2292 /* Initialize clocks */
2293 r = amdgpu_atomfirmware_get_clock_info(adev);
2294 if (r) {
2295 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
e23b74aa 2296 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
88b64e95
AD
2297 goto failed;
2298 }
2299 } else {
a5bde2f9
AD
2300 /* Initialize clocks */
2301 r = amdgpu_atombios_get_clock_info(adev);
2302 if (r) {
2303 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
e23b74aa 2304 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
89041940 2305 goto failed;
a5bde2f9
AD
2306 }
2307 /* init i2c buses */
4562236b
HW
2308 if (!amdgpu_device_has_dc_support(adev))
2309 amdgpu_atombios_i2c_init(adev);
2c1a2784 2310 }
d38ceaf9
AD
2311
2312 /* Fence driver */
2313 r = amdgpu_fence_driver_init(adev);
2c1a2784
AD
2314 if (r) {
2315 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
e23b74aa 2316 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
83ba126a 2317 goto failed;
2c1a2784 2318 }
d38ceaf9
AD
2319
2320 /* init the mode config */
2321 drm_mode_config_init(adev->ddev);
2322
2323 r = amdgpu_init(adev);
2324 if (r) {
8840a387 2325 /* failed in exclusive mode due to timeout */
2326 if (amdgpu_sriov_vf(adev) &&
2327 !amdgpu_sriov_runtime(adev) &&
2328 amdgpu_virt_mmio_blocked(adev) &&
2329 !amdgpu_virt_wait_reset(adev)) {
2330 dev_err(adev->dev, "VF exclusive mode timeout\n");
2331 r = -EAGAIN;
2332 goto failed;
2333 }
2c1a2784 2334 dev_err(adev->dev, "amdgpu_init failed\n");
e23b74aa 2335 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
d38ceaf9 2336 amdgpu_fini(adev);
83ba126a 2337 goto failed;
d38ceaf9
AD
2338 }
2339
2340 adev->accel_working = true;
2341
e59c0205
AX
2342 amdgpu_vm_check_compute_bug(adev);
2343
95844d20
MO
2344 /* Initialize the buffer migration limit. */
2345 if (amdgpu_moverate >= 0)
2346 max_MBps = amdgpu_moverate;
2347 else
2348 max_MBps = 8; /* Allow 8 MB/s. */
2349 /* Get a log2 for easy divisions. */
2350 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2351
d38ceaf9
AD
2352 r = amdgpu_ib_pool_init(adev);
2353 if (r) {
2354 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
e23b74aa 2355 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
83ba126a 2356 goto failed;
d38ceaf9
AD
2357 }
2358
2359 r = amdgpu_ib_ring_tests(adev);
2360 if (r)
2361 DRM_ERROR("ib ring test failed (%d).\n", r);
2362
2dc8f81e
HC
2363 if (amdgpu_sriov_vf(adev))
2364 amdgpu_virt_init_data_exchange(adev);
2365
9bc92b9c
ML
2366 amdgpu_fbdev_init(adev);
2367
d2f52ac8
RZ
2368 r = amdgpu_pm_sysfs_init(adev);
2369 if (r)
2370 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2371
d38ceaf9 2372 r = amdgpu_gem_debugfs_init(adev);
3f14e623 2373 if (r)
d38ceaf9 2374 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
d38ceaf9
AD
2375
2376 r = amdgpu_debugfs_regs_init(adev);
3f14e623 2377 if (r)
d38ceaf9 2378 DRM_ERROR("registering register debugfs failed (%d).\n", r);
d38ceaf9 2379
4f0955fc
HR
2380 r = amdgpu_debugfs_test_ib_ring_init(adev);
2381 if (r)
2382 DRM_ERROR("registering register test ib ring debugfs failed (%d).\n", r);
2383
50ab2533 2384 r = amdgpu_debugfs_firmware_init(adev);
3f14e623 2385 if (r)
50ab2533 2386 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
50ab2533 2387
db95e218
KR
2388 r = amdgpu_debugfs_vbios_dump_init(adev);
2389 if (r)
2390 DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r);
2391
d38ceaf9
AD
2392 if ((amdgpu_testing & 1)) {
2393 if (adev->accel_working)
2394 amdgpu_test_moves(adev);
2395 else
2396 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2397 }
d38ceaf9
AD
2398 if (amdgpu_benchmarking) {
2399 if (adev->accel_working)
2400 amdgpu_benchmark(adev, amdgpu_benchmarking);
2401 else
2402 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2403 }
2404
2405 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2406 * explicit gating rather than handling it automatically.
2407 */
2408 r = amdgpu_late_init(adev);
2c1a2784
AD
2409 if (r) {
2410 dev_err(adev->dev, "amdgpu_late_init failed\n");
e23b74aa 2411 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
83ba126a 2412 goto failed;
2c1a2784 2413 }
d38ceaf9
AD
2414
2415 return 0;
83ba126a
AD
2416
2417failed:
89041940 2418 amdgpu_vf_error_trans_all(adev);
83ba126a
AD
2419 if (runtime)
2420 vga_switcheroo_fini_domain_pm_ops(adev->dev);
8840a387 2421
83ba126a 2422 return r;
d38ceaf9
AD
2423}
2424
d38ceaf9
AD
2425/**
2426 * amdgpu_device_fini - tear down the driver
2427 *
2428 * @adev: amdgpu_device pointer
2429 *
2430 * Tear down the driver info (all asics).
2431 * Called at driver shutdown.
2432 */
2433void amdgpu_device_fini(struct amdgpu_device *adev)
2434{
2435 int r;
2436
2437 DRM_INFO("amdgpu: finishing device.\n");
2438 adev->shutdown = true;
db2c2a97
PD
2439 if (adev->mode_info.mode_config_initialized)
2440 drm_crtc_force_disable_all(adev->ddev);
d38ceaf9
AD
2441 /* evict vram memory */
2442 amdgpu_bo_evict_vram(adev);
2443 amdgpu_ib_pool_fini(adev);
a05502e5 2444 amdgpu_fw_reserve_vram_fini(adev);
d38ceaf9
AD
2445 amdgpu_fence_driver_fini(adev);
2446 amdgpu_fbdev_fini(adev);
2447 r = amdgpu_fini(adev);
ab4fe3e1
HR
2448 if (adev->firmware.gpu_info_fw) {
2449 release_firmware(adev->firmware.gpu_info_fw);
2450 adev->firmware.gpu_info_fw = NULL;
2451 }
d38ceaf9 2452 adev->accel_working = false;
2dc80b00 2453 cancel_delayed_work_sync(&adev->late_init_work);
d38ceaf9 2454 /* free i2c buses */
4562236b
HW
2455 if (!amdgpu_device_has_dc_support(adev))
2456 amdgpu_i2c_fini(adev);
d38ceaf9
AD
2457 amdgpu_atombios_fini(adev);
2458 kfree(adev->bios);
2459 adev->bios = NULL;
84c8b22e
LW
2460 if (!pci_is_thunderbolt_attached(adev->pdev))
2461 vga_switcheroo_unregister_client(adev->pdev);
83ba126a
AD
2462 if (adev->flags & AMD_IS_PX)
2463 vga_switcheroo_fini_domain_pm_ops(adev->dev);
d38ceaf9
AD
2464 vga_client_register(adev->pdev, NULL, NULL, NULL);
2465 if (adev->rio_mem)
2466 pci_iounmap(adev->pdev, adev->rio_mem);
2467 adev->rio_mem = NULL;
2468 iounmap(adev->rmmio);
2469 adev->rmmio = NULL;
705e519e 2470 amdgpu_doorbell_fini(adev);
d2f52ac8 2471 amdgpu_pm_sysfs_fini(adev);
d38ceaf9 2472 amdgpu_debugfs_regs_cleanup(adev);
d38ceaf9
AD
2473}
2474
2475
2476/*
2477 * Suspend & resume.
2478 */
2479/**
810ddc3a 2480 * amdgpu_device_suspend - initiate device suspend
d38ceaf9
AD
2481 *
2482 * @dev: drm dev pointer
2483 * @suspend: put the device into D3hot if true
2484 *
2485 * Puts the hw in the suspend state (all asics).
2486 * Returns 0 for success or an error on failure.
2487 * Called at driver suspend.
2488 */
810ddc3a 2489int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
d38ceaf9
AD
2490{
2491 struct amdgpu_device *adev;
2492 struct drm_crtc *crtc;
2493 struct drm_connector *connector;
5ceb54c6 2494 int r;
d38ceaf9
AD
2495
2496 if (dev == NULL || dev->dev_private == NULL) {
2497 return -ENODEV;
2498 }
2499
2500 adev = dev->dev_private;
2501
2502 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2503 return 0;
2504
2505 drm_kms_helper_poll_disable(dev);
2506
4562236b
HW
2507 if (!amdgpu_device_has_dc_support(adev)) {
2508 /* turn off display hw */
2509 drm_modeset_lock_all(dev);
2510 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2511 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2512 }
2513 drm_modeset_unlock_all(dev);
d38ceaf9
AD
2514 }
2515
ba997709
YZ
2516 amdgpu_amdkfd_suspend(adev);
2517
756e6880 2518 /* unpin the front buffers and cursors */
d38ceaf9 2519 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
756e6880 2520 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
d38ceaf9
AD
2521 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2522 struct amdgpu_bo *robj;
2523
756e6880
AD
2524 if (amdgpu_crtc->cursor_bo) {
2525 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2526 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2527 if (r == 0) {
2528 amdgpu_bo_unpin(aobj);
2529 amdgpu_bo_unreserve(aobj);
2530 }
2531 }
2532
d38ceaf9
AD
2533 if (rfb == NULL || rfb->obj == NULL) {
2534 continue;
2535 }
2536 robj = gem_to_amdgpu_bo(rfb->obj);
2537 /* don't unpin kernel fb objects */
2538 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
7a6901d7 2539 r = amdgpu_bo_reserve(robj, true);
d38ceaf9
AD
2540 if (r == 0) {
2541 amdgpu_bo_unpin(robj);
2542 amdgpu_bo_unreserve(robj);
2543 }
2544 }
2545 }
2546 /* evict vram memory */
2547 amdgpu_bo_evict_vram(adev);
2548
5ceb54c6 2549 amdgpu_fence_driver_suspend(adev);
d38ceaf9
AD
2550
2551 r = amdgpu_suspend(adev);
2552
a0a71e49
AD
2553 /* evict remaining vram memory
2554 * This second call to evict vram is to evict the gart page table
2555 * using the CPU.
2556 */
d38ceaf9
AD
2557 amdgpu_bo_evict_vram(adev);
2558
d05da0e2 2559 amdgpu_atombios_scratch_regs_save(adev);
d38ceaf9
AD
2560 pci_save_state(dev->pdev);
2561 if (suspend) {
2562 /* Shut down the device */
2563 pci_disable_device(dev->pdev);
2564 pci_set_power_state(dev->pdev, PCI_D3hot);
74b0b157 2565 } else {
2566 r = amdgpu_asic_reset(adev);
2567 if (r)
2568 DRM_ERROR("amdgpu asic reset failed\n");
d38ceaf9
AD
2569 }
2570
2571 if (fbcon) {
2572 console_lock();
2573 amdgpu_fbdev_set_suspend(adev, 1);
2574 console_unlock();
2575 }
2576 return 0;
2577}
2578
2579/**
810ddc3a 2580 * amdgpu_device_resume - initiate device resume
d38ceaf9
AD
2581 *
2582 * @dev: drm dev pointer
2583 *
2584 * Bring the hw back to operating state (all asics).
2585 * Returns 0 for success or an error on failure.
2586 * Called at driver resume.
2587 */
810ddc3a 2588int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
d38ceaf9
AD
2589{
2590 struct drm_connector *connector;
2591 struct amdgpu_device *adev = dev->dev_private;
756e6880 2592 struct drm_crtc *crtc;
03161a6e 2593 int r = 0;
d38ceaf9
AD
2594
2595 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2596 return 0;
2597
74b0b157 2598 if (fbcon)
d38ceaf9 2599 console_lock();
74b0b157 2600
d38ceaf9
AD
2601 if (resume) {
2602 pci_set_power_state(dev->pdev, PCI_D0);
2603 pci_restore_state(dev->pdev);
74b0b157 2604 r = pci_enable_device(dev->pdev);
03161a6e
HR
2605 if (r)
2606 goto unlock;
d38ceaf9 2607 }
d05da0e2 2608 amdgpu_atombios_scratch_regs_restore(adev);
d38ceaf9
AD
2609
2610 /* post card */
c836fec5 2611 if (amdgpu_need_post(adev)) {
74b0b157 2612 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2613 if (r)
2614 DRM_ERROR("amdgpu asic init failed\n");
2615 }
d38ceaf9
AD
2616
2617 r = amdgpu_resume(adev);
e6707218 2618 if (r) {
ca198528 2619 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
03161a6e 2620 goto unlock;
e6707218 2621 }
5ceb54c6
AD
2622 amdgpu_fence_driver_resume(adev);
2623
ca198528
FC
2624 if (resume) {
2625 r = amdgpu_ib_ring_tests(adev);
2626 if (r)
2627 DRM_ERROR("ib ring test failed (%d).\n", r);
2628 }
d38ceaf9
AD
2629
2630 r = amdgpu_late_init(adev);
03161a6e
HR
2631 if (r)
2632 goto unlock;
d38ceaf9 2633
756e6880
AD
2634 /* pin cursors */
2635 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2636 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2637
2638 if (amdgpu_crtc->cursor_bo) {
2639 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2640 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2641 if (r == 0) {
2642 r = amdgpu_bo_pin(aobj,
2643 AMDGPU_GEM_DOMAIN_VRAM,
2644 &amdgpu_crtc->cursor_addr);
2645 if (r != 0)
2646 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2647 amdgpu_bo_unreserve(aobj);
2648 }
2649 }
2650 }
ba997709
YZ
2651 r = amdgpu_amdkfd_resume(adev);
2652 if (r)
2653 return r;
756e6880 2654
d38ceaf9
AD
2655 /* blat the mode back in */
2656 if (fbcon) {
4562236b
HW
2657 if (!amdgpu_device_has_dc_support(adev)) {
2658 /* pre DCE11 */
2659 drm_helper_resume_force_mode(dev);
2660
2661 /* turn on display hw */
2662 drm_modeset_lock_all(dev);
2663 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2664 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2665 }
2666 drm_modeset_unlock_all(dev);
2667 } else {
2668 /*
2669 * There is no equivalent atomic helper to turn on
2670 * display, so we defined our own function for this,
2671 * once suspend resume is supported by the atomic
2672 * framework this will be reworked
2673 */
2674 amdgpu_dm_display_resume(adev);
d38ceaf9
AD
2675 }
2676 }
2677
2678 drm_kms_helper_poll_enable(dev);
23a1a9e5
L
2679
2680 /*
2681 * Most of the connector probing functions try to acquire runtime pm
2682 * refs to ensure that the GPU is powered on when connector polling is
2683 * performed. Since we're calling this from a runtime PM callback,
2684 * trying to acquire rpm refs will cause us to deadlock.
2685 *
2686 * Since we're guaranteed to be holding the rpm lock, it's safe to
2687 * temporarily disable the rpm helpers so this doesn't deadlock us.
2688 */
2689#ifdef CONFIG_PM
2690 dev->dev->power.disable_depth++;
2691#endif
4562236b
HW
2692 if (!amdgpu_device_has_dc_support(adev))
2693 drm_helper_hpd_irq_event(dev);
2694 else
2695 drm_kms_helper_hotplug_event(dev);
23a1a9e5
L
2696#ifdef CONFIG_PM
2697 dev->dev->power.disable_depth--;
2698#endif
d38ceaf9 2699
03161a6e 2700 if (fbcon)
d38ceaf9 2701 amdgpu_fbdev_set_suspend(adev, 0);
03161a6e
HR
2702
2703unlock:
2704 if (fbcon)
d38ceaf9 2705 console_unlock();
d38ceaf9 2706
03161a6e 2707 return r;
d38ceaf9
AD
2708}
2709
63fbf42f
CZ
2710static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2711{
2712 int i;
2713 bool asic_hang = false;
2714
f993d628
ML
2715 if (amdgpu_sriov_vf(adev))
2716 return true;
2717
63fbf42f 2718 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2719 if (!adev->ip_blocks[i].status.valid)
63fbf42f 2720 continue;
a1255107
AD
2721 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2722 adev->ip_blocks[i].status.hang =
2723 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2724 if (adev->ip_blocks[i].status.hang) {
2725 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
63fbf42f
CZ
2726 asic_hang = true;
2727 }
2728 }
2729 return asic_hang;
2730}
2731
4d446656 2732static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
d31a501e
CZ
2733{
2734 int i, r = 0;
2735
2736 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2737 if (!adev->ip_blocks[i].status.valid)
d31a501e 2738 continue;
a1255107
AD
2739 if (adev->ip_blocks[i].status.hang &&
2740 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2741 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
d31a501e
CZ
2742 if (r)
2743 return r;
2744 }
2745 }
2746
2747 return 0;
2748}
2749
35d782fe
CZ
2750static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2751{
da146d3b
AD
2752 int i;
2753
2754 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2755 if (!adev->ip_blocks[i].status.valid)
da146d3b 2756 continue;
a1255107
AD
2757 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2758 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2759 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
98512bb8
KW
2760 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
2761 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
a1255107 2762 if (adev->ip_blocks[i].status.hang) {
da146d3b
AD
2763 DRM_INFO("Some blocks need a full reset!\n");
2764 return true;
2765 }
2766 }
35d782fe
CZ
2767 }
2768 return false;
2769}
2770
2771static int amdgpu_soft_reset(struct amdgpu_device *adev)
2772{
2773 int i, r = 0;
2774
2775 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2776 if (!adev->ip_blocks[i].status.valid)
35d782fe 2777 continue;
a1255107
AD
2778 if (adev->ip_blocks[i].status.hang &&
2779 adev->ip_blocks[i].version->funcs->soft_reset) {
2780 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
35d782fe
CZ
2781 if (r)
2782 return r;
2783 }
2784 }
2785
2786 return 0;
2787}
2788
2789static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2790{
2791 int i, r = 0;
2792
2793 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2794 if (!adev->ip_blocks[i].status.valid)
35d782fe 2795 continue;
a1255107
AD
2796 if (adev->ip_blocks[i].status.hang &&
2797 adev->ip_blocks[i].version->funcs->post_soft_reset)
2798 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
35d782fe
CZ
2799 if (r)
2800 return r;
2801 }
2802
2803 return 0;
2804}
2805
3ad81f16
CZ
2806bool amdgpu_need_backup(struct amdgpu_device *adev)
2807{
2808 if (adev->flags & AMD_IS_APU)
2809 return false;
2810
2811 return amdgpu_lockup_timeout > 0 ? true : false;
2812}
2813
53cdccd5
CZ
2814static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2815 struct amdgpu_ring *ring,
2816 struct amdgpu_bo *bo,
f54d1867 2817 struct dma_fence **fence)
53cdccd5
CZ
2818{
2819 uint32_t domain;
2820 int r;
2821
23d2e504
RH
2822 if (!bo->shadow)
2823 return 0;
2824
1d284797 2825 r = amdgpu_bo_reserve(bo, true);
23d2e504
RH
2826 if (r)
2827 return r;
2828 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2829 /* if bo has been evicted, then no need to recover */
2830 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
82521316
RH
2831 r = amdgpu_bo_validate(bo->shadow);
2832 if (r) {
2833 DRM_ERROR("bo validate failed!\n");
2834 goto err;
2835 }
2836
23d2e504 2837 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
53cdccd5 2838 NULL, fence, true);
23d2e504
RH
2839 if (r) {
2840 DRM_ERROR("recover page table failed!\n");
2841 goto err;
2842 }
2843 }
53cdccd5 2844err:
23d2e504
RH
2845 amdgpu_bo_unreserve(bo);
2846 return r;
53cdccd5
CZ
2847}
2848
5740682e
ML
2849/*
2850 * amdgpu_reset - reset ASIC/GPU for bare-metal or passthrough
a90ad3c2
ML
2851 *
2852 * @adev: amdgpu device pointer
5740682e 2853 * @reset_flags: output param tells caller the reset result
a90ad3c2 2854 *
5740682e
ML
2855 * attempt a soft reset or a full reset and reinitialize the ASIC
2856 * returns 0 on success, an error code otherwise
2857*/
2858static int amdgpu_reset(struct amdgpu_device *adev, uint64_t* reset_flags)
a90ad3c2 2859{
5740682e
ML
2860 bool need_full_reset, vram_lost = 0;
2861 int r;
a90ad3c2 2862
5740682e 2863 need_full_reset = amdgpu_need_full_reset(adev);
a90ad3c2 2864
5740682e
ML
2865 if (!need_full_reset) {
2866 amdgpu_pre_soft_reset(adev);
2867 r = amdgpu_soft_reset(adev);
2868 amdgpu_post_soft_reset(adev);
2869 if (r || amdgpu_check_soft_reset(adev)) {
2870 DRM_INFO("soft reset failed, will fallback to full reset!\n");
2871 need_full_reset = true;
2872 }
a90ad3c2 2873
5740682e 2874 }
a90ad3c2 2875
5740682e
ML
2876 if (need_full_reset) {
2877 r = amdgpu_suspend(adev);
a90ad3c2 2878
5740682e
ML
2879retry:
2880 amdgpu_atombios_scratch_regs_save(adev);
2881 r = amdgpu_asic_reset(adev);
2882 amdgpu_atombios_scratch_regs_restore(adev);
2883 /* post card */
2884 amdgpu_atom_asic_init(adev->mode_info.atom_context);
65781c78 2885
5740682e
ML
2886 if (!r) {
2887 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
2888 r = amdgpu_resume_phase1(adev);
2889 if (r)
2890 goto out;
65781c78 2891
5740682e
ML
2892 vram_lost = amdgpu_check_vram_lost(adev);
2893 if (vram_lost) {
2894 DRM_ERROR("VRAM is lost!\n");
2895 atomic_inc(&adev->vram_lost_counter);
2896 }
2897
c1c7ce8f
CK
2898 r = amdgpu_gtt_mgr_recover(
2899 &adev->mman.bdev.man[TTM_PL_TT]);
5740682e
ML
2900 if (r)
2901 goto out;
2902
2903 r = amdgpu_resume_phase2(adev);
2904 if (r)
2905 goto out;
2906
2907 if (vram_lost)
2908 amdgpu_fill_reset_magic(adev);
65781c78 2909 }
5740682e 2910 }
65781c78 2911
5740682e
ML
2912out:
2913 if (!r) {
2914 amdgpu_irq_gpu_reset_resume_helper(adev);
2915 r = amdgpu_ib_ring_tests(adev);
2916 if (r) {
2917 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
2918 r = amdgpu_suspend(adev);
2919 need_full_reset = true;
2920 goto retry;
2921 }
2922 }
65781c78 2923
5740682e
ML
2924 if (reset_flags) {
2925 if (vram_lost)
2926 (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
a90ad3c2 2927
5740682e
ML
2928 if (need_full_reset)
2929 (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
65781c78 2930 }
a90ad3c2 2931
5740682e
ML
2932 return r;
2933}
a90ad3c2 2934
5740682e
ML
2935/*
2936 * amdgpu_reset_sriov - reset ASIC for SR-IOV vf
2937 *
2938 * @adev: amdgpu device pointer
2939 * @reset_flags: output param tells caller the reset result
2940 *
2941 * do a VF FLR and reinitialize the ASIC
2942 * returns 0 on success, an error code otherwise
2943*/
2944static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags, bool from_hypervisor)
2945{
2946 int r;
2947
2948 if (from_hypervisor)
2949 r = amdgpu_virt_request_full_gpu(adev, true);
2950 else
2951 r = amdgpu_virt_reset_gpu(adev);
2952 if (r)
2953 return r;
a90ad3c2
ML
2954
2955 /* Resume IP prior to SMC */
5740682e
ML
2956 r = amdgpu_sriov_reinit_early(adev);
2957 if (r)
2958 goto error;
a90ad3c2
ML
2959
2960 /* we need to recover the GART prior to running SMC/CP/SDMA resume */
c1c7ce8f 2961 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
a90ad3c2
ML
2962
2963 /* now we are okay to resume SMC/CP/SDMA */
5740682e
ML
2964 r = amdgpu_sriov_reinit_late(adev);
2965 if (r)
2966 goto error;
a90ad3c2
ML
2967
2968 amdgpu_irq_gpu_reset_resume_helper(adev);
5740682e
ML
2969 r = amdgpu_ib_ring_tests(adev);
2970 if (r)
a90ad3c2
ML
2971 dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
2972
5740682e 2973error:
a90ad3c2
ML
2974 /* release full control of GPU after ib test */
2975 amdgpu_virt_release_full_gpu(adev, true);
2976
5740682e
ML
2977 if (reset_flags) {
2978 /* we will get vram_lost from the GIM in the future; for now
2979 * every reset request is considered VRAM LOST
2980 */
2981 (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
2982 atomic_inc(&adev->vram_lost_counter);
a90ad3c2 2983
5740682e
ML
2984 /* VF FLR or hotlink reset is always full-reset */
2985 (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
a90ad3c2
ML
2986 }
2987
2988 return r;
2989}
2990
d38ceaf9 2991/**
5740682e 2992 * amdgpu_gpu_recover - reset the asic and recover scheduler
d38ceaf9
AD
2993 *
2994 * @adev: amdgpu device pointer
5740682e 2995 * @job: the job that triggered the hang (may be NULL)
d38ceaf9 2996 *
5740682e 2997 * Attempt to reset the GPU if it has hung (all asics).
d38ceaf9
AD
2998 * Returns 0 for success or an error on failure.
2999 */
5740682e 3000int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
d38ceaf9 3001{
4562236b 3002 struct drm_atomic_state *state = NULL;
5740682e
ML
3003 uint64_t reset_flags = 0;
3004 int i, r, resched;
fb140b29 3005
63fbf42f
CZ
3006 if (!amdgpu_check_soft_reset(adev)) {
3007 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
3008 return 0;
3009 }
d38ceaf9 3010
5740682e
ML
3011 dev_info(adev->dev, "GPU reset begin!\n");
3012
13a752e3 3013 mutex_lock(&adev->lock_reset);
d94aed5a 3014 atomic_inc(&adev->gpu_reset_counter);
13a752e3 3015 adev->in_gpu_reset = 1;
d38ceaf9 3016
a3c47d6b
CZ
3017 /* block TTM */
3018 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
4562236b
HW
3019 /* store modesetting */
3020 if (amdgpu_device_has_dc_support(adev))
3021 state = drm_atomic_helper_suspend(adev->ddev);
a3c47d6b 3022
0875dc9e
CZ
3023 /* block scheduler */
3024 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3025 struct amdgpu_ring *ring = adev->rings[i];
3026
51687759 3027 if (!ring || !ring->sched.thread)
0875dc9e 3028 continue;
5740682e
ML
3029
3030 /* only focus on the ring that hit the timeout if @job is not NULL */
3031 if (job && job->ring->idx != i)
3032 continue;
3033
0875dc9e 3034 kthread_park(ring->sched.thread);
5740682e
ML
3035 amd_sched_hw_job_reset(&ring->sched, &job->base);
3036
2f9d4084
ML
3037 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3038 amdgpu_fence_driver_force_completion(ring);
0875dc9e 3039 }
d38ceaf9 3040
5740682e
ML
3041 if (amdgpu_sriov_vf(adev))
3042 r = amdgpu_reset_sriov(adev, &reset_flags, job ? false : true);
3043 else
3044 r = amdgpu_reset(adev, &reset_flags);
35d782fe 3045
d38ceaf9 3046 if (!r) {
5740682e
ML
3047 if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) ||
3048 (reset_flags & AMDGPU_RESET_INFO_VRAM_LOST)) {
53cdccd5
CZ
3049 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
3050 struct amdgpu_bo *bo, *tmp;
f54d1867 3051 struct dma_fence *fence = NULL, *next = NULL;
53cdccd5
CZ
3052
3053 DRM_INFO("recover vram bo from shadow\n");
3054 mutex_lock(&adev->shadow_list_lock);
3055 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
236763d3 3056 next = NULL;
53cdccd5
CZ
3057 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
3058 if (fence) {
f54d1867 3059 r = dma_fence_wait(fence, false);
53cdccd5 3060 if (r) {
1d7b17b0 3061 WARN(r, "recovery from shadow didn't complete\n");
53cdccd5
CZ
3062 break;
3063 }
3064 }
1f465087 3065
f54d1867 3066 dma_fence_put(fence);
53cdccd5
CZ
3067 fence = next;
3068 }
3069 mutex_unlock(&adev->shadow_list_lock);
3070 if (fence) {
f54d1867 3071 r = dma_fence_wait(fence, false);
53cdccd5 3072 if (r)
1d7b17b0 3073 WARN(r, "recovery from shadow didn't complete\n");
53cdccd5 3074 }
f54d1867 3075 dma_fence_put(fence);
53cdccd5 3076 }
5740682e 3077
d38ceaf9
AD
3078 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3079 struct amdgpu_ring *ring = adev->rings[i];
51687759
CZ
3080
3081 if (!ring || !ring->sched.thread)
d38ceaf9 3082 continue;
53cdccd5 3083
5740682e
ML
3084 /* only focus on the ring that hit the timeout if @job is not NULL */
3085 if (job && job->ring->idx != i)
3086 continue;
3087
aa1c8900 3088 amd_sched_job_recovery(&ring->sched);
0875dc9e 3089 kthread_unpark(ring->sched.thread);
d38ceaf9 3090 }
d38ceaf9 3091 } else {
d38ceaf9 3092 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5740682e
ML
3093 struct amdgpu_ring *ring = adev->rings[i];
3094
3095 if (!ring || !ring->sched.thread)
3096 continue;
3097
3098 /* only focus on the ring that hit the timeout if @job is not NULL */
3099 if (job && job->ring->idx != i)
3100 continue;
3101
3102 kthread_unpark(adev->rings[i]->sched.thread);
d38ceaf9
AD
3103 }
3104 }
3105
4562236b 3106 if (amdgpu_device_has_dc_support(adev)) {
5740682e
ML
3107 if (drm_atomic_helper_resume(adev->ddev, state))
3108 dev_info(adev->dev, "drm resume failed\n");
4562236b 3109 amdgpu_dm_display_resume(adev);
5740682e 3110 } else {
4562236b 3111 drm_helper_resume_force_mode(adev->ddev);
5740682e 3112 }
d38ceaf9
AD
3113
3114 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
5740682e 3115
89041940 3116 if (r) {
d38ceaf9 3117 /* bad news, how to tell it to userspace ? */
5740682e
ML
3118 dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
3119 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
3120 } else {
3121 dev_info(adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
89041940 3122 }
d38ceaf9 3123
89041940 3124 amdgpu_vf_error_trans_all(adev);
13a752e3
ML
3125 adev->in_gpu_reset = 0;
3126 mutex_unlock(&adev->lock_reset);
d38ceaf9
AD
3127 return r;
3128}
3129
d0dd7f0c
AD
3130void amdgpu_get_pcie_info(struct amdgpu_device *adev)
3131{
3132 u32 mask;
3133 int ret;
3134
cd474ba0
AD
3135 if (amdgpu_pcie_gen_cap)
3136 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 3137
cd474ba0
AD
3138 if (amdgpu_pcie_lane_cap)
3139 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 3140
cd474ba0
AD
3141 /* covers APUs as well */
3142 if (pci_is_root_bus(adev->pdev->bus)) {
3143 if (adev->pm.pcie_gen_mask == 0)
3144 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3145 if (adev->pm.pcie_mlw_mask == 0)
3146 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 3147 return;
cd474ba0 3148 }
d0dd7f0c 3149
cd474ba0
AD
3150 if (adev->pm.pcie_gen_mask == 0) {
3151 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
3152 if (!ret) {
3153 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3154 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3155 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3156
3157 if (mask & DRM_PCIE_SPEED_25)
3158 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
3159 if (mask & DRM_PCIE_SPEED_50)
3160 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
3161 if (mask & DRM_PCIE_SPEED_80)
3162 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
3163 } else {
3164 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3165 }
3166 }
3167 if (adev->pm.pcie_mlw_mask == 0) {
3168 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
3169 if (!ret) {
3170 switch (mask) {
3171 case 32:
3172 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
3173 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3174 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3175 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3176 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3177 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3178 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3179 break;
3180 case 16:
3181 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3182 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3183 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3184 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3185 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3186 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3187 break;
3188 case 12:
3189 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3190 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3191 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3192 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3193 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3194 break;
3195 case 8:
3196 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3197 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3198 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3199 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3200 break;
3201 case 4:
3202 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3203 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3204 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3205 break;
3206 case 2:
3207 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3208 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3209 break;
3210 case 1:
3211 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
3212 break;
3213 default:
3214 break;
3215 }
3216 } else {
3217 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c
AD
3218 }
3219 }
3220}
d38ceaf9
AD
3221
3222/*
3223 * Debugfs
3224 */
3225int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
06ab6832 3226 const struct drm_info_list *files,
d38ceaf9
AD
3227 unsigned nfiles)
3228{
3229 unsigned i;
3230
3231 for (i = 0; i < adev->debugfs_count; i++) {
3232 if (adev->debugfs[i].files == files) {
3233 /* Already registered */
3234 return 0;
3235 }
3236 }
3237
3238 i = adev->debugfs_count + 1;
3239 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
3240 DRM_ERROR("Reached maximum number of debugfs components.\n");
3241 DRM_ERROR("Report so we can increase "
3242 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
3243 return -EINVAL;
3244 }
3245 adev->debugfs[adev->debugfs_count].files = files;
3246 adev->debugfs[adev->debugfs_count].num_files = nfiles;
3247 adev->debugfs_count = i;
3248#if defined(CONFIG_DEBUG_FS)
d38ceaf9
AD
3249 drm_debugfs_create_files(files, nfiles,
3250 adev->ddev->primary->debugfs_root,
3251 adev->ddev->primary);
3252#endif
3253 return 0;
3254}
3255
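/*
 * Illustrative sketch, not driver code: how a component would register an
 * entry through amdgpu_debugfs_add_files() above. The entry name and the
 * body of the show callback are hypothetical.
 */
#if 0	/* example component, not part of amdgpu */
static int amdgpu_debugfs_demo_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu_reset_counter: %d\n",
		   atomic_read(&adev->gpu_reset_counter));
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_demo_list[] = {
	{ "amdgpu_demo_info", amdgpu_debugfs_demo_show, 0, NULL },
};

static int amdgpu_debugfs_demo_init(struct amdgpu_device *adev)
{
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_demo_list,
					ARRAY_SIZE(amdgpu_debugfs_demo_list));
}
#endif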
d38ceaf9
AD
3256#if defined(CONFIG_DEBUG_FS)
3257
3258static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
3259 size_t size, loff_t *pos)
3260{
45063097 3261 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
3262 ssize_t result = 0;
3263 int r;
bd12267d 3264 bool pm_pg_lock, use_bank;
56628159 3265 unsigned instance_bank, sh_bank, se_bank;
d38ceaf9
AD
3266
3267 if (size & 0x3 || *pos & 0x3)
3268 return -EINVAL;
3269
bd12267d
TSD
3270 /* are we reading registers for which a PG lock is necessary? */
3271 pm_pg_lock = (*pos >> 23) & 1;
3272
56628159 3273 if (*pos & (1ULL << 62)) {
0b968650
TSD
3274 se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
3275 sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
3276 instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
32977f93
TSD
3277
3278 if (se_bank == 0x3FF)
3279 se_bank = 0xFFFFFFFF;
3280 if (sh_bank == 0x3FF)
3281 sh_bank = 0xFFFFFFFF;
3282 if (instance_bank == 0x3FF)
3283 instance_bank = 0xFFFFFFFF;
56628159 3284 use_bank = 1;
56628159
TSD
3285 } else {
3286 use_bank = 0;
3287 }
3288
801a6aa9 3289 *pos &= (1UL << 22) - 1;
bd12267d 3290
56628159 3291 if (use_bank) {
32977f93
TSD
3292 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3293 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
56628159
TSD
3294 return -EINVAL;
3295 mutex_lock(&adev->grbm_idx_mutex);
3296 amdgpu_gfx_select_se_sh(adev, se_bank,
3297 sh_bank, instance_bank);
3298 }
3299
bd12267d
TSD
3300 if (pm_pg_lock)
3301 mutex_lock(&adev->pm.mutex);
3302
d38ceaf9
AD
3303 while (size) {
3304 uint32_t value;
3305
3306 if (*pos > adev->rmmio_size)
56628159 3307 goto end;
d38ceaf9
AD
3308
3309 value = RREG32(*pos >> 2);
3310 r = put_user(value, (uint32_t *)buf);
56628159
TSD
3311 if (r) {
3312 result = r;
3313 goto end;
3314 }
d38ceaf9
AD
3315
3316 result += 4;
3317 buf += 4;
3318 *pos += 4;
3319 size -= 4;
3320 }
3321
56628159
TSD
3322end:
3323 if (use_bank) {
3324 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3325 mutex_unlock(&adev->grbm_idx_mutex);
3326 }
3327
bd12267d
TSD
3328 if (pm_pg_lock)
3329 mutex_unlock(&adev->pm.mutex);
3330
d38ceaf9
AD
3331 return result;
3332}
3333
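/*
 * Illustrative userspace sketch, not driver code: packing the bank-select
 * bits decoded by amdgpu_debugfs_regs_read() above into the file offset.
 * Bit 62 enables banked access, bits 24-33/34-43/44-53 carry SE/SH/instance
 * (0x3FF broadcasts to all), bit 23 would take the PM lock, and the low
 * 22 bits are the register byte offset. The debugfs node path and name
 * ("amdgpu_regs" under /sys/kernel/debug/dri/<minor>/) are assumptions here.
 */
#if 0	/* userspace example, kept out of the kernel build */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

static int read_banked_reg(const char *node, uint32_t reg_dword_index,
			   uint32_t se, uint32_t sh, uint32_t instance,
			   uint32_t *value)
{
	uint64_t pos = ((uint64_t)reg_dword_index << 2) & ((1ULL << 22) - 1);
	int fd, ok;

	pos |= 1ULL << 62;			/* use explicit bank selection */
	pos |= (uint64_t)(se & 0x3FF) << 24;	/* 0x3FF == broadcast */
	pos |= (uint64_t)(sh & 0x3FF) << 34;
	pos |= (uint64_t)(instance & 0x3FF) << 44;

	fd = open(node, O_RDONLY);
	if (fd < 0)
		return -1;
	ok = pread(fd, value, sizeof(*value), pos) == sizeof(*value);
	close(fd);
	return ok ? 0 : -1;
}
#endif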
3334static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
3335 size_t size, loff_t *pos)
3336{
45063097 3337 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
3338 ssize_t result = 0;
3339 int r;
394fdde2
TSD
3340 bool pm_pg_lock, use_bank;
3341 unsigned instance_bank, sh_bank, se_bank;
d38ceaf9
AD
3342
3343 if (size & 0x3 || *pos & 0x3)
3344 return -EINVAL;
3345
394fdde2
TSD
3346 /* are we writing registers for which a PG lock is necessary? */
3347 pm_pg_lock = (*pos >> 23) & 1;
3348
3349 if (*pos & (1ULL << 62)) {
0b968650
TSD
3350 se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
3351 sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
3352 instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
394fdde2
TSD
3353
3354 if (se_bank == 0x3FF)
3355 se_bank = 0xFFFFFFFF;
3356 if (sh_bank == 0x3FF)
3357 sh_bank = 0xFFFFFFFF;
3358 if (instance_bank == 0x3FF)
3359 instance_bank = 0xFFFFFFFF;
3360 use_bank = 1;
3361 } else {
3362 use_bank = 0;
3363 }
3364
801a6aa9 3365 *pos &= (1UL << 22) - 1;
394fdde2
TSD
3366
3367 if (use_bank) {
3368 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3369 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
3370 return -EINVAL;
3371 mutex_lock(&adev->grbm_idx_mutex);
3372 amdgpu_gfx_select_se_sh(adev, se_bank,
3373 sh_bank, instance_bank);
3374 }
3375
3376 if (pm_pg_lock)
3377 mutex_lock(&adev->pm.mutex);
3378
d38ceaf9
AD
3379 while (size) {
3380 uint32_t value;
3381
3382 if (*pos > adev->rmmio_size)
3383 return result;
3384
3385 r = get_user(value, (uint32_t *)buf);
3386 if (r)
3387 return r;
3388
3389 WREG32(*pos >> 2, value);
3390
3391 result += 4;
3392 buf += 4;
3393 *pos += 4;
3394 size -= 4;
3395 }
3396
394fdde2
TSD
3397 if (use_bank) {
3398 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3399 mutex_unlock(&adev->grbm_idx_mutex);
3400 }
3401
3402 if (pm_pg_lock)
3403 mutex_unlock(&adev->pm.mutex);
3404
d38ceaf9
AD
3405 return result;
3406}
3407
adcec288
TSD
3408static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
3409 size_t size, loff_t *pos)
3410{
45063097 3411 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3412 ssize_t result = 0;
3413 int r;
3414
3415 if (size & 0x3 || *pos & 0x3)
3416 return -EINVAL;
3417
3418 while (size) {
3419 uint32_t value;
3420
3421 value = RREG32_PCIE(*pos >> 2);
3422 r = put_user(value, (uint32_t *)buf);
3423 if (r)
3424 return r;
3425
3426 result += 4;
3427 buf += 4;
3428 *pos += 4;
3429 size -= 4;
3430 }
3431
3432 return result;
3433}
3434
3435static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
3436 size_t size, loff_t *pos)
3437{
45063097 3438 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3439 ssize_t result = 0;
3440 int r;
3441
3442 if (size & 0x3 || *pos & 0x3)
3443 return -EINVAL;
3444
3445 while (size) {
3446 uint32_t value;
3447
3448 r = get_user(value, (uint32_t *)buf);
3449 if (r)
3450 return r;
3451
3452 WREG32_PCIE(*pos >> 2, value);
3453
3454 result += 4;
3455 buf += 4;
3456 *pos += 4;
3457 size -= 4;
3458 }
3459
3460 return result;
3461}
3462
3463static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
3464 size_t size, loff_t *pos)
3465{
45063097 3466 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3467 ssize_t result = 0;
3468 int r;
3469
3470 if (size & 0x3 || *pos & 0x3)
3471 return -EINVAL;
3472
3473 while (size) {
3474 uint32_t value;
3475
3476 value = RREG32_DIDT(*pos >> 2);
3477 r = put_user(value, (uint32_t *)buf);
3478 if (r)
3479 return r;
3480
3481 result += 4;
3482 buf += 4;
3483 *pos += 4;
3484 size -= 4;
3485 }
3486
3487 return result;
3488}
3489
3490static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
3491 size_t size, loff_t *pos)
3492{
45063097 3493 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3494 ssize_t result = 0;
3495 int r;
3496
3497 if (size & 0x3 || *pos & 0x3)
3498 return -EINVAL;
3499
3500 while (size) {
3501 uint32_t value;
3502
3503 r = get_user(value, (uint32_t *)buf);
3504 if (r)
3505 return r;
3506
3507 WREG32_DIDT(*pos >> 2, value);
3508
3509 result += 4;
3510 buf += 4;
3511 *pos += 4;
3512 size -= 4;
3513 }
3514
3515 return result;
3516}
3517
3518static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
3519 size_t size, loff_t *pos)
3520{
45063097 3521 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3522 ssize_t result = 0;
3523 int r;
3524
3525 if (size & 0x3 || *pos & 0x3)
3526 return -EINVAL;
3527
3528 while (size) {
3529 uint32_t value;
3530
6fc0deaf 3531 value = RREG32_SMC(*pos);
adcec288
TSD
3532 r = put_user(value, (uint32_t *)buf);
3533 if (r)
3534 return r;
3535
3536 result += 4;
3537 buf += 4;
3538 *pos += 4;
3539 size -= 4;
3540 }
3541
3542 return result;
3543}
3544
3545static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
3546 size_t size, loff_t *pos)
3547{
45063097 3548 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3549 ssize_t result = 0;
3550 int r;
3551
3552 if (size & 0x3 || *pos & 0x3)
3553 return -EINVAL;
3554
3555 while (size) {
3556 uint32_t value;
3557
3558 r = get_user(value, (uint32_t *)buf);
3559 if (r)
3560 return r;
3561
6fc0deaf 3562 WREG32_SMC(*pos, value);
adcec288
TSD
3563
3564 result += 4;
3565 buf += 4;
3566 *pos += 4;
3567 size -= 4;
3568 }
3569
3570 return result;
3571}
3572
1e051413
TSD
3573static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
3574 size_t size, loff_t *pos)
3575{
45063097 3576 struct amdgpu_device *adev = file_inode(f)->i_private;
1e051413
TSD
3577 ssize_t result = 0;
3578 int r;
3579 uint32_t *config, no_regs = 0;
3580
3581 if (size & 0x3 || *pos & 0x3)
3582 return -EINVAL;
3583
3584	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
3585 if (!config)
3586 return -ENOMEM;
3587
3588 /* version, increment each time something is added */
3589	config[no_regs++] = 3;
3590 config[no_regs++] = adev->gfx.config.max_shader_engines;
3591 config[no_regs++] = adev->gfx.config.max_tile_pipes;
3592 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
3593 config[no_regs++] = adev->gfx.config.max_sh_per_se;
3594 config[no_regs++] = adev->gfx.config.max_backends_per_se;
3595 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
3596 config[no_regs++] = adev->gfx.config.max_gprs;
3597 config[no_regs++] = adev->gfx.config.max_gs_threads;
3598 config[no_regs++] = adev->gfx.config.max_hw_contexts;
3599 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
3600 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
3601 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
3602 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
3603 config[no_regs++] = adev->gfx.config.num_tile_pipes;
3604 config[no_regs++] = adev->gfx.config.backend_enable_mask;
3605 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
3606 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
3607 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
3608 config[no_regs++] = adev->gfx.config.num_gpus;
3609 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
3610 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
3611 config[no_regs++] = adev->gfx.config.gb_addr_config;
3612 config[no_regs++] = adev->gfx.config.num_rbs;
3613
3614 /* rev==1 */
3615 config[no_regs++] = adev->rev_id;
3616 config[no_regs++] = adev->pg_flags;
3617 config[no_regs++] = adev->cg_flags;
3618
3619 /* rev==2 */
3620 config[no_regs++] = adev->family;
3621 config[no_regs++] = adev->external_rev_id;
3622
3623 /* rev==3 */
3624 config[no_regs++] = adev->pdev->device;
3625 config[no_regs++] = adev->pdev->revision;
3626 config[no_regs++] = adev->pdev->subsystem_device;
3627 config[no_regs++] = adev->pdev->subsystem_vendor;
3628
3629 while (size && (*pos < no_regs * 4)) {
3630 uint32_t value;
3631
3632 value = config[*pos >> 2];
3633 r = put_user(value, (uint32_t *)buf);
3634 if (r) {
3635 kfree(config);
3636 return r;
3637 }
3638
3639 result += 4;
3640 buf += 4;
3641 *pos += 4;
3642 size -= 4;
3643 }
3644
3645 kfree(config);
3646 return result;
3647}
3648
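/*
 * Read handler for the amdgpu_sensors debugfs file.  The file offset
 * divided by four selects the sensor to query, and the value(s) reported
 * by the powerplay read_sensor callback are returned.  Reading fails with
 * -EINVAL when dpm is disabled or the callback is not implemented.
 */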
3649static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
3650 size_t size, loff_t *pos)
3651{
3652	struct amdgpu_device *adev = file_inode(f)->i_private;
3653 int idx, x, outsize, r, valuesize;
3654 uint32_t values[16];
3655
3656	if (size & 3 || *pos & 0x3)
3657 return -EINVAL;
3658
3659 if (amdgpu_dpm == 0)
3660 return -EINVAL;
3661
3662 /* convert offset to sensor number */
3663 idx = *pos >> 2;
3664
3665	valuesize = sizeof(values);
3666	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
3667		r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
3668 else
3669 return -EINVAL;
3670
3671 if (size > valuesize)
3672 return -EINVAL;
3673
3674 outsize = 0;
3675 x = 0;
3676 if (!r) {
3677 while (size) {
3678 r = put_user(values[x++], (int32_t *)buf);
3679 buf += 4;
3680 size -= 4;
3681 outsize += 4;
3682 }
3683 }
3684
3685	return !r ? outsize : r;
3686}
3687
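/*
 * Read handler for the amdgpu_wave debugfs file.  The 64-bit file offset
 * is not a plain byte offset; it packs the wave to inspect as bitfields:
 *
 *   bits  6:0  - byte offset into the returned wave data (4-byte aligned)
 *   bits 14:7  - shader engine (se)
 *   bits 22:15 - shader array (sh)
 *   bits 30:23 - compute unit (cu)
 *   bits 36:31 - wave id
 *   bits 44:37 - simd id
 *
 * A sketch of how userspace might compose such an offset (the field
 * values are an example only):
 *
 *   uint64_t pos = offset | (se << 7) | (sh << 15) | (cu << 23) |
 *                  ((uint64_t)wave << 31) | ((uint64_t)simd << 37);
 */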
3688static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
3689 size_t size, loff_t *pos)
3690{
3691 struct amdgpu_device *adev = f->f_inode->i_private;
3692 int r, x;
3693 ssize_t result=0;
3694	uint32_t offset, se, sh, cu, wave, simd, data[32];
3695
3696 if (size & 3 || *pos & 3)
3697 return -EINVAL;
3698
3699 /* decode offset */
3700 offset = (*pos & GENMASK_ULL(6, 0));
3701 se = (*pos & GENMASK_ULL(14, 7)) >> 7;
3702 sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
3703 cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
3704 wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
3705 simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
3706
3707 /* switch to the specific se/sh/cu */
3708 mutex_lock(&adev->grbm_idx_mutex);
3709 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3710
3711 x = 0;
3712 if (adev->gfx.funcs->read_wave_data)
3713 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
3714
3715 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3716 mutex_unlock(&adev->grbm_idx_mutex);
3717
3718 if (!x)
3719 return -EINVAL;
3720
3721	while (size && (offset < x * 4)) {
3722 uint32_t value;
3723
3724	value = data[offset >> 2];
3725 r = put_user(value, (uint32_t *)buf);
3726 if (r)
3727 return r;
3728
3729 result += 4;
3730 buf += 4;
3731	offset += 4;
3732 size -= 4;
3733 }
3734
3735 return result;
3736}
3737
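/*
 * Read handler for the amdgpu_gpr debugfs file, which dumps wave general
 * purpose registers.  The file offset is packed much like amdgpu_wave:
 *
 *   bits 11:0  - offset into the selected register bank
 *   bits 19:12 - shader engine (se)
 *   bits 27:20 - shader array (sh)
 *   bits 35:28 - compute unit (cu)
 *   bits 43:36 - wave id
 *   bits 51:44 - simd id
 *   bits 59:52 - thread id
 *   bits 61:60 - register bank (0 selects VGPRs, anything else SGPRs)
 */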
3738static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
3739 size_t size, loff_t *pos)
3740{
3741 struct amdgpu_device *adev = f->f_inode->i_private;
3742 int r;
3743 ssize_t result = 0;
3744 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
3745
3746 if (size & 3 || *pos & 3)
3747 return -EINVAL;
3748
3749 /* decode offset */
3750 offset = *pos & GENMASK_ULL(11, 0);
3751 se = (*pos & GENMASK_ULL(19, 12)) >> 12;
3752 sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
3753 cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
3754 wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
3755 simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
3756 thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
3757 bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
3758
3759 data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
3760 if (!data)
3761 return -ENOMEM;
3762
3763 /* switch to the specific se/sh/cu */
3764 mutex_lock(&adev->grbm_idx_mutex);
3765 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3766
3767 if (bank == 0) {
3768 if (adev->gfx.funcs->read_wave_vgprs)
3769 adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
3770 } else {
3771 if (adev->gfx.funcs->read_wave_sgprs)
3772 adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
3773 }
3774
3775 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3776 mutex_unlock(&adev->grbm_idx_mutex);
3777
3778 while (size) {
3779 uint32_t value;
3780
3781 value = data[offset++];
3782 r = put_user(value, (uint32_t *)buf);
3783 if (r) {
3784 result = r;
3785 goto err;
3786 }
3787
3788 result += 4;
3789 buf += 4;
3790 size -= 4;
3791 }
3792
3793err:
3794 kfree(data);
3795 return result;
3796}
3797
3798static const struct file_operations amdgpu_debugfs_regs_fops = {
3799 .owner = THIS_MODULE,
3800 .read = amdgpu_debugfs_regs_read,
3801 .write = amdgpu_debugfs_regs_write,
3802 .llseek = default_llseek
3803};
3804static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3805 .owner = THIS_MODULE,
3806 .read = amdgpu_debugfs_regs_didt_read,
3807 .write = amdgpu_debugfs_regs_didt_write,
3808 .llseek = default_llseek
3809};
3810static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3811 .owner = THIS_MODULE,
3812 .read = amdgpu_debugfs_regs_pcie_read,
3813 .write = amdgpu_debugfs_regs_pcie_write,
3814 .llseek = default_llseek
3815};
3816static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3817 .owner = THIS_MODULE,
3818 .read = amdgpu_debugfs_regs_smc_read,
3819 .write = amdgpu_debugfs_regs_smc_write,
3820 .llseek = default_llseek
3821};
3822
3823static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3824 .owner = THIS_MODULE,
3825 .read = amdgpu_debugfs_gca_config_read,
3826 .llseek = default_llseek
3827};
3828
3829static const struct file_operations amdgpu_debugfs_sensors_fops = {
3830 .owner = THIS_MODULE,
3831 .read = amdgpu_debugfs_sensor_read,
3832 .llseek = default_llseek
3833};
3834
3835static const struct file_operations amdgpu_debugfs_wave_fops = {
3836 .owner = THIS_MODULE,
3837 .read = amdgpu_debugfs_wave_read,
3838 .llseek = default_llseek
3839};
3840static const struct file_operations amdgpu_debugfs_gpr_fops = {
3841 .owner = THIS_MODULE,
3842 .read = amdgpu_debugfs_gpr_read,
3843 .llseek = default_llseek
3844};
3845
3846static const struct file_operations *debugfs_regs[] = {
3847 &amdgpu_debugfs_regs_fops,
3848 &amdgpu_debugfs_regs_didt_fops,
3849 &amdgpu_debugfs_regs_pcie_fops,
3850 &amdgpu_debugfs_regs_smc_fops,
3851	&amdgpu_debugfs_gca_config_fops,
3852	&amdgpu_debugfs_sensors_fops,
3853	&amdgpu_debugfs_wave_fops,
3854	&amdgpu_debugfs_gpr_fops,
3855};
3856
3857static const char *debugfs_regs_names[] = {
3858 "amdgpu_regs",
3859 "amdgpu_regs_didt",
3860 "amdgpu_regs_pcie",
3861 "amdgpu_regs_smc",
3862	"amdgpu_gca_config",
3863	"amdgpu_sensors",
3864	"amdgpu_wave",
3865	"amdgpu_gpr",
3866};
3867
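/*
 * Create the files listed in debugfs_regs_names[] under the primary DRM
 * minor's debugfs directory (typically /sys/kernel/debug/dri/N).  The
 * first entry, amdgpu_regs, has its inode size set to the MMIO aperture
 * size (adev->rmmio_size).  On failure, any files created so far are
 * removed again.
 */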
3868static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3869{
3870 struct drm_minor *minor = adev->ddev->primary;
3871 struct dentry *ent, *root = minor->debugfs_root;
3872 unsigned i, j;
3873
3874 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3875 ent = debugfs_create_file(debugfs_regs_names[i],
3876 S_IFREG | S_IRUGO, root,
3877 adev, debugfs_regs[i]);
3878 if (IS_ERR(ent)) {
3879 for (j = 0; j < i; j++) {
3880	debugfs_remove(adev->debugfs_regs[j]);
3881	adev->debugfs_regs[j] = NULL;
3882 }
3883 return PTR_ERR(ent);
3884 }
3885
3886 if (!i)
3887 i_size_write(ent->d_inode, adev->rmmio_size);
3888 adev->debugfs_regs[i] = ent;
3889 }
3890
3891 return 0;
3892}
3893
3894static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
3895{
3896 unsigned i;
3897
3898 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3899 if (adev->debugfs_regs[i]) {
3900 debugfs_remove(adev->debugfs_regs[i]);
3901 adev->debugfs_regs[i] = NULL;
3902 }
3903 }
3904}
3905
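/*
 * debugfs helper that re-runs the IB ring tests on demand.  Every ring's
 * scheduler kthread is parked first so no new jobs are pushed while the
 * tests run, and unparked again afterwards; the result is printed into
 * the seq_file.
 */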
3906static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
3907{
3908 struct drm_info_node *node = (struct drm_info_node *) m->private;
3909 struct drm_device *dev = node->minor->dev;
3910 struct amdgpu_device *adev = dev->dev_private;
3911 int r = 0, i;
3912
3913 /* hold on the scheduler */
3914 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3915 struct amdgpu_ring *ring = adev->rings[i];
3916
3917 if (!ring || !ring->sched.thread)
3918 continue;
3919 kthread_park(ring->sched.thread);
3920 }
3921
3922 seq_printf(m, "run ib test:\n");
3923 r = amdgpu_ib_ring_tests(adev);
3924 if (r)
3925 seq_printf(m, "ib ring tests failed (%d).\n", r);
3926 else
3927 seq_printf(m, "ib ring tests passed.\n");
3928
3929 /* go on the scheduler */
3930 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3931 struct amdgpu_ring *ring = adev->rings[i];
3932
3933 if (!ring || !ring->sched.thread)
3934 continue;
3935 kthread_unpark(ring->sched.thread);
3936 }
3937
3938 return 0;
3939}
3940
3941static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = {
3942 {"amdgpu_test_ib", &amdgpu_debugfs_test_ib}
3943};
3944
3945static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
3946{
3947 return amdgpu_debugfs_add_files(adev,
3948 amdgpu_debugfs_test_ib_ring_list, 1);
3949}
3950
3951int amdgpu_debugfs_init(struct drm_minor *minor)
3952{
3953 return 0;
3954}
3955
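/*
 * Dump the raw video BIOS image that was fetched at init time
 * (adev->bios, adev->bios_size bytes) through the amdgpu_vbios debugfs
 * entry.
 */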
3956static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
3957{
3958 struct drm_info_node *node = (struct drm_info_node *) m->private;
3959 struct drm_device *dev = node->minor->dev;
3960 struct amdgpu_device *adev = dev->dev_private;
3961
3962 seq_write(m, adev->bios, adev->bios_size);
3963 return 0;
3964}
3965
3966static const struct drm_info_list amdgpu_vbios_dump_list[] = {
3967 {"amdgpu_vbios",
3968 amdgpu_debugfs_get_vbios_dump,
3969 0, NULL},
3970};
3971
3972static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
3973{
3974 return amdgpu_debugfs_add_files(adev,
3975 amdgpu_vbios_dump_list, 1);
3976}
3977#else
3978static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
3979{
3980 return 0;
3981}
3982static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3983{
3984 return 0;
3985}
3986static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
3987{
3988 return 0;
3989}
3990static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
3991#endif