drm/amdgpu/virt: implement wait_reset callbacks for vi/ai
[linux-2.6-block.git] drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
0875dc9e 28#include <linux/kthread.h>
29#include <linux/console.h>
30#include <linux/slab.h>
31#include <linux/debugfs.h>
32#include <drm/drmP.h>
33#include <drm/drm_crtc_helper.h>
4562236b 34#include <drm/drm_atomic_helper.h>
35#include <drm/amdgpu_drm.h>
36#include <linux/vgaarb.h>
37#include <linux/vga_switcheroo.h>
38#include <linux/efi.h>
39#include "amdgpu.h"
f4b373f4 40#include "amdgpu_trace.h"
41#include "amdgpu_i2c.h"
42#include "atom.h"
43#include "amdgpu_atombios.h"
a5bde2f9 44#include "amdgpu_atomfirmware.h"
d0dd7f0c 45#include "amd_pcie.h"
46#ifdef CONFIG_DRM_AMDGPU_SI
47#include "si.h"
48#endif
49#ifdef CONFIG_DRM_AMDGPU_CIK
50#include "cik.h"
51#endif
aaa36a97 52#include "vi.h"
460826e6 53#include "soc15.h"
d38ceaf9 54#include "bif/bif_4_1_d.h"
9accf2fd 55#include <linux/pci.h>
bec86378 56#include <linux/firmware.h>
89041940 57#include "amdgpu_vf_error.h"
d38ceaf9 58
ba997709 59#include "amdgpu_amdkfd.h"
d2f52ac8 60#include "amdgpu_pm.h"
d38ceaf9 61
e2a75f88 62MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
2d2e5e7e 63MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
e2a75f88 64
65#define AMDGPU_RESUME_MS 2000
66
67static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
68static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
4f0955fc 69static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
db95e218 70static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev);
71
72static const char *amdgpu_asic_name[] = {
73 "TAHITI",
74 "PITCAIRN",
75 "VERDE",
76 "OLAND",
77 "HAINAN",
78 "BONAIRE",
79 "KAVERI",
80 "KABINI",
81 "HAWAII",
82 "MULLINS",
83 "TOPAZ",
84 "TONGA",
48299f95 85 "FIJI",
d38ceaf9 86 "CARRIZO",
139f4917 87 "STONEY",
88 "POLARIS10",
89 "POLARIS11",
c4642a47 90 "POLARIS12",
d4196f01 91 "VEGA10",
2ca8a5d2 92 "RAVEN",
93 "LAST",
94};
95
96bool amdgpu_device_is_px(struct drm_device *dev)
97{
98 struct amdgpu_device *adev = dev->dev_private;
99
2f7d10b3 100 if (adev->flags & AMD_IS_PX)
101 return true;
102 return false;
103}
104
105/*
106 * MMIO register access helper functions.
107 */
108uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
15d72fd7 109 uint32_t acc_flags)
d38ceaf9 110{
111 uint32_t ret;
112
43ca8efa 113 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
bc992ba5 114 return amdgpu_virt_kiq_rreg(adev, reg);
bc992ba5 115
15d72fd7 116 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
f4b373f4 117 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
118 else {
119 unsigned long flags;
120
121 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
122 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
123 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
124 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
d38ceaf9 125 }
126 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
127 return ret;
128}
129
130void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
15d72fd7 131 uint32_t acc_flags)
d38ceaf9 132{
f4b373f4 133 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
4e99a44e 134
135 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
136 adev->last_mm_index = v;
137 }
138
43ca8efa 139 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
bc992ba5 140 return amdgpu_virt_kiq_wreg(adev, reg, v);
bc992ba5 141
15d72fd7 142 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
143 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
144 else {
145 unsigned long flags;
146
147 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
148 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
149 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
150 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
151 }
152
153 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
154 udelay(500);
155 }
156}
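
/*
 * Illustrative sketch (not part of the original file): a read-modify-write
 * through the two MMIO helpers above. The register offset 0x10 and the bit
 * being set are made up purely for demonstration; an acc_flags value of 0
 * means no special access flags.
 */
static void example_mmio_rmw(struct amdgpu_device *adev)
{
	uint32_t val;

	val = amdgpu_mm_rreg(adev, 0x10, 0);	/* read current value */
	val |= 0x1;				/* set bit 0 */
	amdgpu_mm_wreg(adev, 0x10, val, 0);	/* write it back */
}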
157
158u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
159{
160 if ((reg * 4) < adev->rio_mem_size)
161 return ioread32(adev->rio_mem + (reg * 4));
162 else {
163 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
164 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
165 }
166}
167
168void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
169{
170 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
171 adev->last_mm_index = v;
172 }
173
174 if ((reg * 4) < adev->rio_mem_size)
175 iowrite32(v, adev->rio_mem + (reg * 4));
176 else {
177 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
178 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
179 }
180
181 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
182 udelay(500);
183 }
184}
185
186/**
187 * amdgpu_mm_rdoorbell - read a doorbell dword
188 *
189 * @adev: amdgpu_device pointer
190 * @index: doorbell index
191 *
192 * Returns the value in the doorbell aperture at the
193 * requested doorbell index (CIK).
194 */
195u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
196{
197 if (index < adev->doorbell.num_doorbells) {
198 return readl(adev->doorbell.ptr + index);
199 } else {
200 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
201 return 0;
202 }
203}
204
205/**
206 * amdgpu_mm_wdoorbell - write a doorbell dword
207 *
208 * @adev: amdgpu_device pointer
209 * @index: doorbell index
210 * @v: value to write
211 *
212 * Writes @v to the doorbell aperture at the
213 * requested doorbell index (CIK).
214 */
215void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
216{
217 if (index < adev->doorbell.num_doorbells) {
218 writel(v, adev->doorbell.ptr + index);
219 } else {
220 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
221 }
222}
223
224/**
225 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
226 *
227 * @adev: amdgpu_device pointer
228 * @index: doorbell index
229 *
230 * Returns the value in the doorbell aperture at the
231 * requested doorbell index (VEGA10+).
232 */
233u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
234{
235 if (index < adev->doorbell.num_doorbells) {
236 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
237 } else {
238 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
239 return 0;
240 }
241}
242
243/**
244 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
245 *
246 * @adev: amdgpu_device pointer
247 * @index: doorbell index
248 * @v: value to write
249 *
250 * Writes @v to the doorbell aperture at the
251 * requested doorbell index (VEGA10+).
252 */
253void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
254{
255 if (index < adev->doorbell.num_doorbells) {
256 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
257 } else {
258 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
259 }
260}
261
262/**
263 * amdgpu_invalid_rreg - dummy reg read function
264 *
265 * @adev: amdgpu device pointer
266 * @reg: offset of register
267 *
268 * Dummy register read function. Used for register blocks
269 * that certain asics don't have (all asics).
270 * Returns the value in the register.
271 */
272static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
273{
274 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
275 BUG();
276 return 0;
277}
278
279/**
280 * amdgpu_invalid_wreg - dummy reg write function
281 *
282 * @adev: amdgpu device pointer
283 * @reg: offset of register
284 * @v: value to write to the register
285 *
 286 * Dummy register write function. Used for register blocks
287 * that certain asics don't have (all asics).
288 */
289static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
290{
291 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
292 reg, v);
293 BUG();
294}
295
296/**
297 * amdgpu_block_invalid_rreg - dummy reg read function
298 *
299 * @adev: amdgpu device pointer
300 * @block: offset of instance
301 * @reg: offset of register
302 *
303 * Dummy register read function. Used for register blocks
304 * that certain asics don't have (all asics).
305 * Returns the value in the register.
306 */
307static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
308 uint32_t block, uint32_t reg)
309{
310 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
311 reg, block);
312 BUG();
313 return 0;
314}
315
316/**
317 * amdgpu_block_invalid_wreg - dummy reg write function
318 *
319 * @adev: amdgpu device pointer
320 * @block: offset of instance
321 * @reg: offset of register
322 * @v: value to write to the register
323 *
 324 * Dummy register write function. Used for register blocks
325 * that certain asics don't have (all asics).
326 */
327static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
328 uint32_t block,
329 uint32_t reg, uint32_t v)
330{
331 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
332 reg, block, v);
333 BUG();
334}
335
336static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
337{
a4a02777
CK
338 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
339 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
340 &adev->vram_scratch.robj,
341 &adev->vram_scratch.gpu_addr,
342 (void **)&adev->vram_scratch.ptr);
d38ceaf9
AD
343}
344
345static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
346{
078af1a3 347 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
d38ceaf9
AD
348}
349
350/**
351 * amdgpu_program_register_sequence - program an array of registers.
352 *
353 * @adev: amdgpu_device pointer
354 * @registers: pointer to the register array
355 * @array_size: size of the register array
356 *
 357 * Programs an array of registers with AND and OR masks.
358 * This is a helper for setting golden registers.
359 */
360void amdgpu_program_register_sequence(struct amdgpu_device *adev,
361 const u32 *registers,
362 const u32 array_size)
363{
364 u32 tmp, reg, and_mask, or_mask;
365 int i;
366
367 if (array_size % 3)
368 return;
369
 370 for (i = 0; i < array_size; i += 3) {
371 reg = registers[i + 0];
372 and_mask = registers[i + 1];
373 or_mask = registers[i + 2];
374
375 if (and_mask == 0xffffffff) {
376 tmp = or_mask;
377 } else {
378 tmp = RREG32(reg);
379 tmp &= ~and_mask;
380 tmp |= or_mask;
381 }
382 WREG32(reg, tmp);
383 }
384}
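
/*
 * Illustrative sketch: a "golden" register table is a flat array of
 * {offset, and_mask, or_mask} triples consumed by the helper above.
 * The offsets and masks below are dummies, not real golden settings.
 */
static const u32 example_golden_settings[] = {
	/* offset,  and_mask (bits cleared),  or_mask (bits set) */
	0x1234, 0xffffffff, 0x00000001,	/* and_mask of all ones means full overwrite */
	0x5678, 0x0000000f, 0x00000002,	/* clear the low nibble, then set bit 1 */
};

static void example_program_golden(struct amdgpu_device *adev)
{
	amdgpu_program_register_sequence(adev, example_golden_settings,
					 ARRAY_SIZE(example_golden_settings));
}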
385
386void amdgpu_pci_config_reset(struct amdgpu_device *adev)
387{
388 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
389}
390
391/*
392 * GPU doorbell aperture helpers function.
393 */
394/**
395 * amdgpu_doorbell_init - Init doorbell driver information.
396 *
397 * @adev: amdgpu_device pointer
398 *
399 * Init doorbell driver information (CIK)
400 * Returns 0 on success, error on failure.
401 */
402static int amdgpu_doorbell_init(struct amdgpu_device *adev)
403{
705e519e
CK
404 /* No doorbell on SI hardware generation */
405 if (adev->asic_type < CHIP_BONAIRE) {
406 adev->doorbell.base = 0;
407 adev->doorbell.size = 0;
408 adev->doorbell.num_doorbells = 0;
409 adev->doorbell.ptr = NULL;
410 return 0;
411 }
412
d38ceaf9
AD
413 /* doorbell bar mapping */
414 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
415 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
416
edf600da 417 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
d38ceaf9
AD
418 AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
419 if (adev->doorbell.num_doorbells == 0)
420 return -EINVAL;
421
8972e5d2
CK
422 adev->doorbell.ptr = ioremap(adev->doorbell.base,
423 adev->doorbell.num_doorbells *
424 sizeof(u32));
425 if (adev->doorbell.ptr == NULL)
d38ceaf9 426 return -ENOMEM;
d38ceaf9
AD
427
428 return 0;
429}
430
431/**
432 * amdgpu_doorbell_fini - Tear down doorbell driver information.
433 *
434 * @adev: amdgpu_device pointer
435 *
436 * Tear down doorbell driver information (CIK)
437 */
438static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
439{
440 iounmap(adev->doorbell.ptr);
441 adev->doorbell.ptr = NULL;
442}
443
444/**
445 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
446 * setup amdkfd
447 *
448 * @adev: amdgpu_device pointer
449 * @aperture_base: output returning doorbell aperture base physical address
450 * @aperture_size: output returning doorbell aperture size in bytes
451 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
452 *
453 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
454 * takes doorbells required for its own rings and reports the setup to amdkfd.
455 * amdgpu reserved doorbells are at the start of the doorbell aperture.
456 */
457void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
458 phys_addr_t *aperture_base,
459 size_t *aperture_size,
460 size_t *start_offset)
461{
462 /*
463 * The first num_doorbells are used by amdgpu.
464 * amdkfd takes whatever's left in the aperture.
465 */
466 if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
467 *aperture_base = adev->doorbell.base;
468 *aperture_size = adev->doorbell.size;
469 *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
470 } else {
471 *aperture_base = 0;
472 *aperture_size = 0;
473 *start_offset = 0;
474 }
475}
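
/*
 * Illustrative sketch: how amdkfd-facing code could consume the doorbell
 * split reported by the helper above. The variable names are only for
 * demonstration.
 */
static void example_query_kfd_doorbells(struct amdgpu_device *adev)
{
	phys_addr_t base;
	size_t size, offset;

	amdgpu_doorbell_get_kfd_info(adev, &base, &size, &offset);
	/* doorbells in [base + offset, base + size) are available to amdkfd */
}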
476
477/*
478 * amdgpu_wb_*()
455a7bc2 479 * Writeback is the method by which the GPU updates special pages in memory
ea81a173 480 * with the status of certain GPU events (fences, ring pointers, etc.).
d38ceaf9
AD
481 */
482
483/**
484 * amdgpu_wb_fini - Disable Writeback and free memory
485 *
486 * @adev: amdgpu_device pointer
487 *
488 * Disables Writeback and frees the Writeback memory (all asics).
489 * Used at driver shutdown.
490 */
491static void amdgpu_wb_fini(struct amdgpu_device *adev)
492{
493 if (adev->wb.wb_obj) {
a76ed485
AD
494 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
495 &adev->wb.gpu_addr,
496 (void **)&adev->wb.wb);
d38ceaf9
AD
497 adev->wb.wb_obj = NULL;
498 }
499}
500
501/**
502 * amdgpu_wb_init- Init Writeback driver info and allocate memory
503 *
504 * @adev: amdgpu_device pointer
505 *
455a7bc2 506 * Initializes writeback and allocates writeback memory (all asics).
d38ceaf9
AD
507 * Used at driver startup.
 508 * Returns 0 on success or a negative error code on failure.
509 */
510static int amdgpu_wb_init(struct amdgpu_device *adev)
511{
512 int r;
513
514 if (adev->wb.wb_obj == NULL) {
97407b63
AD
515 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
516 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
a76ed485
AD
517 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
518 &adev->wb.wb_obj, &adev->wb.gpu_addr,
519 (void **)&adev->wb.wb);
d38ceaf9
AD
520 if (r) {
521 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
522 return r;
523 }
d38ceaf9
AD
524
525 adev->wb.num_wb = AMDGPU_MAX_WB;
526 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
527
528 /* clear wb memory */
60a970a6 529 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
d38ceaf9
AD
530 }
531
532 return 0;
533}
534
535/**
536 * amdgpu_wb_get - Allocate a wb entry
537 *
538 * @adev: amdgpu_device pointer
539 * @wb: wb index
540 *
541 * Allocate a wb slot for use by the driver (all asics).
542 * Returns 0 on success or -EINVAL on failure.
543 */
544int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
545{
546 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
d38ceaf9 547
97407b63 548 if (offset < adev->wb.num_wb) {
7014285a 549 __set_bit(offset, adev->wb.used);
63ae07ca 550 *wb = offset << 3; /* convert to dw offset */
551 return 0;
552 } else {
553 return -EINVAL;
554 }
555}
556
557/**
558 * amdgpu_wb_free - Free a wb entry
559 *
560 * @adev: amdgpu_device pointer
561 * @wb: wb index
562 *
563 * Free a wb slot allocated for use by the driver (all asics)
564 */
565void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
566{
567 if (wb < adev->wb.num_wb)
63ae07ca 568 __clear_bit(wb >> 3, adev->wb.used);
569}
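
/*
 * Illustrative sketch: typical writeback slot usage. amdgpu_wb_get() returns
 * a dword offset into the shared writeback page; the GPU address of the slot
 * is adev->wb.gpu_addr plus that offset times four. Purely demonstrative.
 */
static int example_use_wb_slot(struct amdgpu_device *adev)
{
	u32 wb;
	int r;

	r = amdgpu_wb_get(adev, &wb);
	if (r)
		return r;

	adev->wb.wb[wb] = 0;	/* CPU-side init; the GPU writes status here later */
	/* ... hand (adev->wb.gpu_addr + wb * 4) to the engine ... */

	amdgpu_wb_free(adev, wb);
	return 0;
}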
570
571/**
572 * amdgpu_vram_location - try to find VRAM location
 573 * @adev: amdgpu device structure holding all necessary information
 574 * @mc: memory controller structure holding memory information
575 * @base: base address at which to put VRAM
576 *
455a7bc2 577 * Function will try to place VRAM at base address provided
578 * as parameter (which is so far either PCI aperture address or
579 * for IGP TOM base address).
580 *
 581 * If there is not enough space to fit the invisible VRAM in the 32-bit
 582 * address space, then we limit the VRAM size to the aperture.
583 *
584 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
585 * this shouldn't be a problem as we are using the PCI aperture as a reference.
586 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
587 * not IGP.
588 *
 589 * Note: we use mc_vram_size as on some boards we need to program the mc to
 590 * cover the whole aperture even if VRAM size is smaller than the aperture
 591 * size (Novell bug 204882 along with lots of Ubuntu ones)
 592 *
 593 * Note: when limiting vram it's safe to overwrite real_vram_size because
 594 * we are not in the case where real_vram_size is smaller than mc_vram_size
 595 * (i.e. not affected by the bogus hw of Novell bug 204882 and the Ubuntu
 596 * ones)
597 *
598 * Note: IGP TOM addr should be the same as the aperture addr, we don't
455a7bc2 599 * explicitly check for that though.
600 *
601 * FIXME: when reducing VRAM size align new size on power of 2.
602 */
603void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
604{
605 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
606
607 mc->vram_start = base;
608 if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
609 dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
610 mc->real_vram_size = mc->aper_size;
611 mc->mc_vram_size = mc->aper_size;
612 }
613 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
614 if (limit && limit < mc->real_vram_size)
615 mc->real_vram_size = limit;
616 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
617 mc->mc_vram_size >> 20, mc->vram_start,
618 mc->vram_end, mc->real_vram_size >> 20);
619}
620
621/**
6f02a696 622 * amdgpu_gart_location - try to find GTT location
 623 * @adev: amdgpu device structure holding all necessary information
 624 * @mc: memory controller structure holding memory information
 625 *
 626 * Function will try to place GTT before or after VRAM.
 627 *
 628 * If the GTT size is bigger than the space left, then we adjust the GTT size.
 629 * Thus this function will never fail.
630 *
631 * FIXME: when reducing GTT size align new size on power of 2.
632 */
6f02a696 633void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
634{
635 u64 size_af, size_bf;
636
637 size_af = adev->mc.mc_mask - mc->vram_end;
638 size_bf = mc->vram_start;
d38ceaf9 639 if (size_bf > size_af) {
6f02a696 640 if (mc->gart_size > size_bf) {
d38ceaf9 641 dev_warn(adev->dev, "limiting GTT\n");
6f02a696 642 mc->gart_size = size_bf;
d38ceaf9 643 }
6f02a696 644 mc->gart_start = 0;
d38ceaf9 645 } else {
6f02a696 646 if (mc->gart_size > size_af) {
d38ceaf9 647 dev_warn(adev->dev, "limiting GTT\n");
6f02a696 648 mc->gart_size = size_af;
d38ceaf9 649 }
6f02a696 650 mc->gart_start = mc->vram_end + 1;
d38ceaf9 651 }
6f02a696 652 mc->gart_end = mc->gart_start + mc->gart_size - 1;
d38ceaf9 653 dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
6f02a696 654 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
655}
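
/*
 * Illustrative sketch: how a GMC IP block might combine the two placement
 * helpers above when initializing the memory controller. Reading the real
 * VRAM base from hardware is only hinted at here; base = 0 is a stand-in.
 */
static void example_place_apertures(struct amdgpu_device *adev)
{
	u64 base = 0;	/* normally taken from the MC FB location register */

	amdgpu_vram_location(adev, &adev->mc, base);
	amdgpu_gart_location(adev, &adev->mc);
}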
656
a05502e5
HC
657/*
658 * Firmware Reservation functions
659 */
660/**
661 * amdgpu_fw_reserve_vram_fini - free fw reserved vram
662 *
663 * @adev: amdgpu_device pointer
664 *
665 * free fw reserved vram if it has been reserved.
666 */
667void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
668{
669 amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
670 NULL, &adev->fw_vram_usage.va);
671}
672
673/**
674 * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
675 *
676 * @adev: amdgpu_device pointer
677 *
678 * create bo vram reservation from fw.
679 */
680int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
681{
682 int r = 0;
683 u64 gpu_addr;
684 u64 vram_size = adev->mc.visible_vram_size;
685
686 adev->fw_vram_usage.va = NULL;
687 adev->fw_vram_usage.reserved_bo = NULL;
688
689 if (adev->fw_vram_usage.size > 0 &&
690 adev->fw_vram_usage.size <= vram_size) {
691
692 r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
693 PAGE_SIZE, true, 0,
694 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
695 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
696 &adev->fw_vram_usage.reserved_bo);
697 if (r)
698 goto error_create;
699
700 r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
701 if (r)
702 goto error_reserve;
703 r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
704 AMDGPU_GEM_DOMAIN_VRAM,
705 adev->fw_vram_usage.start_offset,
706 (adev->fw_vram_usage.start_offset +
707 adev->fw_vram_usage.size), &gpu_addr);
708 if (r)
709 goto error_pin;
710 r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
711 &adev->fw_vram_usage.va);
712 if (r)
713 goto error_kmap;
714
715 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
716 }
717 return r;
718
719error_kmap:
720 amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
721error_pin:
722 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
723error_reserve:
724 amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
725error_create:
726 adev->fw_vram_usage.va = NULL;
727 adev->fw_vram_usage.reserved_bo = NULL;
728 return r;
729}
730
731
732/*
733 * GPU helpers function.
734 */
735/**
c836fec5 736 * amdgpu_need_post - check whether the hw needs to be posted or not
737 *
738 * @adev: amdgpu_device pointer
739 *
 740 * Check whether the asic has been initialized (all asics) at driver startup,
 741 * or whether a post is needed because a hw reset was performed.
 742 * Returns true if post is needed, false if not.
d38ceaf9 743 */
c836fec5 744bool amdgpu_need_post(struct amdgpu_device *adev)
745{
746 uint32_t reg;
747
748 if (amdgpu_sriov_vf(adev))
749 return false;
750
751 if (amdgpu_passthrough(adev)) {
 752 /* for FIJI: in the whole GPU pass-through virtualization case, after a VM
 753 * reboot some old SMC firmware still needs the driver to do vPost, otherwise
 754 * the GPU hangs. SMC firmware versions above 22.15 do not have this flaw,
 755 * so force vPost for SMC versions below 22.15.
756 */
757 if (adev->asic_type == CHIP_FIJI) {
758 int err;
759 uint32_t fw_ver;
760 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
 761 /* force vPost if an error occurred */
762 if (err)
763 return true;
764
765 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
766 if (fw_ver < 0x00160e00)
767 return true;
bec86378 768 }
bec86378 769 }
91fe77eb 770
771 if (adev->has_hw_reset) {
772 adev->has_hw_reset = false;
773 return true;
774 }
775
776 /* bios scratch used on CIK+ */
777 if (adev->asic_type >= CHIP_BONAIRE)
778 return amdgpu_atombios_scratch_need_asic_init(adev);
779
780 /* check MEM_SIZE for older asics */
781 reg = amdgpu_asic_get_config_memsize(adev);
782
783 if ((reg != 0) && (reg != 0xffffffff))
784 return false;
785
786 return true;
787}
788
789/**
790 * amdgpu_dummy_page_init - init dummy page used by the driver
791 *
792 * @adev: amdgpu_device pointer
793 *
794 * Allocate the dummy page used by the driver (all asics).
795 * This dummy page is used by the driver as a filler for gart entries
796 * when pages are taken out of the GART
 798 * Returns 0 on success, -ENOMEM on failure.
798 */
799int amdgpu_dummy_page_init(struct amdgpu_device *adev)
800{
801 if (adev->dummy_page.page)
802 return 0;
803 adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
804 if (adev->dummy_page.page == NULL)
805 return -ENOMEM;
806 adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
807 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
808 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
809 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
810 __free_page(adev->dummy_page.page);
811 adev->dummy_page.page = NULL;
812 return -ENOMEM;
813 }
814 return 0;
815}
816
817/**
818 * amdgpu_dummy_page_fini - free dummy page used by the driver
819 *
820 * @adev: amdgpu_device pointer
821 *
822 * Frees the dummy page used by the driver (all asics).
823 */
824void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
825{
826 if (adev->dummy_page.page == NULL)
827 return;
828 pci_unmap_page(adev->pdev, adev->dummy_page.addr,
829 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
830 __free_page(adev->dummy_page.page);
831 adev->dummy_page.page = NULL;
832}
833
834
835/* ATOM accessor methods */
836/*
837 * ATOM is an interpreted byte code stored in tables in the vbios. The
838 * driver registers callbacks to access registers and the interpreter
 839 * in the driver parses the tables and executes them to program specific
840 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
841 * atombios.h, and atom.c
842 */
843
844/**
845 * cail_pll_read - read PLL register
846 *
847 * @info: atom card_info pointer
848 * @reg: PLL register offset
849 *
850 * Provides a PLL register accessor for the atom interpreter (r4xx+).
851 * Returns the value of the PLL register.
852 */
853static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
854{
855 return 0;
856}
857
858/**
859 * cail_pll_write - write PLL register
860 *
861 * @info: atom card_info pointer
862 * @reg: PLL register offset
863 * @val: value to write to the pll register
864 *
865 * Provides a PLL register accessor for the atom interpreter (r4xx+).
866 */
867static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
868{
869
870}
871
872/**
873 * cail_mc_read - read MC (Memory Controller) register
874 *
875 * @info: atom card_info pointer
876 * @reg: MC register offset
877 *
878 * Provides an MC register accessor for the atom interpreter (r4xx+).
879 * Returns the value of the MC register.
880 */
881static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
882{
883 return 0;
884}
885
886/**
887 * cail_mc_write - write MC (Memory Controller) register
888 *
889 * @info: atom card_info pointer
890 * @reg: MC register offset
 891 * @val: value to write to the MC register
892 *
893 * Provides a MC register accessor for the atom interpreter (r4xx+).
894 */
895static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
896{
897
898}
899
900/**
901 * cail_reg_write - write MMIO register
902 *
903 * @info: atom card_info pointer
904 * @reg: MMIO register offset
 905 * @val: value to write to the MMIO register
906 *
907 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
908 */
909static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
910{
911 struct amdgpu_device *adev = info->dev->dev_private;
912
913 WREG32(reg, val);
914}
915
916/**
917 * cail_reg_read - read MMIO register
918 *
919 * @info: atom card_info pointer
920 * @reg: MMIO register offset
921 *
922 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
923 * Returns the value of the MMIO register.
924 */
925static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
926{
927 struct amdgpu_device *adev = info->dev->dev_private;
928 uint32_t r;
929
930 r = RREG32(reg);
931 return r;
932}
933
934/**
935 * cail_ioreg_write - write IO register
936 *
937 * @info: atom card_info pointer
938 * @reg: IO register offset
 939 * @val: value to write to the IO register
940 *
941 * Provides a IO register accessor for the atom interpreter (r4xx+).
942 */
943static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
944{
945 struct amdgpu_device *adev = info->dev->dev_private;
946
947 WREG32_IO(reg, val);
948}
949
950/**
951 * cail_ioreg_read - read IO register
952 *
953 * @info: atom card_info pointer
954 * @reg: IO register offset
955 *
956 * Provides an IO register accessor for the atom interpreter (r4xx+).
957 * Returns the value of the IO register.
958 */
959static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
960{
961 struct amdgpu_device *adev = info->dev->dev_private;
962 uint32_t r;
963
964 r = RREG32_IO(reg);
965 return r;
966}
967
5b41d94c
KR
968static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
969 struct device_attribute *attr,
970 char *buf)
971{
972 struct drm_device *ddev = dev_get_drvdata(dev);
973 struct amdgpu_device *adev = ddev->dev_private;
974 struct atom_context *ctx = adev->mode_info.atom_context;
975
976 return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
977}
978
979static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
980 NULL);
981
d38ceaf9
AD
982/**
983 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
984 *
985 * @adev: amdgpu_device pointer
986 *
987 * Frees the driver info and register access callbacks for the ATOM
988 * interpreter (r4xx+).
989 * Called at driver shutdown.
990 */
991static void amdgpu_atombios_fini(struct amdgpu_device *adev)
992{
89e0ec9f 993 if (adev->mode_info.atom_context) {
d38ceaf9 994 kfree(adev->mode_info.atom_context->scratch);
89e0ec9f
ML
995 kfree(adev->mode_info.atom_context->iio);
996 }
d38ceaf9
AD
997 kfree(adev->mode_info.atom_context);
998 adev->mode_info.atom_context = NULL;
999 kfree(adev->mode_info.atom_card_info);
1000 adev->mode_info.atom_card_info = NULL;
5b41d94c 1001 device_remove_file(adev->dev, &dev_attr_vbios_version);
d38ceaf9
AD
1002}
1003
1004/**
1005 * amdgpu_atombios_init - init the driver info and callbacks for atombios
1006 *
1007 * @adev: amdgpu_device pointer
1008 *
1009 * Initializes the driver info and register access callbacks for the
1010 * ATOM interpreter (r4xx+).
 1011 * Returns 0 on success, -ENOMEM on failure.
1012 * Called at driver startup.
1013 */
1014static int amdgpu_atombios_init(struct amdgpu_device *adev)
1015{
1016 struct card_info *atom_card_info =
1017 kzalloc(sizeof(struct card_info), GFP_KERNEL);
5b41d94c 1018 int ret;
d38ceaf9
AD
1019
1020 if (!atom_card_info)
1021 return -ENOMEM;
1022
1023 adev->mode_info.atom_card_info = atom_card_info;
1024 atom_card_info->dev = adev->ddev;
1025 atom_card_info->reg_read = cail_reg_read;
1026 atom_card_info->reg_write = cail_reg_write;
1027 /* needed for iio ops */
1028 if (adev->rio_mem) {
1029 atom_card_info->ioreg_read = cail_ioreg_read;
1030 atom_card_info->ioreg_write = cail_ioreg_write;
1031 } else {
9953b72f 1032 DRM_DEBUG("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
d38ceaf9
AD
1033 atom_card_info->ioreg_read = cail_reg_read;
1034 atom_card_info->ioreg_write = cail_reg_write;
1035 }
1036 atom_card_info->mc_read = cail_mc_read;
1037 atom_card_info->mc_write = cail_mc_write;
1038 atom_card_info->pll_read = cail_pll_read;
1039 atom_card_info->pll_write = cail_pll_write;
1040
1041 adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
1042 if (!adev->mode_info.atom_context) {
1043 amdgpu_atombios_fini(adev);
1044 return -ENOMEM;
1045 }
1046
1047 mutex_init(&adev->mode_info.atom_context->mutex);
a5bde2f9
AD
1048 if (adev->is_atom_fw) {
1049 amdgpu_atomfirmware_scratch_regs_init(adev);
1050 amdgpu_atomfirmware_allocate_fb_scratch(adev);
1051 } else {
1052 amdgpu_atombios_scratch_regs_init(adev);
1053 amdgpu_atombios_allocate_fb_scratch(adev);
1054 }
5b41d94c
KR
1055
1056 ret = device_create_file(adev->dev, &dev_attr_vbios_version);
1057 if (ret) {
1058 DRM_ERROR("Failed to create device file for VBIOS version\n");
1059 return ret;
1060 }
1061
d38ceaf9
AD
1062 return 0;
1063}
1064
1065/* if we get transitioned to only one device, take VGA back */
1066/**
1067 * amdgpu_vga_set_decode - enable/disable vga decode
1068 *
1069 * @cookie: amdgpu_device pointer
1070 * @state: enable/disable vga decode
1071 *
1072 * Enable/disable vga decode (all asics).
1073 * Returns VGA resource flags.
1074 */
1075static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
1076{
1077 struct amdgpu_device *adev = cookie;
1078 amdgpu_asic_set_vga_state(adev, state);
1079 if (state)
1080 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1081 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1082 else
1083 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1084}
1085
bab4fee7 1086static void amdgpu_check_block_size(struct amdgpu_device *adev)
a1adf8be
CZ
1087{
1088 /* defines number of bits in page table versus page directory,
1089 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1090 * page table and the remaining bits are in the page directory */
bab4fee7
JZ
1091 if (amdgpu_vm_block_size == -1)
1092 return;
a1adf8be 1093
bab4fee7 1094 if (amdgpu_vm_block_size < 9) {
a1adf8be
CZ
1095 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1096 amdgpu_vm_block_size);
bab4fee7 1097 goto def_value;
a1adf8be
CZ
1098 }
1099
1100 if (amdgpu_vm_block_size > 24 ||
1101 (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
1102 dev_warn(adev->dev, "VM page table size (%d) too large\n",
1103 amdgpu_vm_block_size);
bab4fee7 1104 goto def_value;
a1adf8be 1105 }
bab4fee7
JZ
1106
1107 return;
1108
1109def_value:
1110 amdgpu_vm_block_size = -1;
a1adf8be
CZ
1111}
1112
83ca145d
ZJ
1113static void amdgpu_check_vm_size(struct amdgpu_device *adev)
1114{
64dab074
AD
1115 /* no need to check the default value */
1116 if (amdgpu_vm_size == -1)
1117 return;
1118
76117507 1119 if (!is_power_of_2(amdgpu_vm_size)) {
83ca145d
ZJ
1120 dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
1121 amdgpu_vm_size);
1122 goto def_value;
1123 }
1124
1125 if (amdgpu_vm_size < 1) {
1126 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1127 amdgpu_vm_size);
1128 goto def_value;
1129 }
1130
1131 /*
 1132 * Max GPUVM size for Cayman, SI, CI and VI is 40 bits.
1133 */
1134 if (amdgpu_vm_size > 1024) {
1135 dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
1136 amdgpu_vm_size);
1137 goto def_value;
1138 }
1139
1140 return;
1141
1142def_value:
bab4fee7 1143 amdgpu_vm_size = -1;
83ca145d
ZJ
1144}
1145
d38ceaf9
AD
1146/**
1147 * amdgpu_check_arguments - validate module params
1148 *
1149 * @adev: amdgpu_device pointer
1150 *
1151 * Validates certain module parameters and updates
1152 * the associated values used by the driver (all asics).
1153 */
1154static void amdgpu_check_arguments(struct amdgpu_device *adev)
1155{
5b011235
CZ
1156 if (amdgpu_sched_jobs < 4) {
1157 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1158 amdgpu_sched_jobs);
1159 amdgpu_sched_jobs = 4;
76117507 1160 } else if (!is_power_of_2(amdgpu_sched_jobs)) {
5b011235
CZ
1161 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1162 amdgpu_sched_jobs);
1163 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1164 }
d38ceaf9 1165
83e74db6 1166 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
f9321cc4
CK
1167 /* gart size must be greater or equal to 32M */
1168 dev_warn(adev->dev, "gart size (%d) too small\n",
1169 amdgpu_gart_size);
83e74db6 1170 amdgpu_gart_size = -1;
d38ceaf9
AD
1171 }
1172
36d38372 1173 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
c4e1a13a 1174 /* gtt size must be greater or equal to 32M */
36d38372
CK
1175 dev_warn(adev->dev, "gtt size (%d) too small\n",
1176 amdgpu_gtt_size);
1177 amdgpu_gtt_size = -1;
d38ceaf9
AD
1178 }
1179
d07f14be
RH
1180 /* valid range is between 4 and 9 inclusive */
1181 if (amdgpu_vm_fragment_size != -1 &&
1182 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1183 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1184 amdgpu_vm_fragment_size = -1;
1185 }
1186
83ca145d 1187 amdgpu_check_vm_size(adev);
d38ceaf9 1188
bab4fee7 1189 amdgpu_check_block_size(adev);
6a7f76e7 1190
526bae37 1191 if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
76117507 1192 !is_power_of_2(amdgpu_vram_page_split))) {
6a7f76e7
CK
1193 dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
1194 amdgpu_vram_page_split);
1195 amdgpu_vram_page_split = 1024;
1196 }
d38ceaf9
AD
1197}
1198
1199/**
1200 * amdgpu_switcheroo_set_state - set switcheroo state
1201 *
1202 * @pdev: pci dev pointer
1694467b 1203 * @state: vga_switcheroo state
1204 *
 1205 * Callback for the switcheroo driver. Suspends or resumes
 1206 * the asic before or after it is powered up using ACPI methods.
1207 */
1208static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1209{
1210 struct drm_device *dev = pci_get_drvdata(pdev);
1211
1212 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1213 return;
1214
1215 if (state == VGA_SWITCHEROO_ON) {
7ca85295 1216 pr_info("amdgpu: switched on\n");
d38ceaf9
AD
1217 /* don't suspend or resume card normally */
1218 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1219
810ddc3a 1220 amdgpu_device_resume(dev, true, true);
d38ceaf9 1221
d38ceaf9
AD
1222 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1223 drm_kms_helper_poll_enable(dev);
1224 } else {
7ca85295 1225 pr_info("amdgpu: switched off\n");
d38ceaf9
AD
1226 drm_kms_helper_poll_disable(dev);
1227 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
810ddc3a 1228 amdgpu_device_suspend(dev, true, true);
d38ceaf9
AD
1229 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1230 }
1231}
1232
1233/**
1234 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1235 *
1236 * @pdev: pci dev pointer
1237 *
 1238 * Callback for the switcheroo driver. Checks whether the switcheroo
 1239 * state can be changed.
1240 * Returns true if the state can be changed, false if not.
1241 */
1242static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1243{
1244 struct drm_device *dev = pci_get_drvdata(pdev);
1245
1246 /*
1247 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1248 * locking inversion with the driver load path. And the access here is
1249 * completely racy anyway. So don't bother with locking for now.
1250 */
1251 return dev->open_count == 0;
1252}
1253
1254static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1255 .set_gpu_state = amdgpu_switcheroo_set_state,
1256 .reprobe = NULL,
1257 .can_switch = amdgpu_switcheroo_can_switch,
1258};
1259
1260int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
5fc3aeeb 1261 enum amd_ip_block_type block_type,
1262 enum amd_clockgating_state state)
d38ceaf9
AD
1263{
1264 int i, r = 0;
1265
1266 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1267 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1268 continue;
c722865a
RZ
1269 if (adev->ip_blocks[i].version->type != block_type)
1270 continue;
1271 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1272 continue;
1273 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1274 (void *)adev, state);
1275 if (r)
1276 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1277 adev->ip_blocks[i].version->funcs->name, r);
1278 }
1279 return r;
1280}
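
/*
 * Illustrative sketch: gating the clocks of a single IP block through the
 * helper above. The block type and target state are arbitrary examples.
 */
static int example_gate_uvd_clocks(struct amdgpu_device *adev)
{
	return amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
					    AMD_CG_STATE_GATE);
}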
1281
1282int amdgpu_set_powergating_state(struct amdgpu_device *adev,
5fc3aeeb 1283 enum amd_ip_block_type block_type,
1284 enum amd_powergating_state state)
d38ceaf9
AD
1285{
1286 int i, r = 0;
1287
1288 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1289 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1290 continue;
c722865a
RZ
1291 if (adev->ip_blocks[i].version->type != block_type)
1292 continue;
1293 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1294 continue;
1295 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1296 (void *)adev, state);
1297 if (r)
1298 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1299 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
1300 }
1301 return r;
1302}
1303
6cb2d4e4
HR
1304void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
1305{
1306 int i;
1307
1308 for (i = 0; i < adev->num_ip_blocks; i++) {
1309 if (!adev->ip_blocks[i].status.valid)
1310 continue;
1311 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1312 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1313 }
1314}
1315
5dbbb60b
AD
1316int amdgpu_wait_for_idle(struct amdgpu_device *adev,
1317 enum amd_ip_block_type block_type)
1318{
1319 int i, r;
1320
1321 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1322 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1323 continue;
a1255107
AD
1324 if (adev->ip_blocks[i].version->type == block_type) {
1325 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
5dbbb60b
AD
1326 if (r)
1327 return r;
1328 break;
1329 }
1330 }
1331 return 0;
1332
1333}
1334
1335bool amdgpu_is_idle(struct amdgpu_device *adev,
1336 enum amd_ip_block_type block_type)
1337{
1338 int i;
1339
1340 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1341 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1342 continue;
a1255107
AD
1343 if (adev->ip_blocks[i].version->type == block_type)
1344 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
5dbbb60b
AD
1345 }
1346 return true;
1347
1348}
1349
a1255107
AD
1350struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
1351 enum amd_ip_block_type type)
d38ceaf9
AD
1352{
1353 int i;
1354
1355 for (i = 0; i < adev->num_ip_blocks; i++)
a1255107 1356 if (adev->ip_blocks[i].version->type == type)
d38ceaf9
AD
1357 return &adev->ip_blocks[i];
1358
1359 return NULL;
1360}
1361
1362/**
1363 * amdgpu_ip_block_version_cmp
1364 *
1365 * @adev: amdgpu_device pointer
5fc3aeeb 1366 * @type: enum amd_ip_block_type
d38ceaf9
AD
1367 * @major: major version
1368 * @minor: minor version
1369 *
1370 * return 0 if equal or greater
1371 * return 1 if smaller or the ip_block doesn't exist
1372 */
1373int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
5fc3aeeb 1374 enum amd_ip_block_type type,
d38ceaf9
AD
1375 u32 major, u32 minor)
1376{
a1255107 1377 struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
d38ceaf9 1378
a1255107
AD
1379 if (ip_block && ((ip_block->version->major > major) ||
1380 ((ip_block->version->major == major) &&
1381 (ip_block->version->minor >= minor))))
d38ceaf9
AD
1382 return 0;
1383
1384 return 1;
1385}
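
/*
 * Illustrative sketch: using the comparison helper above to ask whether the
 * GFX IP on this asic is at least version 8.1 (0 means equal or greater).
 */
static bool example_gfx_is_8_1_or_newer(struct amdgpu_device *adev)
{
	return amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX, 8, 1) == 0;
}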
1386
a1255107
AD
1387/**
1388 * amdgpu_ip_block_add
1389 *
1390 * @adev: amdgpu_device pointer
1391 * @ip_block_version: pointer to the IP to add
1392 *
1393 * Adds the IP block driver information to the collection of IPs
1394 * on the asic.
1395 */
1396int amdgpu_ip_block_add(struct amdgpu_device *adev,
1397 const struct amdgpu_ip_block_version *ip_block_version)
1398{
1399 if (!ip_block_version)
1400 return -EINVAL;
1401
a0bae357
HR
1402 DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
1403 ip_block_version->funcs->name);
1404
a1255107
AD
1405 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1406
1407 return 0;
1408}
1409
483ef985 1410static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
9accf2fd
ED
1411{
1412 adev->enable_virtual_display = false;
1413
1414 if (amdgpu_virtual_display) {
1415 struct drm_device *ddev = adev->ddev;
1416 const char *pci_address_name = pci_name(ddev->pdev);
0f66356d 1417 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
9accf2fd
ED
1418
1419 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1420 pciaddstr_tmp = pciaddstr;
0f66356d
ED
1421 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1422 pciaddname = strsep(&pciaddname_tmp, ",");
967de2a9
YT
1423 if (!strcmp("all", pciaddname)
1424 || !strcmp(pci_address_name, pciaddname)) {
0f66356d
ED
1425 long num_crtc;
1426 int res = -1;
1427
9accf2fd 1428 adev->enable_virtual_display = true;
0f66356d
ED
1429
1430 if (pciaddname_tmp)
1431 res = kstrtol(pciaddname_tmp, 10,
1432 &num_crtc);
1433
1434 if (!res) {
1435 if (num_crtc < 1)
1436 num_crtc = 1;
1437 if (num_crtc > 6)
1438 num_crtc = 6;
1439 adev->mode_info.num_crtc = num_crtc;
1440 } else {
1441 adev->mode_info.num_crtc = 1;
1442 }
9accf2fd
ED
1443 break;
1444 }
1445 }
1446
0f66356d
ED
1447 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1448 amdgpu_virtual_display, pci_address_name,
1449 adev->enable_virtual_display, adev->mode_info.num_crtc);
1450
1451 kfree(pciaddstr);
1452 }
1453}
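
/*
 * Illustrative sketch of the option string parsed above (values are examples
 * only). Entries are separated by ';', and each entry is a PCI address (or
 * "all") optionally followed by ',<num_crtc>', e.g. on the kernel command
 * line:
 *
 *   amdgpu.virtual_display=0000:01:00.0,2;all,1
 *
 * This would enable two virtual CRTCs on the device at 0000:01:00.0 and one
 * on every other device (num_crtc is clamped to the 1..6 range).
 */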
1454
e2a75f88
AD
1455static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1456{
e2a75f88
AD
1457 const char *chip_name;
1458 char fw_name[30];
1459 int err;
1460 const struct gpu_info_firmware_header_v1_0 *hdr;
1461
ab4fe3e1
HR
1462 adev->firmware.gpu_info_fw = NULL;
1463
e2a75f88
AD
1464 switch (adev->asic_type) {
1465 case CHIP_TOPAZ:
1466 case CHIP_TONGA:
1467 case CHIP_FIJI:
1468 case CHIP_POLARIS11:
1469 case CHIP_POLARIS10:
1470 case CHIP_POLARIS12:
1471 case CHIP_CARRIZO:
1472 case CHIP_STONEY:
1473#ifdef CONFIG_DRM_AMDGPU_SI
1474 case CHIP_VERDE:
1475 case CHIP_TAHITI:
1476 case CHIP_PITCAIRN:
1477 case CHIP_OLAND:
1478 case CHIP_HAINAN:
1479#endif
1480#ifdef CONFIG_DRM_AMDGPU_CIK
1481 case CHIP_BONAIRE:
1482 case CHIP_HAWAII:
1483 case CHIP_KAVERI:
1484 case CHIP_KABINI:
1485 case CHIP_MULLINS:
1486#endif
1487 default:
1488 return 0;
1489 case CHIP_VEGA10:
1490 chip_name = "vega10";
1491 break;
2d2e5e7e
AD
1492 case CHIP_RAVEN:
1493 chip_name = "raven";
1494 break;
e2a75f88
AD
1495 }
1496
1497 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
ab4fe3e1 1498 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
e2a75f88
AD
1499 if (err) {
1500 dev_err(adev->dev,
1501 "Failed to load gpu_info firmware \"%s\"\n",
1502 fw_name);
1503 goto out;
1504 }
ab4fe3e1 1505 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
e2a75f88
AD
1506 if (err) {
1507 dev_err(adev->dev,
1508 "Failed to validate gpu_info firmware \"%s\"\n",
1509 fw_name);
1510 goto out;
1511 }
1512
ab4fe3e1 1513 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
e2a75f88
AD
1514 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1515
1516 switch (hdr->version_major) {
1517 case 1:
1518 {
1519 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
ab4fe3e1 1520 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
e2a75f88
AD
1521 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1522
b5ab16bf
AD
1523 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1524 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1525 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1526 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
e2a75f88 1527 adev->gfx.config.max_texture_channel_caches =
b5ab16bf
AD
1528 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1529 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1530 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1531 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1532 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
e2a75f88 1533 adev->gfx.config.double_offchip_lds_buf =
b5ab16bf
AD
1534 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1535 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
51fd0370
HZ
1536 adev->gfx.cu_info.max_waves_per_simd =
1537 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1538 adev->gfx.cu_info.max_scratch_slots_per_cu =
1539 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1540 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
e2a75f88
AD
1541 break;
1542 }
1543 default:
1544 dev_err(adev->dev,
1545 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1546 err = -EINVAL;
1547 goto out;
1548 }
1549out:
e2a75f88
AD
1550 return err;
1551}
1552
d38ceaf9
AD
1553static int amdgpu_early_init(struct amdgpu_device *adev)
1554{
aaa36a97 1555 int i, r;
d38ceaf9 1556
483ef985 1557 amdgpu_device_enable_virtual_display(adev);
a6be7570 1558
d38ceaf9 1559 switch (adev->asic_type) {
aaa36a97
AD
1560 case CHIP_TOPAZ:
1561 case CHIP_TONGA:
48299f95 1562 case CHIP_FIJI:
2cc0c0b5
FC
1563 case CHIP_POLARIS11:
1564 case CHIP_POLARIS10:
c4642a47 1565 case CHIP_POLARIS12:
aaa36a97 1566 case CHIP_CARRIZO:
39bb0c92
SL
1567 case CHIP_STONEY:
1568 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
aaa36a97
AD
1569 adev->family = AMDGPU_FAMILY_CZ;
1570 else
1571 adev->family = AMDGPU_FAMILY_VI;
1572
1573 r = vi_set_ip_blocks(adev);
1574 if (r)
1575 return r;
1576 break;
33f34802
KW
1577#ifdef CONFIG_DRM_AMDGPU_SI
1578 case CHIP_VERDE:
1579 case CHIP_TAHITI:
1580 case CHIP_PITCAIRN:
1581 case CHIP_OLAND:
1582 case CHIP_HAINAN:
295d0daf 1583 adev->family = AMDGPU_FAMILY_SI;
33f34802
KW
1584 r = si_set_ip_blocks(adev);
1585 if (r)
1586 return r;
1587 break;
1588#endif
a2e73f56
AD
1589#ifdef CONFIG_DRM_AMDGPU_CIK
1590 case CHIP_BONAIRE:
1591 case CHIP_HAWAII:
1592 case CHIP_KAVERI:
1593 case CHIP_KABINI:
1594 case CHIP_MULLINS:
1595 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1596 adev->family = AMDGPU_FAMILY_CI;
1597 else
1598 adev->family = AMDGPU_FAMILY_KV;
1599
1600 r = cik_set_ip_blocks(adev);
1601 if (r)
1602 return r;
1603 break;
1604#endif
2ca8a5d2
CZ
1605 case CHIP_VEGA10:
1606 case CHIP_RAVEN:
1607 if (adev->asic_type == CHIP_RAVEN)
1608 adev->family = AMDGPU_FAMILY_RV;
1609 else
1610 adev->family = AMDGPU_FAMILY_AI;
460826e6
KW
1611
1612 r = soc15_set_ip_blocks(adev);
1613 if (r)
1614 return r;
1615 break;
d38ceaf9
AD
1616 default:
1617 /* FIXME: not supported yet */
1618 return -EINVAL;
1619 }
1620
e2a75f88
AD
1621 r = amdgpu_device_parse_gpu_info_fw(adev);
1622 if (r)
1623 return r;
1624
3149d9da
XY
1625 if (amdgpu_sriov_vf(adev)) {
1626 r = amdgpu_virt_request_full_gpu(adev, true);
1627 if (r)
1628 return r;
1629 }
1630
d38ceaf9
AD
1631 for (i = 0; i < adev->num_ip_blocks; i++) {
1632 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
ed8cf00c
HR
1633 DRM_ERROR("disabled ip block: %d <%s>\n",
1634 i, adev->ip_blocks[i].version->funcs->name);
a1255107 1635 adev->ip_blocks[i].status.valid = false;
d38ceaf9 1636 } else {
a1255107
AD
1637 if (adev->ip_blocks[i].version->funcs->early_init) {
1638 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2c1a2784 1639 if (r == -ENOENT) {
a1255107 1640 adev->ip_blocks[i].status.valid = false;
2c1a2784 1641 } else if (r) {
a1255107
AD
1642 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1643 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1644 return r;
2c1a2784 1645 } else {
a1255107 1646 adev->ip_blocks[i].status.valid = true;
2c1a2784 1647 }
974e6b64 1648 } else {
a1255107 1649 adev->ip_blocks[i].status.valid = true;
d38ceaf9 1650 }
d38ceaf9
AD
1651 }
1652 }
1653
395d1fb9
NH
1654 adev->cg_flags &= amdgpu_cg_mask;
1655 adev->pg_flags &= amdgpu_pg_mask;
1656
d38ceaf9
AD
1657 return 0;
1658}
1659
1660static int amdgpu_init(struct amdgpu_device *adev)
1661{
1662 int i, r;
1663
1664 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1665 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1666 continue;
a1255107 1667 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2c1a2784 1668 if (r) {
a1255107
AD
1669 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1670 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1671 return r;
2c1a2784 1672 }
a1255107 1673 adev->ip_blocks[i].status.sw = true;
d38ceaf9 1674 /* need to do gmc hw init early so we can allocate gpu mem */
a1255107 1675 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
d38ceaf9 1676 r = amdgpu_vram_scratch_init(adev);
2c1a2784
AD
1677 if (r) {
1678 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
d38ceaf9 1679 return r;
2c1a2784 1680 }
a1255107 1681 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784
AD
1682 if (r) {
1683 DRM_ERROR("hw_init %d failed %d\n", i, r);
d38ceaf9 1684 return r;
2c1a2784 1685 }
d38ceaf9 1686 r = amdgpu_wb_init(adev);
2c1a2784
AD
1687 if (r) {
1688 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
d38ceaf9 1689 return r;
2c1a2784 1690 }
a1255107 1691 adev->ip_blocks[i].status.hw = true;
2493664f
ML
1692
1693 /* right after GMC hw init, we create CSA */
1694 if (amdgpu_sriov_vf(adev)) {
1695 r = amdgpu_allocate_static_csa(adev);
1696 if (r) {
1697 DRM_ERROR("allocate CSA failed %d\n", r);
1698 return r;
1699 }
1700 }
d38ceaf9
AD
1701 }
1702 }
1703
1704 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1705 if (!adev->ip_blocks[i].status.sw)
d38ceaf9
AD
1706 continue;
1707 /* gmc hw init is done early */
a1255107 1708 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
d38ceaf9 1709 continue;
a1255107 1710 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784 1711 if (r) {
a1255107
AD
1712 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1713 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1714 return r;
2c1a2784 1715 }
a1255107 1716 adev->ip_blocks[i].status.hw = true;
d38ceaf9
AD
1717 }
1718
1719 return 0;
1720}
1721
1722static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
1723{
1724 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1725}
1726
1727static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
1728{
1729 return !!memcmp(adev->gart.ptr, adev->reset_magic,
1730 AMDGPU_RESET_MAGIC_NUM);
1731}
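
/*
 * Illustrative sketch: how a reset path could use the two helpers above. The
 * magic bytes are a snapshot of the start of the GART table in VRAM, so
 * comparing them after a reset indicates whether VRAM contents were lost.
 * Purely schematic, not the driver's actual reset flow.
 */
static bool example_vram_survived_reset(struct amdgpu_device *adev)
{
	bool lost = amdgpu_check_vram_lost(adev);

	/* re-arm the magic for the next reset after re-initialization */
	amdgpu_fill_reset_magic(adev);
	return !lost;
}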
1732
2dc80b00 1733static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
d38ceaf9
AD
1734{
1735 int i = 0, r;
1736
1737 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1738 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1739 continue;
4a446d55 1740 /* skip CG for VCE/UVD, it's handled specially */
a1255107
AD
1741 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1742 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
4a446d55 1743 /* enable clockgating to save power */
a1255107
AD
1744 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1745 AMD_CG_STATE_GATE);
4a446d55
AD
1746 if (r) {
1747 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
a1255107 1748 adev->ip_blocks[i].version->funcs->name, r);
4a446d55
AD
1749 return r;
1750 }
b0b00ff1 1751 }
d38ceaf9 1752 }
2dc80b00
S
1753 return 0;
1754}
1755
1756static int amdgpu_late_init(struct amdgpu_device *adev)
1757{
1758 int i = 0, r;
1759
1760 for (i = 0; i < adev->num_ip_blocks; i++) {
1761 if (!adev->ip_blocks[i].status.valid)
1762 continue;
1763 if (adev->ip_blocks[i].version->funcs->late_init) {
1764 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
1765 if (r) {
1766 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1767 adev->ip_blocks[i].version->funcs->name, r);
1768 return r;
1769 }
1770 adev->ip_blocks[i].status.late_initialized = true;
1771 }
1772 }
1773
1774 mod_delayed_work(system_wq, &adev->late_init_work,
1775 msecs_to_jiffies(AMDGPU_RESUME_MS));
d38ceaf9 1776
0c49e0b8 1777 amdgpu_fill_reset_magic(adev);
d38ceaf9
AD
1778
1779 return 0;
1780}
1781
1782static int amdgpu_fini(struct amdgpu_device *adev)
1783{
1784 int i, r;
1785
3e96dbfd
AD
1786 /* need to disable SMC first */
1787 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1788 if (!adev->ip_blocks[i].status.hw)
3e96dbfd 1789 continue;
a1255107 1790 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3e96dbfd 1791 /* ungate blocks before hw fini so that we can shut down the blocks safely */
a1255107
AD
1792 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1793 AMD_CG_STATE_UNGATE);
3e96dbfd
AD
1794 if (r) {
1795 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
a1255107 1796 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd
AD
1797 return r;
1798 }
a1255107 1799 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3e96dbfd
AD
1800 /* XXX handle errors */
1801 if (r) {
1802 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
a1255107 1803 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd 1804 }
a1255107 1805 adev->ip_blocks[i].status.hw = false;
3e96dbfd
AD
1806 break;
1807 }
1808 }
1809
d38ceaf9 1810 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1811 if (!adev->ip_blocks[i].status.hw)
d38ceaf9 1812 continue;
a1255107 1813 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
d38ceaf9
AD
1814 amdgpu_wb_fini(adev);
1815 amdgpu_vram_scratch_fini(adev);
1816 }
8201a67a
RZ
1817
1818 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1819 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1820 /* ungate blocks before hw fini so that we can shut down the blocks safely */
1821 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1822 AMD_CG_STATE_UNGATE);
1823 if (r) {
1824 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1825 adev->ip_blocks[i].version->funcs->name, r);
1826 return r;
1827 }
2c1a2784 1828 }
8201a67a 1829
a1255107 1830 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
d38ceaf9 1831 /* XXX handle errors */
2c1a2784 1832 if (r) {
a1255107
AD
1833 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1834 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1835 }
8201a67a 1836
a1255107 1837 adev->ip_blocks[i].status.hw = false;
d38ceaf9
AD
1838 }
1839
1840 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1841 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 1842 continue;
a1255107 1843 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
d38ceaf9 1844 /* XXX handle errors */
2c1a2784 1845 if (r) {
a1255107
AD
1846 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1847 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1848 }
a1255107
AD
1849 adev->ip_blocks[i].status.sw = false;
1850 adev->ip_blocks[i].status.valid = false;
d38ceaf9
AD
1851 }
1852
a6dcfd9c 1853 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1854 if (!adev->ip_blocks[i].status.late_initialized)
8a2eef1d 1855 continue;
a1255107
AD
1856 if (adev->ip_blocks[i].version->funcs->late_fini)
1857 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1858 adev->ip_blocks[i].status.late_initialized = false;
a6dcfd9c
ML
1859 }
1860
030308fc 1861 if (amdgpu_sriov_vf(adev))
3149d9da 1862 amdgpu_virt_release_full_gpu(adev, false);
2493664f 1863
d38ceaf9
AD
1864 return 0;
1865}
1866
2dc80b00
S
1867static void amdgpu_late_init_func_handler(struct work_struct *work)
1868{
1869 struct amdgpu_device *adev =
1870 container_of(work, struct amdgpu_device, late_init_work.work);
1871 amdgpu_late_set_cg_state(adev);
1872}
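/*
 * Illustrative sketch, not part of the driver: amdgpu_late_init() does not
 * gate clocks directly; it arms adev->late_init_work, and the handler just
 * shown calls amdgpu_late_set_cg_state() roughly AMDGPU_RESUME_MS after
 * init/resume, once the IP blocks have settled.  Condensed flow (helper name
 * assumed) for reference:
 */
static void example_deferred_cg_flow(struct amdgpu_device *adev)
{
	/* amdgpu_late_init() arms the delayed work... */
	mod_delayed_work(system_wq, &adev->late_init_work,
			 msecs_to_jiffies(AMDGPU_RESUME_MS));
	/* ...and later, from the workqueue, amdgpu_late_init_func_handler()
	 * enables clockgating on all non-UVD/VCE blocks. */
}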
1873
faefba95 1874int amdgpu_suspend(struct amdgpu_device *adev)
d38ceaf9
AD
1875{
1876 int i, r;
1877
e941ea99
XY
1878 if (amdgpu_sriov_vf(adev))
1879 amdgpu_virt_request_full_gpu(adev, false);
1880
c5a93a28
FC
1881 /* ungate SMC block first */
1882 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1883 AMD_CG_STATE_UNGATE);
1884 if (r) {
1885 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
1886 }
1887
d38ceaf9 1888 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1889 if (!adev->ip_blocks[i].status.valid)
d38ceaf9
AD
1890 continue;
1891 /* ungate blocks so that suspend can properly shut them down */
c5a93a28 1892 if (i != AMD_IP_BLOCK_TYPE_SMC) {
a1255107
AD
1893 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1894 AMD_CG_STATE_UNGATE);
c5a93a28 1895 if (r) {
a1255107
AD
1896 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1897 adev->ip_blocks[i].version->funcs->name, r);
c5a93a28 1898 }
2c1a2784 1899 }
d38ceaf9 1900 /* XXX handle errors */
a1255107 1901 r = adev->ip_blocks[i].version->funcs->suspend(adev);
d38ceaf9 1902 /* XXX handle errors */
2c1a2784 1903 if (r) {
a1255107
AD
1904 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1905 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1906 }
d38ceaf9
AD
1907 }
1908
e941ea99
XY
1909 if (amdgpu_sriov_vf(adev))
1910 amdgpu_virt_release_full_gpu(adev, false);
1911
d38ceaf9
AD
1912 return 0;
1913}
1914
e4f0fdcc 1915static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
a90ad3c2
ML
1916{
1917 int i, r;
1918
2cb681b6
ML
1919 static enum amd_ip_block_type ip_order[] = {
1920 AMD_IP_BLOCK_TYPE_GMC,
1921 AMD_IP_BLOCK_TYPE_COMMON,
2cb681b6
ML
1922 AMD_IP_BLOCK_TYPE_IH,
1923 };
a90ad3c2 1924
2cb681b6
ML
1925 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1926 int j;
1927 struct amdgpu_ip_block *block;
a90ad3c2 1928
2cb681b6
ML
1929 for (j = 0; j < adev->num_ip_blocks; j++) {
1930 block = &adev->ip_blocks[j];
1931
1932 if (block->version->type != ip_order[i] ||
1933 !block->status.valid)
1934 continue;
1935
1936 r = block->version->funcs->hw_init(adev);
1937 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
a90ad3c2
ML
1938 }
1939 }
1940
1941 return 0;
1942}
1943
e4f0fdcc 1944static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
a90ad3c2
ML
1945{
1946 int i, r;
1947
2cb681b6
ML
1948 static enum amd_ip_block_type ip_order[] = {
1949 AMD_IP_BLOCK_TYPE_SMC,
ef4c166d 1950 AMD_IP_BLOCK_TYPE_PSP,
2cb681b6
ML
1951 AMD_IP_BLOCK_TYPE_DCE,
1952 AMD_IP_BLOCK_TYPE_GFX,
1953 AMD_IP_BLOCK_TYPE_SDMA,
257deb8c
FM
1954 AMD_IP_BLOCK_TYPE_UVD,
1955 AMD_IP_BLOCK_TYPE_VCE
2cb681b6 1956 };
a90ad3c2 1957
2cb681b6
ML
1958 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1959 int j;
1960 struct amdgpu_ip_block *block;
a90ad3c2 1961
2cb681b6
ML
1962 for (j = 0; j < adev->num_ip_blocks; j++) {
1963 block = &adev->ip_blocks[j];
1964
1965 if (block->version->type != ip_order[i] ||
1966 !block->status.valid)
1967 continue;
1968
1969 r = block->version->funcs->hw_init(adev);
1970 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
a90ad3c2
ML
1971 }
1972 }
1973
1974 return 0;
1975}
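/*
 * Illustrative sketch, not part of the driver: the two SR-IOV re-init paths
 * above share the same inner walk ("hw_init every valid block, in a fixed
 * type order").  A hypothetical common helper (name assumed) would look like:
 */
static void example_sriov_reinit_by_order(struct amdgpu_device *adev,
					  const enum amd_ip_block_type *order,
					  int count)
{
	int i, j, r;

	for (i = 0; i < count; i++) {
		for (j = 0; j < adev->num_ip_blocks; j++) {
			struct amdgpu_ip_block *block = &adev->ip_blocks[j];

			if (block->version->type != order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n",
				 block->version->funcs->name,
				 r ? "failed" : "succeeded");
		}
	}
}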
1976
fcf0649f 1977static int amdgpu_resume_phase1(struct amdgpu_device *adev)
d38ceaf9
AD
1978{
1979 int i, r;
1980
a90ad3c2
ML
1981 for (i = 0; i < adev->num_ip_blocks; i++) {
1982 if (!adev->ip_blocks[i].status.valid)
1983 continue;
a90ad3c2
ML
1984 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1985 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
fcf0649f
CZ
1986 adev->ip_blocks[i].version->type ==
1987 AMD_IP_BLOCK_TYPE_IH) {
1988 r = adev->ip_blocks[i].version->funcs->resume(adev);
1989 if (r) {
1990 DRM_ERROR("resume of IP block <%s> failed %d\n",
1991 adev->ip_blocks[i].version->funcs->name, r);
1992 return r;
1993 }
a90ad3c2
ML
1994 }
1995 }
1996
1997 return 0;
1998}
1999
fcf0649f 2000static int amdgpu_resume_phase2(struct amdgpu_device *adev)
d38ceaf9
AD
2001{
2002 int i, r;
2003
2004 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2005 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 2006 continue;
fcf0649f
CZ
2007 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2008 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2009 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH )
2010 continue;
a1255107 2011 r = adev->ip_blocks[i].version->funcs->resume(adev);
2c1a2784 2012 if (r) {
a1255107
AD
2013 DRM_ERROR("resume of IP block <%s> failed %d\n",
2014 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 2015 return r;
2c1a2784 2016 }
d38ceaf9
AD
2017 }
2018
2019 return 0;
2020}
2021
fcf0649f
CZ
2022static int amdgpu_resume(struct amdgpu_device *adev)
2023{
2024 int r;
2025
2026 r = amdgpu_resume_phase1(adev);
2027 if (r)
2028 return r;
2029 r = amdgpu_resume_phase2(adev);
2030
2031 return r;
2032}
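/*
 * Illustrative sketch, not part of the driver: phase 1 above brings back only
 * the blocks needed to re-establish memory access (COMMON, GMC, IH); phase 2
 * resumes everything else.  The split expressed as a hypothetical predicate:
 */
static bool example_is_phase1_block(const struct amdgpu_ip_block *block)
{
	switch (block->version->type) {
	case AMD_IP_BLOCK_TYPE_COMMON:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_IH:
		return true;
	default:
		return false;
	}
}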
2033
4e99a44e 2034static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 2035{
6867e1b5
ML
2036 if (amdgpu_sriov_vf(adev)) {
2037 if (adev->is_atom_fw) {
2038 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2039 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2040 } else {
2041 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2042 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2043 }
2044
2045 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2046 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
a5bde2f9 2047 }
048765ad
AR
2048}
2049
4562236b
HW
2050bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2051{
2052 switch (asic_type) {
2053#if defined(CONFIG_DRM_AMD_DC)
2054 case CHIP_BONAIRE:
2055 case CHIP_HAWAII:
0d6fbccb 2056 case CHIP_KAVERI:
4562236b
HW
2057 case CHIP_CARRIZO:
2058 case CHIP_STONEY:
2059 case CHIP_POLARIS11:
2060 case CHIP_POLARIS10:
2c8ad2d5 2061 case CHIP_POLARIS12:
4562236b
HW
2062 case CHIP_TONGA:
2063 case CHIP_FIJI:
2064#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
2065 return amdgpu_dc != 0;
4562236b 2066#endif
17b7cf8c
AD
2067 case CHIP_KABINI:
2068 case CHIP_MULLINS:
2069 return amdgpu_dc > 0;
42f8ffa1
HW
2070 case CHIP_VEGA10:
2071#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
fd187853 2072 case CHIP_RAVEN:
42f8ffa1 2073#endif
fd187853 2074 return amdgpu_dc != 0;
4562236b
HW
2075#endif
2076 default:
2077 return false;
2078 }
2079}
2080
2081/**
2082 * amdgpu_device_has_dc_support - check if dc is supported
2083 *
2084 * @adev: amdgpu_device pointer
2085 *
2086 * Returns true for supported, false for not supported
2087 */
2088bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2089{
2555039d
XY
2090 if (amdgpu_sriov_vf(adev))
2091 return false;
2092
4562236b
HW
2093 return amdgpu_device_asic_has_dc_support(adev->asic_type);
2094}
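/*
 * Illustrative sketch, not part of the driver: the check above is how the
 * rest of this file picks between the legacy modeset helpers and the DC
 * (atomic) paths, e.g. when restoring displays after a reset.  Hypothetical
 * condensed form (helper name assumed):
 */
static void example_restore_displays(struct amdgpu_device *adev)
{
	if (amdgpu_device_has_dc_support(adev))
		amdgpu_dm_display_resume(adev);			/* DC / atomic path */
	else
		drm_helper_resume_force_mode(adev->ddev);	/* pre-DC path */
}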
2095
d38ceaf9
AD
2096/**
2097 * amdgpu_device_init - initialize the driver
2098 *
2099 * @adev: amdgpu_device pointer
2100 * @pdev: drm dev pointer
2101 * @pdev: pci dev pointer
2102 * @flags: driver flags
2103 *
2104 * Initializes the driver info and hw (all asics).
2105 * Returns 0 for success or an error on failure.
2106 * Called at driver startup.
2107 */
2108int amdgpu_device_init(struct amdgpu_device *adev,
2109 struct drm_device *ddev,
2110 struct pci_dev *pdev,
2111 uint32_t flags)
2112{
2113 int r, i;
2114 bool runtime = false;
95844d20 2115 u32 max_MBps;
d38ceaf9
AD
2116
2117 adev->shutdown = false;
2118 adev->dev = &pdev->dev;
2119 adev->ddev = ddev;
2120 adev->pdev = pdev;
2121 adev->flags = flags;
2f7d10b3 2122 adev->asic_type = flags & AMD_ASIC_MASK;
d38ceaf9 2123 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
6f02a696 2124 adev->mc.gart_size = 512 * 1024 * 1024;
d38ceaf9
AD
2125 adev->accel_working = false;
2126 adev->num_rings = 0;
2127 adev->mman.buffer_funcs = NULL;
2128 adev->mman.buffer_funcs_ring = NULL;
2129 adev->vm_manager.vm_pte_funcs = NULL;
2d55e45a 2130 adev->vm_manager.vm_pte_num_rings = 0;
d38ceaf9 2131 adev->gart.gart_funcs = NULL;
f54d1867 2132 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
b8866c26 2133 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
d38ceaf9
AD
2134
2135 adev->smc_rreg = &amdgpu_invalid_rreg;
2136 adev->smc_wreg = &amdgpu_invalid_wreg;
2137 adev->pcie_rreg = &amdgpu_invalid_rreg;
2138 adev->pcie_wreg = &amdgpu_invalid_wreg;
36b9a952
HR
2139 adev->pciep_rreg = &amdgpu_invalid_rreg;
2140 adev->pciep_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
2141 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2142 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2143 adev->didt_rreg = &amdgpu_invalid_rreg;
2144 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
2145 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2146 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
2147 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2148 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2149
3e39ab90
AD
2150 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2151 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2152 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
2153
2154 /* mutex initialization are all done here so we
2155 * can recall function without having locking issues */
d38ceaf9 2156 atomic_set(&adev->irq.ih.lock, 0);
0e5ca0d1 2157 mutex_init(&adev->firmware.mutex);
d38ceaf9
AD
2158 mutex_init(&adev->pm.mutex);
2159 mutex_init(&adev->gfx.gpu_clock_mutex);
2160 mutex_init(&adev->srbm_mutex);
b8866c26 2161 mutex_init(&adev->gfx.pipe_reserve_mutex);
d38ceaf9 2162 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9 2163 mutex_init(&adev->mn_lock);
e23b74aa 2164 mutex_init(&adev->virt.vf_errors.lock);
d38ceaf9
AD
2165 hash_init(adev->mn_hash);
2166
2167 amdgpu_check_arguments(adev);
2168
d38ceaf9
AD
2169 spin_lock_init(&adev->mmio_idx_lock);
2170 spin_lock_init(&adev->smc_idx_lock);
2171 spin_lock_init(&adev->pcie_idx_lock);
2172 spin_lock_init(&adev->uvd_ctx_idx_lock);
2173 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 2174 spin_lock_init(&adev->gc_cac_idx_lock);
16abb5d2 2175 spin_lock_init(&adev->se_cac_idx_lock);
d38ceaf9 2176 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 2177 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 2178
0c4e7fa5
CZ
2179 INIT_LIST_HEAD(&adev->shadow_list);
2180 mutex_init(&adev->shadow_list_lock);
2181
5c1354bd
CZ
2182 INIT_LIST_HEAD(&adev->gtt_list);
2183 spin_lock_init(&adev->gtt_list_lock);
2184
795f2813
AR
2185 INIT_LIST_HEAD(&adev->ring_lru_list);
2186 spin_lock_init(&adev->ring_lru_list_lock);
2187
2dc80b00
S
2188 INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);
2189
0fa49558
AX
2190 /* Registers mapping */
2191 /* TODO: block userspace mapping of io register */
da69c161
KW
2192 if (adev->asic_type >= CHIP_BONAIRE) {
2193 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2194 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2195 } else {
2196 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2197 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2198 }
d38ceaf9 2199
d38ceaf9
AD
2200 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2201 if (adev->rmmio == NULL) {
2202 return -ENOMEM;
2203 }
2204 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2205 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2206
705e519e
CK
2207 /* doorbell bar mapping */
2208 amdgpu_doorbell_init(adev);
d38ceaf9
AD
2209
2210 /* io port mapping */
2211 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2212 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2213 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2214 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2215 break;
2216 }
2217 }
2218 if (adev->rio_mem == NULL)
b64a18c5 2219 DRM_INFO("PCI I/O BAR is not found.\n");
d38ceaf9
AD
2220
2221 /* early init functions */
2222 r = amdgpu_early_init(adev);
2223 if (r)
2224 return r;
2225
2226 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2227 /* this will fail for cards that aren't VGA class devices, just
2228 * ignore it */
2229 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
2230
2231 if (amdgpu_runtime_pm == 1)
2232 runtime = true;
e9bef455 2233 if (amdgpu_device_is_px(ddev))
d38ceaf9 2234 runtime = true;
84c8b22e
LW
2235 if (!pci_is_thunderbolt_attached(adev->pdev))
2236 vga_switcheroo_register_client(adev->pdev,
2237 &amdgpu_switcheroo_ops, runtime);
d38ceaf9
AD
2238 if (runtime)
2239 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2240
2241 /* Read BIOS */
83ba126a
AD
2242 if (!amdgpu_get_bios(adev)) {
2243 r = -EINVAL;
2244 goto failed;
2245 }
f7e9e9fe 2246
d38ceaf9 2247 r = amdgpu_atombios_init(adev);
2c1a2784
AD
2248 if (r) {
2249 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
e23b74aa 2250 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
83ba126a 2251 goto failed;
2c1a2784 2252 }
d38ceaf9 2253
4e99a44e
ML
2254 /* detect if we are with an SRIOV vbios */
2255 amdgpu_device_detect_sriov_bios(adev);
048765ad 2256
d38ceaf9 2257 /* Post card if necessary */
91fe77eb 2258 if (amdgpu_need_post(adev)) {
d38ceaf9 2259 if (!adev->bios) {
bec86378 2260 dev_err(adev->dev, "no vBIOS found\n");
83ba126a
AD
2261 r = -EINVAL;
2262 goto failed;
d38ceaf9 2263 }
bec86378 2264 DRM_INFO("GPU posting now...\n");
4e99a44e
ML
2265 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2266 if (r) {
2267 dev_err(adev->dev, "gpu post error!\n");
2268 goto failed;
2269 }
d38ceaf9
AD
2270 }
2271
88b64e95
AD
2272 if (adev->is_atom_fw) {
2273 /* Initialize clocks */
2274 r = amdgpu_atomfirmware_get_clock_info(adev);
2275 if (r) {
2276 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
e23b74aa 2277 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
88b64e95
AD
2278 goto failed;
2279 }
2280 } else {
a5bde2f9
AD
2281 /* Initialize clocks */
2282 r = amdgpu_atombios_get_clock_info(adev);
2283 if (r) {
2284 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
e23b74aa 2285 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
89041940 2286 goto failed;
a5bde2f9
AD
2287 }
2288 /* init i2c buses */
4562236b
HW
2289 if (!amdgpu_device_has_dc_support(adev))
2290 amdgpu_atombios_i2c_init(adev);
2c1a2784 2291 }
d38ceaf9
AD
2292
2293 /* Fence driver */
2294 r = amdgpu_fence_driver_init(adev);
2c1a2784
AD
2295 if (r) {
2296 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
e23b74aa 2297 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
83ba126a 2298 goto failed;
2c1a2784 2299 }
d38ceaf9
AD
2300
2301 /* init the mode config */
2302 drm_mode_config_init(adev->ddev);
2303
2304 r = amdgpu_init(adev);
2305 if (r) {
2c1a2784 2306 dev_err(adev->dev, "amdgpu_init failed\n");
e23b74aa 2307 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
d38ceaf9 2308 amdgpu_fini(adev);
83ba126a 2309 goto failed;
d38ceaf9
AD
2310 }
2311
2312 adev->accel_working = true;
2313
e59c0205
AX
2314 amdgpu_vm_check_compute_bug(adev);
2315
95844d20
MO
2316 /* Initialize the buffer migration limit. */
2317 if (amdgpu_moverate >= 0)
2318 max_MBps = amdgpu_moverate;
2319 else
2320 max_MBps = 8; /* Allow 8 MB/s. */
2321 /* Get a log2 for easy divisions. */
2322 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2323
d38ceaf9
AD
2324 r = amdgpu_ib_pool_init(adev);
2325 if (r) {
2326 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
e23b74aa 2327 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
83ba126a 2328 goto failed;
d38ceaf9
AD
2329 }
2330
2331 r = amdgpu_ib_ring_tests(adev);
2332 if (r)
2333 DRM_ERROR("ib ring test failed (%d).\n", r);
2334
2dc8f81e
HC
2335 if (amdgpu_sriov_vf(adev))
2336 amdgpu_virt_init_data_exchange(adev);
2337
9bc92b9c
ML
2338 amdgpu_fbdev_init(adev);
2339
d2f52ac8
RZ
2340 r = amdgpu_pm_sysfs_init(adev);
2341 if (r)
2342 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2343
d38ceaf9 2344 r = amdgpu_gem_debugfs_init(adev);
3f14e623 2345 if (r)
d38ceaf9 2346 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
d38ceaf9
AD
2347
2348 r = amdgpu_debugfs_regs_init(adev);
3f14e623 2349 if (r)
d38ceaf9 2350 DRM_ERROR("registering register debugfs failed (%d).\n", r);
d38ceaf9 2351
4f0955fc
HR
2352 r = amdgpu_debugfs_test_ib_ring_init(adev);
2353 if (r)
2354 DRM_ERROR("registering register test ib ring debugfs failed (%d).\n", r);
2355
50ab2533 2356 r = amdgpu_debugfs_firmware_init(adev);
3f14e623 2357 if (r)
50ab2533 2358 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
50ab2533 2359
db95e218
KR
2360 r = amdgpu_debugfs_vbios_dump_init(adev);
2361 if (r)
2362 DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r);
2363
d38ceaf9
AD
2364 if ((amdgpu_testing & 1)) {
2365 if (adev->accel_working)
2366 amdgpu_test_moves(adev);
2367 else
2368 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2369 }
d38ceaf9
AD
2370 if (amdgpu_benchmarking) {
2371 if (adev->accel_working)
2372 amdgpu_benchmark(adev, amdgpu_benchmarking);
2373 else
2374 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2375 }
2376
2377 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2378 * explicit gating rather than handling it automatically.
2379 */
2380 r = amdgpu_late_init(adev);
2c1a2784
AD
2381 if (r) {
2382 dev_err(adev->dev, "amdgpu_late_init failed\n");
e23b74aa 2383 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
83ba126a 2384 goto failed;
2c1a2784 2385 }
d38ceaf9
AD
2386
2387 return 0;
83ba126a
AD
2388
2389failed:
89041940 2390 amdgpu_vf_error_trans_all(adev);
83ba126a
AD
2391 if (runtime)
2392 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2393 return r;
d38ceaf9
AD
2394}
2395
d38ceaf9
AD
2396/**
2397 * amdgpu_device_fini - tear down the driver
2398 *
2399 * @adev: amdgpu_device pointer
2400 *
2401 * Tear down the driver info (all asics).
2402 * Called at driver shutdown.
2403 */
2404void amdgpu_device_fini(struct amdgpu_device *adev)
2405{
2406 int r;
2407
2408 DRM_INFO("amdgpu: finishing device.\n");
2409 adev->shutdown = true;
db2c2a97
PD
2410 if (adev->mode_info.mode_config_initialized)
2411 drm_crtc_force_disable_all(adev->ddev);
d38ceaf9
AD
2412 /* evict vram memory */
2413 amdgpu_bo_evict_vram(adev);
2414 amdgpu_ib_pool_fini(adev);
a05502e5 2415 amdgpu_fw_reserve_vram_fini(adev);
d38ceaf9
AD
2416 amdgpu_fence_driver_fini(adev);
2417 amdgpu_fbdev_fini(adev);
2418 r = amdgpu_fini(adev);
ab4fe3e1
HR
2419 if (adev->firmware.gpu_info_fw) {
2420 release_firmware(adev->firmware.gpu_info_fw);
2421 adev->firmware.gpu_info_fw = NULL;
2422 }
d38ceaf9 2423 adev->accel_working = false;
2dc80b00 2424 cancel_delayed_work_sync(&adev->late_init_work);
d38ceaf9 2425 /* free i2c buses */
4562236b
HW
2426 if (!amdgpu_device_has_dc_support(adev))
2427 amdgpu_i2c_fini(adev);
d38ceaf9
AD
2428 amdgpu_atombios_fini(adev);
2429 kfree(adev->bios);
2430 adev->bios = NULL;
84c8b22e
LW
2431 if (!pci_is_thunderbolt_attached(adev->pdev))
2432 vga_switcheroo_unregister_client(adev->pdev);
83ba126a
AD
2433 if (adev->flags & AMD_IS_PX)
2434 vga_switcheroo_fini_domain_pm_ops(adev->dev);
d38ceaf9
AD
2435 vga_client_register(adev->pdev, NULL, NULL, NULL);
2436 if (adev->rio_mem)
2437 pci_iounmap(adev->pdev, adev->rio_mem);
2438 adev->rio_mem = NULL;
2439 iounmap(adev->rmmio);
2440 adev->rmmio = NULL;
705e519e 2441 amdgpu_doorbell_fini(adev);
d2f52ac8 2442 amdgpu_pm_sysfs_fini(adev);
d38ceaf9 2443 amdgpu_debugfs_regs_cleanup(adev);
d38ceaf9
AD
2444}
2445
2446
2447/*
2448 * Suspend & resume.
2449 */
2450/**
810ddc3a 2451 * amdgpu_device_suspend - initiate device suspend
d38ceaf9
AD
2452 *
2453 * @dev: drm dev pointer
2454 * @suspend: true to also put the device into a low power (D3hot) state
2455 *
2456 * Puts the hw in the suspend state (all asics).
2457 * Returns 0 for success or an error on failure.
2458 * Called at driver suspend.
2459 */
810ddc3a 2460int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
d38ceaf9
AD
2461{
2462 struct amdgpu_device *adev;
2463 struct drm_crtc *crtc;
2464 struct drm_connector *connector;
5ceb54c6 2465 int r;
d38ceaf9
AD
2466
2467 if (dev == NULL || dev->dev_private == NULL) {
2468 return -ENODEV;
2469 }
2470
2471 adev = dev->dev_private;
2472
2473 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2474 return 0;
2475
2476 drm_kms_helper_poll_disable(dev);
2477
4562236b
HW
2478 if (!amdgpu_device_has_dc_support(adev)) {
2479 /* turn off display hw */
2480 drm_modeset_lock_all(dev);
2481 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2482 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2483 }
2484 drm_modeset_unlock_all(dev);
d38ceaf9
AD
2485 }
2486
ba997709
YZ
2487 amdgpu_amdkfd_suspend(adev);
2488
756e6880 2489 /* unpin the front buffers and cursors */
d38ceaf9 2490 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
756e6880 2491 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
d38ceaf9
AD
2492 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2493 struct amdgpu_bo *robj;
2494
756e6880
AD
2495 if (amdgpu_crtc->cursor_bo) {
2496 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2497 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2498 if (r == 0) {
2499 amdgpu_bo_unpin(aobj);
2500 amdgpu_bo_unreserve(aobj);
2501 }
2502 }
2503
d38ceaf9
AD
2504 if (rfb == NULL || rfb->obj == NULL) {
2505 continue;
2506 }
2507 robj = gem_to_amdgpu_bo(rfb->obj);
2508 /* don't unpin kernel fb objects */
2509 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
7a6901d7 2510 r = amdgpu_bo_reserve(robj, true);
d38ceaf9
AD
2511 if (r == 0) {
2512 amdgpu_bo_unpin(robj);
2513 amdgpu_bo_unreserve(robj);
2514 }
2515 }
2516 }
2517 /* evict vram memory */
2518 amdgpu_bo_evict_vram(adev);
2519
5ceb54c6 2520 amdgpu_fence_driver_suspend(adev);
d38ceaf9
AD
2521
2522 r = amdgpu_suspend(adev);
2523
a0a71e49
AD
2524 /* evict remaining vram memory
2525 * This second call to evict vram is to evict the gart page table
2526 * using the CPU.
2527 */
d38ceaf9
AD
2528 amdgpu_bo_evict_vram(adev);
2529
d05da0e2 2530 amdgpu_atombios_scratch_regs_save(adev);
d38ceaf9
AD
2531 pci_save_state(dev->pdev);
2532 if (suspend) {
2533 /* Shut down the device */
2534 pci_disable_device(dev->pdev);
2535 pci_set_power_state(dev->pdev, PCI_D3hot);
74b0b157 2536 } else {
2537 r = amdgpu_asic_reset(adev);
2538 if (r)
2539 DRM_ERROR("amdgpu asic reset failed\n");
d38ceaf9
AD
2540 }
2541
2542 if (fbcon) {
2543 console_lock();
2544 amdgpu_fbdev_set_suspend(adev, 1);
2545 console_unlock();
2546 }
2547 return 0;
2548}
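/*
 * Illustrative sketch, not part of this file: the system sleep callbacks in
 * amdgpu_drv.c are expected to wrap the suspend path above roughly as
 * follows (wrapper name and flag values are assumptions, not quoted code):
 */
static int example_pm_suspend(struct drm_device *drm_dev)
{
	/* true, true: enter D3hot and suspend the fbdev console */
	return amdgpu_device_suspend(drm_dev, true, true);
}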
2549
2550/**
810ddc3a 2551 * amdgpu_device_resume - initiate device resume
d38ceaf9
AD
2552 *
2553 * @dev: drm dev pointer
2554 *
2555 * Bring the hw back to operating state (all asics).
2556 * Returns 0 for success or an error on failure.
2557 * Called at driver resume.
2558 */
810ddc3a 2559int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
d38ceaf9
AD
2560{
2561 struct drm_connector *connector;
2562 struct amdgpu_device *adev = dev->dev_private;
756e6880 2563 struct drm_crtc *crtc;
03161a6e 2564 int r = 0;
d38ceaf9
AD
2565
2566 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2567 return 0;
2568
74b0b157 2569 if (fbcon)
d38ceaf9 2570 console_lock();
74b0b157 2571
d38ceaf9
AD
2572 if (resume) {
2573 pci_set_power_state(dev->pdev, PCI_D0);
2574 pci_restore_state(dev->pdev);
74b0b157 2575 r = pci_enable_device(dev->pdev);
03161a6e
HR
2576 if (r)
2577 goto unlock;
d38ceaf9 2578 }
d05da0e2 2579 amdgpu_atombios_scratch_regs_restore(adev);
d38ceaf9
AD
2580
2581 /* post card */
c836fec5 2582 if (amdgpu_need_post(adev)) {
74b0b157 2583 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2584 if (r)
2585 DRM_ERROR("amdgpu asic init failed\n");
2586 }
d38ceaf9
AD
2587
2588 r = amdgpu_resume(adev);
e6707218 2589 if (r) {
ca198528 2590 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
03161a6e 2591 goto unlock;
e6707218 2592 }
5ceb54c6
AD
2593 amdgpu_fence_driver_resume(adev);
2594
ca198528
FC
2595 if (resume) {
2596 r = amdgpu_ib_ring_tests(adev);
2597 if (r)
2598 DRM_ERROR("ib ring test failed (%d).\n", r);
2599 }
d38ceaf9
AD
2600
2601 r = amdgpu_late_init(adev);
03161a6e
HR
2602 if (r)
2603 goto unlock;
d38ceaf9 2604
756e6880
AD
2605 /* pin cursors */
2606 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2607 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2608
2609 if (amdgpu_crtc->cursor_bo) {
2610 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2611 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2612 if (r == 0) {
2613 r = amdgpu_bo_pin(aobj,
2614 AMDGPU_GEM_DOMAIN_VRAM,
2615 &amdgpu_crtc->cursor_addr);
2616 if (r != 0)
2617 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2618 amdgpu_bo_unreserve(aobj);
2619 }
2620 }
2621 }
ba997709
YZ
2622 r = amdgpu_amdkfd_resume(adev);
2623 if (r)
2624 goto unlock;
756e6880 2625
d38ceaf9
AD
2626 /* blat the mode back in */
2627 if (fbcon) {
4562236b
HW
2628 if (!amdgpu_device_has_dc_support(adev)) {
2629 /* pre DCE11 */
2630 drm_helper_resume_force_mode(dev);
2631
2632 /* turn on display hw */
2633 drm_modeset_lock_all(dev);
2634 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2635 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2636 }
2637 drm_modeset_unlock_all(dev);
2638 } else {
2639 /*
2640 * There is no equivalent atomic helper to turn on
2641 * display, so we defined our own function for this,
2642 * once suspend resume is supported by the atomic
2643 * framework this will be reworked
2644 */
2645 amdgpu_dm_display_resume(adev);
d38ceaf9
AD
2646 }
2647 }
2648
2649 drm_kms_helper_poll_enable(dev);
23a1a9e5
L
2650
2651 /*
2652 * Most of the connector probing functions try to acquire runtime pm
2653 * refs to ensure that the GPU is powered on when connector polling is
2654 * performed. Since we're calling this from a runtime PM callback,
2655 * trying to acquire rpm refs will cause us to deadlock.
2656 *
2657 * Since we're guaranteed to be holding the rpm lock, it's safe to
2658 * temporarily disable the rpm helpers so this doesn't deadlock us.
2659 */
2660#ifdef CONFIG_PM
2661 dev->dev->power.disable_depth++;
2662#endif
4562236b
HW
2663 if (!amdgpu_device_has_dc_support(adev))
2664 drm_helper_hpd_irq_event(dev);
2665 else
2666 drm_kms_helper_hotplug_event(dev);
23a1a9e5
L
2667#ifdef CONFIG_PM
2668 dev->dev->power.disable_depth--;
2669#endif
d38ceaf9 2670
03161a6e 2671 if (fbcon)
d38ceaf9 2672 amdgpu_fbdev_set_suspend(adev, 0);
03161a6e
HR
2673
2674unlock:
2675 if (fbcon)
d38ceaf9 2676 console_unlock();
d38ceaf9 2677
03161a6e 2678 return r;
d38ceaf9
AD
2679}
2680
63fbf42f
CZ
2681static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2682{
2683 int i;
2684 bool asic_hang = false;
2685
f993d628
ML
2686 if (amdgpu_sriov_vf(adev))
2687 return true;
2688
63fbf42f 2689 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2690 if (!adev->ip_blocks[i].status.valid)
63fbf42f 2691 continue;
a1255107
AD
2692 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2693 adev->ip_blocks[i].status.hang =
2694 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2695 if (adev->ip_blocks[i].status.hang) {
2696 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
63fbf42f
CZ
2697 asic_hang = true;
2698 }
2699 }
2700 return asic_hang;
2701}
2702
4d446656 2703static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
d31a501e
CZ
2704{
2705 int i, r = 0;
2706
2707 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2708 if (!adev->ip_blocks[i].status.valid)
d31a501e 2709 continue;
a1255107
AD
2710 if (adev->ip_blocks[i].status.hang &&
2711 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2712 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
d31a501e
CZ
2713 if (r)
2714 return r;
2715 }
2716 }
2717
2718 return 0;
2719}
2720
35d782fe
CZ
2721static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2722{
da146d3b
AD
2723 int i;
2724
2725 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2726 if (!adev->ip_blocks[i].status.valid)
da146d3b 2727 continue;
a1255107
AD
2728 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2729 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2730 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
98512bb8
KW
2731 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
2732 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
a1255107 2733 if (adev->ip_blocks[i].status.hang) {
da146d3b
AD
2734 DRM_INFO("Some block need full reset!\n");
2735 return true;
2736 }
2737 }
35d782fe
CZ
2738 }
2739 return false;
2740}
2741
2742static int amdgpu_soft_reset(struct amdgpu_device *adev)
2743{
2744 int i, r = 0;
2745
2746 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2747 if (!adev->ip_blocks[i].status.valid)
35d782fe 2748 continue;
a1255107
AD
2749 if (adev->ip_blocks[i].status.hang &&
2750 adev->ip_blocks[i].version->funcs->soft_reset) {
2751 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
35d782fe
CZ
2752 if (r)
2753 return r;
2754 }
2755 }
2756
2757 return 0;
2758}
2759
2760static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2761{
2762 int i, r = 0;
2763
2764 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2765 if (!adev->ip_blocks[i].status.valid)
35d782fe 2766 continue;
a1255107
AD
2767 if (adev->ip_blocks[i].status.hang &&
2768 adev->ip_blocks[i].version->funcs->post_soft_reset)
2769 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
35d782fe
CZ
2770 if (r)
2771 return r;
2772 }
2773
2774 return 0;
2775}
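/*
 * Illustrative sketch, not part of the driver: amdgpu_gpu_reset() below
 * combines the soft-reset helpers above into "try a targeted soft reset
 * first, escalate to a full ASIC reset only if that is not enough".
 * Hypothetical condensed form (helper name assumed):
 */
static bool example_soft_reset_or_escalate(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_need_full_reset(adev))
		return true;			/* some block demands a full reset */

	amdgpu_pre_soft_reset(adev);
	r = amdgpu_soft_reset(adev);
	amdgpu_post_soft_reset(adev);

	/* soft reset failed or blocks are still hung -> escalate */
	return r || amdgpu_check_soft_reset(adev);
}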
2776
3ad81f16
CZ
2777bool amdgpu_need_backup(struct amdgpu_device *adev)
2778{
2779 if (adev->flags & AMD_IS_APU)
2780 return false;
2781
2782 return amdgpu_lockup_timeout > 0 ? true : false;
2783}
2784
53cdccd5
CZ
2785static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2786 struct amdgpu_ring *ring,
2787 struct amdgpu_bo *bo,
f54d1867 2788 struct dma_fence **fence)
53cdccd5
CZ
2789{
2790 uint32_t domain;
2791 int r;
2792
23d2e504
RH
2793 if (!bo->shadow)
2794 return 0;
2795
1d284797 2796 r = amdgpu_bo_reserve(bo, true);
23d2e504
RH
2797 if (r)
2798 return r;
2799 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2800 /* if bo has been evicted, then no need to recover */
2801 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
82521316
RH
2802 r = amdgpu_bo_validate(bo->shadow);
2803 if (r) {
2804 DRM_ERROR("bo validate failed!\n");
2805 goto err;
2806 }
2807
23d2e504 2808 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
53cdccd5 2809 NULL, fence, true);
23d2e504
RH
2810 if (r) {
2811 DRM_ERROR("recover page table failed!\n");
2812 goto err;
2813 }
2814 }
53cdccd5 2815err:
23d2e504
RH
2816 amdgpu_bo_unreserve(bo);
2817 return r;
53cdccd5
CZ
2818}
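/*
 * Illustrative sketch, not part of the driver: both reset paths below walk
 * adev->shadow_list with the helper above, chaining the returned fences so
 * that only one restore is outstanding while the previous one is waited on.
 * Hypothetical condensed form (helper name assumed):
 */
static int example_restore_all_shadows(struct amdgpu_device *adev,
				       struct amdgpu_ring *ring)
{
	struct amdgpu_bo *bo, *tmp;
	struct dma_fence *fence = NULL, *next = NULL;
	int r = 0;

	mutex_lock(&adev->shadow_list_lock);
	list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
		next = NULL;
		amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
		if (fence) {
			r = dma_fence_wait(fence, false);
			if (r)
				break;		/* previous restore failed */
		}
		dma_fence_put(fence);
		fence = next;
	}
	mutex_unlock(&adev->shadow_list_lock);

	if (fence && !r)
		r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}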
2819
a90ad3c2
ML
2820/**
2821 * amdgpu_sriov_gpu_reset - reset the asic
2822 *
2823 * @adev: amdgpu device pointer
7225f873 2824 * @job: which job triggered the hang
a90ad3c2
ML
2825 *
2826 * Attempt to reset the GPU if it has hung (all asics),
2827 * for the SR-IOV case.
2828 * Returns 0 for success or an error on failure.
2829 */
7225f873 2830int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
a90ad3c2 2831{
65781c78 2832 int i, j, r = 0;
a90ad3c2
ML
2833 int resched;
2834 struct amdgpu_bo *bo, *tmp;
2835 struct amdgpu_ring *ring;
2836 struct dma_fence *fence = NULL, *next = NULL;
2837
147b5983 2838 mutex_lock(&adev->virt.lock_reset);
a90ad3c2 2839 atomic_inc(&adev->gpu_reset_counter);
3224a12b 2840 adev->in_sriov_reset = true;
a90ad3c2
ML
2841
2842 /* block TTM */
2843 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2844
65781c78
ML
2845 /* we start from the ring that triggered the GPU hang */
2846 j = job ? job->ring->idx : 0;
a90ad3c2 2847
65781c78
ML
2848 /* block scheduler */
2849 for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2850 ring = adev->rings[i % AMDGPU_MAX_RINGS];
a90ad3c2
ML
2851 if (!ring || !ring->sched.thread)
2852 continue;
2853
2854 kthread_park(ring->sched.thread);
65781c78
ML
2855
2856 if (job && j != i)
2857 continue;
2858
4f059ecd 2859 /* last chance to check whether the job was already removed from the
65781c78 2860 * mirror list, since we have already spent some time in kthread_park */
4f059ecd 2861 if (job && list_empty(&job->base.node)) {
65781c78
ML
2862 kthread_unpark(ring->sched.thread);
2863 goto give_up_reset;
2864 }
2865
2866 if (job && amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
2867 amd_sched_job_kickout(&job->base);
2868
2869 /* only do job_reset on the hung ring if @job is not NULL */
a8a51a70 2870 amd_sched_hw_job_reset(&ring->sched, NULL);
a90ad3c2 2871
65781c78 2872 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2f9d4084 2873 amdgpu_fence_driver_force_completion(ring);
65781c78 2874 }
a90ad3c2
ML
2875
2876 /* request to take full control of GPU before re-initialization */
7225f873 2877 if (job)
a90ad3c2
ML
2878 amdgpu_virt_reset_gpu(adev);
2879 else
2880 amdgpu_virt_request_full_gpu(adev, true);
2881
2882
2883 /* Resume IP prior to SMC */
e4f0fdcc 2884 amdgpu_sriov_reinit_early(adev);
a90ad3c2
ML
2885
2886 /* we need to recover the GART before resuming SMC/CP/SDMA */
2887 amdgpu_ttm_recover_gart(adev);
2888
2889 /* now we are okay to resume SMC/CP/SDMA */
e4f0fdcc 2890 amdgpu_sriov_reinit_late(adev);
a90ad3c2
ML
2891
2892 amdgpu_irq_gpu_reset_resume_helper(adev);
2893
2894 if (amdgpu_ib_ring_tests(adev))
2895 dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
2896
2897 /* release full control of GPU after ib test */
2898 amdgpu_virt_release_full_gpu(adev, true);
2899
2900 DRM_INFO("recover vram bo from shadow\n");
2901
2902 ring = adev->mman.buffer_funcs_ring;
2903 mutex_lock(&adev->shadow_list_lock);
2904 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
236763d3 2905 next = NULL;
a90ad3c2
ML
2906 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2907 if (fence) {
2908 r = dma_fence_wait(fence, false);
2909 if (r) {
2910 WARN(r, "recovery from shadow isn't completed\n");
2911 break;
2912 }
2913 }
2914
2915 dma_fence_put(fence);
2916 fence = next;
2917 }
2918 mutex_unlock(&adev->shadow_list_lock);
2919
2920 if (fence) {
2921 r = dma_fence_wait(fence, false);
2922 if (r)
2923 WARN(r, "recovery from shadow isn't completed\n");
2924 }
2925 dma_fence_put(fence);
2926
65781c78
ML
2927 for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2928 ring = adev->rings[i % AMDGPU_MAX_RINGS];
a90ad3c2
ML
2929 if (!ring || !ring->sched.thread)
2930 continue;
2931
65781c78
ML
2932 if (job && j != i) {
2933 kthread_unpark(ring->sched.thread);
2934 continue;
2935 }
2936
a90ad3c2
ML
2937 amd_sched_job_recovery(&ring->sched);
2938 kthread_unpark(ring->sched.thread);
2939 }
2940
2941 drm_helper_resume_force_mode(adev->ddev);
65781c78 2942give_up_reset:
a90ad3c2
ML
2943 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2944 if (r) {
2945 /* bad news, how to tell it to userspace? */
2946 dev_info(adev->dev, "GPU reset failed\n");
65781c78
ML
2947 } else {
2948 dev_info(adev->dev, "GPU reset succeeded!\n");
a90ad3c2
ML
2949 }
2950
3224a12b 2951 adev->in_sriov_reset = false;
147b5983 2952 mutex_unlock(&adev->virt.lock_reset);
a90ad3c2
ML
2953 return r;
2954}
2955
d38ceaf9
AD
2956/**
2957 * amdgpu_gpu_reset - reset the asic
2958 *
2959 * @adev: amdgpu device pointer
2960 *
2961 * Attempt to reset the GPU if it has hung (all asics).
2962 * Returns 0 for success or an error on failure.
2963 */
2964int amdgpu_gpu_reset(struct amdgpu_device *adev)
2965{
4562236b 2966 struct drm_atomic_state *state = NULL;
d38ceaf9
AD
2967 int i, r;
2968 int resched;
0c49e0b8 2969 bool need_full_reset, vram_lost = false;
fb140b29 2970
63fbf42f
CZ
2971 if (!amdgpu_check_soft_reset(adev)) {
2972 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2973 return 0;
2974 }
d38ceaf9 2975
d94aed5a 2976 atomic_inc(&adev->gpu_reset_counter);
d38ceaf9 2977
a3c47d6b
CZ
2978 /* block TTM */
2979 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
4562236b
HW
2980 /* store modesetting */
2981 if (amdgpu_device_has_dc_support(adev))
2982 state = drm_atomic_helper_suspend(adev->ddev);
a3c47d6b 2983
0875dc9e
CZ
2984 /* block scheduler */
2985 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2986 struct amdgpu_ring *ring = adev->rings[i];
2987
51687759 2988 if (!ring || !ring->sched.thread)
0875dc9e
CZ
2989 continue;
2990 kthread_park(ring->sched.thread);
a8a51a70 2991 amd_sched_hw_job_reset(&ring->sched, NULL);
2f9d4084
ML
2992 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2993 amdgpu_fence_driver_force_completion(ring);
0875dc9e 2994 }
d38ceaf9 2995
35d782fe 2996 need_full_reset = amdgpu_need_full_reset(adev);
d38ceaf9 2997
35d782fe
CZ
2998 if (!need_full_reset) {
2999 amdgpu_pre_soft_reset(adev);
3000 r = amdgpu_soft_reset(adev);
3001 amdgpu_post_soft_reset(adev);
3002 if (r || amdgpu_check_soft_reset(adev)) {
3003 DRM_INFO("soft reset failed, will fallback to full reset!\n");
3004 need_full_reset = true;
3005 }
f1aa7e08
CZ
3006 }
3007
35d782fe 3008 if (need_full_reset) {
35d782fe 3009 r = amdgpu_suspend(adev);
bfa99269 3010
35d782fe 3011retry:
d05da0e2 3012 amdgpu_atombios_scratch_regs_save(adev);
35d782fe 3013 r = amdgpu_asic_reset(adev);
d05da0e2 3014 amdgpu_atombios_scratch_regs_restore(adev);
35d782fe
CZ
3015 /* post card */
3016 amdgpu_atom_asic_init(adev->mode_info.atom_context);
3017
3018 if (!r) {
3019 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
fcf0649f
CZ
3020 r = amdgpu_resume_phase1(adev);
3021 if (r)
3022 goto out;
0c49e0b8 3023 vram_lost = amdgpu_check_vram_lost(adev);
f1892138 3024 if (vram_lost) {
0c49e0b8 3025 DRM_ERROR("VRAM is lost!\n");
f1892138
CZ
3026 atomic_inc(&adev->vram_lost_counter);
3027 }
fcf0649f
CZ
3028 r = amdgpu_ttm_recover_gart(adev);
3029 if (r)
3030 goto out;
3031 r = amdgpu_resume_phase2(adev);
3032 if (r)
3033 goto out;
0c49e0b8
CZ
3034 if (vram_lost)
3035 amdgpu_fill_reset_magic(adev);
35d782fe 3036 }
d38ceaf9 3037 }
fcf0649f 3038out:
d38ceaf9 3039 if (!r) {
e72cfd58 3040 amdgpu_irq_gpu_reset_resume_helper(adev);
1f465087
CZ
3041 r = amdgpu_ib_ring_tests(adev);
3042 if (r) {
3043 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
40019dc4 3044 r = amdgpu_suspend(adev);
53cdccd5 3045 need_full_reset = true;
40019dc4 3046 goto retry;
1f465087 3047 }
53cdccd5
CZ
3048 /**
3049 * recover VM page tables, since we cannot depend on VRAM being
3050 * consistent after a full GPU reset.
3051 */
3052 if (need_full_reset && amdgpu_need_backup(adev)) {
3053 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
3054 struct amdgpu_bo *bo, *tmp;
f54d1867 3055 struct dma_fence *fence = NULL, *next = NULL;
53cdccd5
CZ
3056
3057 DRM_INFO("recover vram bo from shadow\n");
3058 mutex_lock(&adev->shadow_list_lock);
3059 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
236763d3 3060 next = NULL;
53cdccd5
CZ
3061 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
3062 if (fence) {
f54d1867 3063 r = dma_fence_wait(fence, false);
53cdccd5 3064 if (r) {
1d7b17b0 3065 WARN(r, "recovery from shadow isn't completed\n");
53cdccd5
CZ
3066 break;
3067 }
3068 }
1f465087 3069
f54d1867 3070 dma_fence_put(fence);
53cdccd5
CZ
3071 fence = next;
3072 }
3073 mutex_unlock(&adev->shadow_list_lock);
3074 if (fence) {
f54d1867 3075 r = dma_fence_wait(fence, false);
53cdccd5 3076 if (r)
1d7b17b0 3077 WARN(r, "recovery from shadow isn't completed\n");
53cdccd5 3078 }
f54d1867 3079 dma_fence_put(fence);
53cdccd5 3080 }
d38ceaf9
AD
3081 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3082 struct amdgpu_ring *ring = adev->rings[i];
51687759
CZ
3083
3084 if (!ring || !ring->sched.thread)
d38ceaf9 3085 continue;
53cdccd5 3086
aa1c8900 3087 amd_sched_job_recovery(&ring->sched);
0875dc9e 3088 kthread_unpark(ring->sched.thread);
d38ceaf9 3089 }
d38ceaf9 3090 } else {
2200edac 3091 dev_err(adev->dev, "asic resume failed (%d).\n", r);
d38ceaf9 3092 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
51687759 3093 if (adev->rings[i] && adev->rings[i]->sched.thread) {
0875dc9e 3094 kthread_unpark(adev->rings[i]->sched.thread);
0875dc9e 3095 }
d38ceaf9
AD
3096 }
3097 }
3098
4562236b
HW
3099 if (amdgpu_device_has_dc_support(adev)) {
3100 r = drm_atomic_helper_resume(adev->ddev, state);
3101 amdgpu_dm_display_resume(adev);
3102 } else
3103 drm_helper_resume_force_mode(adev->ddev);
d38ceaf9
AD
3104
3105 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
89041940 3106 if (r) {
d38ceaf9
AD
3107 /* bad news, how to tell it to userspace? */
3108 dev_info(adev->dev, "GPU reset failed\n");
89041940
GW
3109 }
3110 else {
6643be65 3111 dev_info(adev->dev, "GPU reset succeeded!\n");
89041940 3112 }
d38ceaf9 3113
89041940 3114 amdgpu_vf_error_trans_all(adev);
d38ceaf9
AD
3115 return r;
3116}
3117
d0dd7f0c
AD
3118void amdgpu_get_pcie_info(struct amdgpu_device *adev)
3119{
3120 u32 mask;
3121 int ret;
3122
cd474ba0
AD
3123 if (amdgpu_pcie_gen_cap)
3124 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 3125
cd474ba0
AD
3126 if (amdgpu_pcie_lane_cap)
3127 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 3128
cd474ba0
AD
3129 /* covers APUs as well */
3130 if (pci_is_root_bus(adev->pdev->bus)) {
3131 if (adev->pm.pcie_gen_mask == 0)
3132 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3133 if (adev->pm.pcie_mlw_mask == 0)
3134 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 3135 return;
cd474ba0 3136 }
d0dd7f0c 3137
cd474ba0
AD
3138 if (adev->pm.pcie_gen_mask == 0) {
3139 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
3140 if (!ret) {
3141 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3142 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3143 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3144
3145 if (mask & DRM_PCIE_SPEED_25)
3146 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
3147 if (mask & DRM_PCIE_SPEED_50)
3148 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
3149 if (mask & DRM_PCIE_SPEED_80)
3150 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
3151 } else {
3152 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3153 }
3154 }
3155 if (adev->pm.pcie_mlw_mask == 0) {
3156 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
3157 if (!ret) {
3158 switch (mask) {
3159 case 32:
3160 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
3161 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3162 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3163 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3164 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3165 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3166 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3167 break;
3168 case 16:
3169 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3170 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3171 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3172 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3173 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3174 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3175 break;
3176 case 12:
3177 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3178 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3179 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3180 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3181 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3182 break;
3183 case 8:
3184 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3185 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3186 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3187 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3188 break;
3189 case 4:
3190 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3191 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3192 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3193 break;
3194 case 2:
3195 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3196 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3197 break;
3198 case 1:
3199 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
3200 break;
3201 default:
3202 break;
3203 }
3204 } else {
3205 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c
AD
3206 }
3207 }
3208}
d38ceaf9
AD
3209
3210/*
3211 * Debugfs
3212 */
3213int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
06ab6832 3214 const struct drm_info_list *files,
d38ceaf9
AD
3215 unsigned nfiles)
3216{
3217 unsigned i;
3218
3219 for (i = 0; i < adev->debugfs_count; i++) {
3220 if (adev->debugfs[i].files == files) {
3221 /* Already registered */
3222 return 0;
3223 }
3224 }
3225
3226 i = adev->debugfs_count + 1;
3227 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
3228 DRM_ERROR("Reached maximum number of debugfs components.\n");
3229 DRM_ERROR("Report so we increase "
3230 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
3231 return -EINVAL;
3232 }
3233 adev->debugfs[adev->debugfs_count].files = files;
3234 adev->debugfs[adev->debugfs_count].num_files = nfiles;
3235 adev->debugfs_count = i;
3236#if defined(CONFIG_DEBUG_FS)
d38ceaf9
AD
3237 drm_debugfs_create_files(files, nfiles,
3238 adev->ddev->primary->debugfs_root,
3239 adev->ddev->primary);
3240#endif
3241 return 0;
3242}
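/*
 * Illustrative sketch, not part of the driver: callers hand this helper a
 * static drm_info_list array; re-registering the same array is a no-op.
 * Hypothetical registration (entry name and show callback are assumed):
 */
static int example_show(struct seq_file *m, void *data)
{
	return 0;	/* placeholder: real entries print device state here */
}

static const struct drm_info_list example_info_list[] = {
	{ "amdgpu_example", example_show, 0, NULL },
};

static int example_register_debugfs(struct amdgpu_device *adev)
{
	return amdgpu_debugfs_add_files(adev, example_info_list,
					ARRAY_SIZE(example_info_list));
}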
3243
d38ceaf9
AD
3244#if defined(CONFIG_DEBUG_FS)
3245
3246static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
3247 size_t size, loff_t *pos)
3248{
45063097 3249 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
3250 ssize_t result = 0;
3251 int r;
bd12267d 3252 bool pm_pg_lock, use_bank;
56628159 3253 unsigned instance_bank, sh_bank, se_bank;
d38ceaf9
AD
3254
3255 if (size & 0x3 || *pos & 0x3)
3256 return -EINVAL;
3257
bd12267d
TSD
3258 /* are we reading registers for which a PG lock is necessary? */
3259 pm_pg_lock = (*pos >> 23) & 1;
3260
56628159 3261 if (*pos & (1ULL << 62)) {
0b968650
TSD
3262 se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
3263 sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
3264 instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
32977f93
TSD
3265
3266 if (se_bank == 0x3FF)
3267 se_bank = 0xFFFFFFFF;
3268 if (sh_bank == 0x3FF)
3269 sh_bank = 0xFFFFFFFF;
3270 if (instance_bank == 0x3FF)
3271 instance_bank = 0xFFFFFFFF;
56628159 3272 use_bank = 1;
56628159
TSD
3273 } else {
3274 use_bank = 0;
3275 }
3276
801a6aa9 3277 *pos &= (1UL << 22) - 1;
bd12267d 3278
56628159 3279 if (use_bank) {
32977f93
TSD
3280 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3281 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
56628159
TSD
3282 return -EINVAL;
3283 mutex_lock(&adev->grbm_idx_mutex);
3284 amdgpu_gfx_select_se_sh(adev, se_bank,
3285 sh_bank, instance_bank);
3286 }
3287
bd12267d
TSD
3288 if (pm_pg_lock)
3289 mutex_lock(&adev->pm.mutex);
3290
d38ceaf9
AD
3291 while (size) {
3292 uint32_t value;
3293
3294 if (*pos > adev->rmmio_size)
56628159 3295 goto end;
d38ceaf9
AD
3296
3297 value = RREG32(*pos >> 2);
3298 r = put_user(value, (uint32_t *)buf);
56628159
TSD
3299 if (r) {
3300 result = r;
3301 goto end;
3302 }
d38ceaf9
AD
3303
3304 result += 4;
3305 buf += 4;
3306 *pos += 4;
3307 size -= 4;
3308 }
3309
56628159
TSD
3310end:
3311 if (use_bank) {
3312 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3313 mutex_unlock(&adev->grbm_idx_mutex);
3314 }
3315
bd12267d
TSD
3316 if (pm_pg_lock)
3317 mutex_unlock(&adev->pm.mutex);
3318
d38ceaf9
AD
3319 return result;
3320}
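/*
 * Illustrative sketch, not part of the driver: the decoding above implies
 * this layout for the debugfs file offset -- bits 0-21 register byte offset,
 * bit 23 PM/PG lock, bits 24-33/34-43/44-53 SE/SH/instance bank selects
 * (0x3FF = broadcast), bit 62 "apply bank selects".  A hypothetical encoder
 * for a banked register read/write position:
 */
static loff_t example_banked_reg_pos(u32 byte_off, u32 se, u32 sh, u32 instance)
{
	return (loff_t)((u64)(byte_off & ((1U << 22) - 1)) |
			((u64)(se & 0x3FF) << 24) |
			((u64)(sh & 0x3FF) << 34) |
			((u64)(instance & 0x3FF) << 44) |
			(1ULL << 62));
}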
3321
3322static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
3323 size_t size, loff_t *pos)
3324{
45063097 3325 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
3326 ssize_t result = 0;
3327 int r;
394fdde2
TSD
3328 bool pm_pg_lock, use_bank;
3329 unsigned instance_bank, sh_bank, se_bank;
d38ceaf9
AD
3330
3331 if (size & 0x3 || *pos & 0x3)
3332 return -EINVAL;
3333
394fdde2
TSD
3334 /* are we writing registers for which a PG lock is necessary? */
3335 pm_pg_lock = (*pos >> 23) & 1;
3336
3337 if (*pos & (1ULL << 62)) {
0b968650
TSD
3338 se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
3339 sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
3340 instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
394fdde2
TSD
3341
3342 if (se_bank == 0x3FF)
3343 se_bank = 0xFFFFFFFF;
3344 if (sh_bank == 0x3FF)
3345 sh_bank = 0xFFFFFFFF;
3346 if (instance_bank == 0x3FF)
3347 instance_bank = 0xFFFFFFFF;
3348 use_bank = 1;
3349 } else {
3350 use_bank = 0;
3351 }
3352
801a6aa9 3353 *pos &= (1UL << 22) - 1;
394fdde2
TSD
3354
3355 if (use_bank) {
3356 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3357 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
3358 return -EINVAL;
3359 mutex_lock(&adev->grbm_idx_mutex);
3360 amdgpu_gfx_select_se_sh(adev, se_bank,
3361 sh_bank, instance_bank);
3362 }
3363
3364 if (pm_pg_lock)
3365 mutex_lock(&adev->pm.mutex);
3366
d38ceaf9
AD
3367 while (size) {
3368 uint32_t value;
3369
3370 if (*pos > adev->rmmio_size)
3371 return result;
3372
3373 r = get_user(value, (uint32_t *)buf);
3374 if (r)
3375 return r;
3376
3377 WREG32(*pos >> 2, value);
3378
3379 result += 4;
3380 buf += 4;
3381 *pos += 4;
3382 size -= 4;
3383 }
3384
394fdde2
TSD
3385 if (use_bank) {
3386 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3387 mutex_unlock(&adev->grbm_idx_mutex);
3388 }
3389
3390 if (pm_pg_lock)
3391 mutex_unlock(&adev->pm.mutex);
3392
d38ceaf9
AD
3393 return result;
3394}
3395
adcec288
TSD
3396static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
3397 size_t size, loff_t *pos)
3398{
45063097 3399 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3400 ssize_t result = 0;
3401 int r;
3402
3403 if (size & 0x3 || *pos & 0x3)
3404 return -EINVAL;
3405
3406 while (size) {
3407 uint32_t value;
3408
3409 value = RREG32_PCIE(*pos >> 2);
3410 r = put_user(value, (uint32_t *)buf);
3411 if (r)
3412 return r;
3413
3414 result += 4;
3415 buf += 4;
3416 *pos += 4;
3417 size -= 4;
3418 }
3419
3420 return result;
3421}
3422
3423static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
3424 size_t size, loff_t *pos)
3425{
45063097 3426 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3427 ssize_t result = 0;
3428 int r;
3429
3430 if (size & 0x3 || *pos & 0x3)
3431 return -EINVAL;
3432
3433 while (size) {
3434 uint32_t value;
3435
3436 r = get_user(value, (uint32_t *)buf);
3437 if (r)
3438 return r;
3439
3440 WREG32_PCIE(*pos >> 2, value);
3441
3442 result += 4;
3443 buf += 4;
3444 *pos += 4;
3445 size -= 4;
3446 }
3447
3448 return result;
3449}
3450
3451static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
3452 size_t size, loff_t *pos)
3453{
45063097 3454 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3455 ssize_t result = 0;
3456 int r;
3457
3458 if (size & 0x3 || *pos & 0x3)
3459 return -EINVAL;
3460
3461 while (size) {
3462 uint32_t value;
3463
3464 value = RREG32_DIDT(*pos >> 2);
3465 r = put_user(value, (uint32_t *)buf);
3466 if (r)
3467 return r;
3468
3469 result += 4;
3470 buf += 4;
3471 *pos += 4;
3472 size -= 4;
3473 }
3474
3475 return result;
3476}
3477
3478static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
3479 size_t size, loff_t *pos)
3480{
45063097 3481 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3482 ssize_t result = 0;
3483 int r;
3484
3485 if (size & 0x3 || *pos & 0x3)
3486 return -EINVAL;
3487
3488 while (size) {
3489 uint32_t value;
3490
3491 r = get_user(value, (uint32_t *)buf);
3492 if (r)
3493 return r;
3494
3495 WREG32_DIDT(*pos >> 2, value);
3496
3497 result += 4;
3498 buf += 4;
3499 *pos += 4;
3500 size -= 4;
3501 }
3502
3503 return result;
3504}
3505
3506static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
3507 size_t size, loff_t *pos)
3508{
45063097 3509 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3510 ssize_t result = 0;
3511 int r;
3512
3513 if (size & 0x3 || *pos & 0x3)
3514 return -EINVAL;
3515
3516 while (size) {
3517 uint32_t value;
3518
6fc0deaf 3519 value = RREG32_SMC(*pos);
adcec288
TSD
3520 r = put_user(value, (uint32_t *)buf);
3521 if (r)
3522 return r;
3523
3524 result += 4;
3525 buf += 4;
3526 *pos += 4;
3527 size -= 4;
3528 }
3529
3530 return result;
3531}
3532
3533static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
3534 size_t size, loff_t *pos)
3535{
45063097 3536 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3537 ssize_t result = 0;
3538 int r;
3539
3540 if (size & 0x3 || *pos & 0x3)
3541 return -EINVAL;
3542
3543 while (size) {
3544 uint32_t value;
3545
3546 r = get_user(value, (uint32_t *)buf);
3547 if (r)
3548 return r;
3549
6fc0deaf 3550 WREG32_SMC(*pos, value);
adcec288
TSD
3551
3552 result += 4;
3553 buf += 4;
3554 *pos += 4;
3555 size -= 4;
3556 }
3557
3558 return result;
3559}

static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 3;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = adev->cg_flags;

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}
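
/*
 * Usage note (added): amdgpu_gca_config returns an array of dwords starting
 * with a format version (currently 3), followed by the GFX configuration,
 * rev_id/pg/cg flags, family/external_rev_id and PCI IDs in the order built
 * above.  A minimal userspace sketch, with an illustrative path and buffer
 * size only:
 *
 *	uint32_t cfg[64];
 *	int fd = open("/sys/kernel/debug/dri/0/amdgpu_gca_config", O_RDONLY);
 *	ssize_t n = read(fd, cfg, sizeof(cfg));
 *	cfg[0] is the version, cfg[1] is max_shader_engines, and so on.
 */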

static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, x, outsize, r, valuesize;
	uint32_t values[16];

	if (size & 3 || *pos & 0x3)
		return -EINVAL;

	if (amdgpu_dpm == 0)
		return -EINVAL;

	/* convert offset to sensor number */
	idx = *pos >> 2;

	valuesize = sizeof(values);
	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
		r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
	else
		return -EINVAL;

	if (size > valuesize)
		return -EINVAL;

	outsize = 0;
	x = 0;
	if (!r) {
		while (size) {
			r = put_user(values[x++], (int32_t *)buf);
			buf += 4;
			size -= 4;
			outsize += 4;
		}
	}

	return !r ? outsize : r;
}
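
/*
 * Usage note (added): the file offset selects the sensor, i.e. sensor index
 * idx is read at byte offset idx * 4, and a sensor may report more than one
 * dword (up to 16 values).  Illustrative sketch only, with a hypothetical
 * sensor_index:
 *
 *	uint32_t value;
 *	pread(fd, &value, 4, sensor_index * 4);	first value of that sensor
 */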

static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & GENMASK_ULL(6, 0));
	se = (*pos & GENMASK_ULL(14, 7)) >> 7;
	sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
	cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
	wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
	simd = (*pos & GENMASK_ULL(44, 37)) >> 37;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	if (!x)
		return -EINVAL;

	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	return result;
}
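
/*
 * Usage note (added): amdgpu_wave packs the wave selector into the 64-bit
 * file offset using the GENMASK_ULL() fields decoded above, roughly:
 *
 *	pos = (byte_off & 0x7f)        |
 *	      ((uint64_t)se    << 7)   |
 *	      ((uint64_t)sh    << 15)  |
 *	      ((uint64_t)cu    << 23)  |
 *	      ((uint64_t)wave  << 31)  |
 *	      ((uint64_t)simd  << 37);
 *
 * A reader seeks to that offset and reads back the wave status dwords
 * returned by the per-ASIC read_wave_data() callback.
 */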

static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = *pos & GENMASK_ULL(11, 0);
	se = (*pos & GENMASK_ULL(19, 12)) >> 12;
	sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
	cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
	wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
	simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
	thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
	bank = (*pos & GENMASK_ULL(61, 60)) >> 60;

	data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	if (bank == 0) {
		if (adev->gfx.funcs->read_wave_vgprs)
			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size >> 2, data);
	} else {
		if (adev->gfx.funcs->read_wave_sgprs)
			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size >> 2, data);
	}

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	while (size) {
		uint32_t value;

		value = data[offset++];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto err;
		}

		result += 4;
		buf += 4;
		size -= 4;
	}

err:
	kfree(data);
	return result;
}
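
/*
 * Usage note (added): amdgpu_gpr uses a wider offset encoding than
 * amdgpu_wave: bits 0..11 are the register (dword) offset, then se, sh, cu,
 * wave, simd and thread follow in 8-bit fields, and bits 60..61 select the
 * bank (0 reads VGPRs via read_wave_vgprs(), anything else reads SGPRs via
 * read_wave_sgprs()).
 */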

static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
};

static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
};
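
/*
 * Note (added): debugfs_regs[] and debugfs_regs_names[] are parallel arrays;
 * entry i of the fops table is registered under entry i of the name table,
 * so new files must be added to both in the same position.
 */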

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned i, j;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | S_IRUGO, root,
					  adev, debugfs_regs[i]);
		if (IS_ERR(ent)) {
			/* unwind the entries created so far (index j, not i) */
			for (j = 0; j < i; j++) {
				debugfs_remove(adev->debugfs_regs[j]);
				adev->debugfs_regs[j] = NULL;
			}
			return PTR_ERR(ent);
		}

		if (!i)
			i_size_write(ent->d_inode, adev->rmmio_size);
		adev->debugfs_regs[i] = ent;
	}

	return 0;
}

static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		if (adev->debugfs_regs[i]) {
			debugfs_remove(adev->debugfs_regs[i]);
			adev->debugfs_regs[i] = NULL;
		}
	}
}

static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int r = 0, i;

	/* hold on the scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_park(ring->sched.thread);
	}

	seq_printf(m, "run ib test:\n");
	r = amdgpu_ib_ring_tests(adev);
	if (r)
		seq_printf(m, "ib ring tests failed (%d).\n", r);
	else
		seq_printf(m, "ib ring tests passed.\n");

	/* go on the scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_unpark(ring->sched.thread);
	}

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = {
	{"amdgpu_test_ib", &amdgpu_debugfs_test_ib}
};

static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
{
	return amdgpu_debugfs_add_files(adev,
					amdgpu_debugfs_test_ib_ring_list, 1);
}
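
/*
 * Usage note (added): reading the amdgpu_test_ib file (e.g. with cat, under
 * the DRM debugfs root) parks every ring's scheduler thread, runs
 * amdgpu_ib_ring_tests() and reports pass/fail in the seq_file output before
 * unparking the schedulers again.
 */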

int amdgpu_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_write(m, adev->bios, adev->bios_size);
	return 0;
}

static const struct drm_info_list amdgpu_vbios_dump_list[] = {
	{"amdgpu_vbios",
	 amdgpu_debugfs_get_vbios_dump,
	 0, NULL},
};

static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
{
	return amdgpu_debugfs_add_files(adev,
					amdgpu_vbios_dump_list, 1);
}
#else
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
{
	return 0;
}
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
{
	return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif