drm/amdgpu: add gpu_info firmware (v3)
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
0875dc9e 28#include <linux/kthread.h>
29#include <linux/console.h>
30#include <linux/slab.h>
31#include <linux/debugfs.h>
32#include <drm/drmP.h>
33#include <drm/drm_crtc_helper.h>
34#include <drm/amdgpu_drm.h>
35#include <linux/vgaarb.h>
36#include <linux/vga_switcheroo.h>
37#include <linux/efi.h>
38#include "amdgpu.h"
f4b373f4 39#include "amdgpu_trace.h"
40#include "amdgpu_i2c.h"
41#include "atom.h"
42#include "amdgpu_atombios.h"
a5bde2f9 43#include "amdgpu_atomfirmware.h"
d0dd7f0c 44#include "amd_pcie.h"
45#ifdef CONFIG_DRM_AMDGPU_SI
46#include "si.h"
47#endif
48#ifdef CONFIG_DRM_AMDGPU_CIK
49#include "cik.h"
50#endif
aaa36a97 51#include "vi.h"
460826e6 52#include "soc15.h"
d38ceaf9 53#include "bif/bif_4_1_d.h"
9accf2fd 54#include <linux/pci.h>
bec86378 55#include <linux/firmware.h>
56
57static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
58static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
59
60static const char *amdgpu_asic_name[] = {
61 "TAHITI",
62 "PITCAIRN",
63 "VERDE",
64 "OLAND",
65 "HAINAN",
66 "BONAIRE",
67 "KAVERI",
68 "KABINI",
69 "HAWAII",
70 "MULLINS",
71 "TOPAZ",
72 "TONGA",
48299f95 73 "FIJI",
d38ceaf9 74 "CARRIZO",
139f4917 75 "STONEY",
76 "POLARIS10",
77 "POLARIS11",
c4642a47 78 "POLARIS12",
d4196f01 79 "VEGA10",
80 "LAST",
81};
82
83bool amdgpu_device_is_px(struct drm_device *dev)
84{
85 struct amdgpu_device *adev = dev->dev_private;
86
2f7d10b3 87 if (adev->flags & AMD_IS_PX)
88 return true;
89 return false;
90}
91
92/*
93 * MMIO register access helper functions.
94 */
95uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
15d72fd7 96 uint32_t acc_flags)
d38ceaf9 97{
98 uint32_t ret;
99
15d72fd7 100 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
101 BUG_ON(in_interrupt());
102 return amdgpu_virt_kiq_rreg(adev, reg);
103 }
104
15d72fd7 105 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
f4b373f4 106 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
107 else {
108 unsigned long flags;
109
110 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
111 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
112 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
113 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
d38ceaf9 114 }
115 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
116 return ret;
117}
118
119void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
15d72fd7 120 uint32_t acc_flags)
d38ceaf9 121{
f4b373f4 122 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
4e99a44e 123
15d72fd7 124 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
125 BUG_ON(in_interrupt());
126 return amdgpu_virt_kiq_wreg(adev, reg, v);
127 }
128
15d72fd7 129 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
130 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
131 else {
132 unsigned long flags;
133
134 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
135 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
136 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
137 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
138 }
139}
140
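/*
 * Illustrative sketch, not part of the original file: a typical
 * read-modify-write sequence built on the two MMIO helpers above. The
 * helper name and the mask/value parameters are hypothetical. Passing 0
 * for acc_flags takes the plain readl()/writel() path; AMDGPU_REGS_NO_KIQ
 * and AMDGPU_REGS_IDX change how the access is routed, as shown above.
 */
static void __maybe_unused amdgpu_example_mm_rmw(struct amdgpu_device *adev,
						 uint32_t reg, uint32_t mask,
						 uint32_t val)
{
	uint32_t tmp;

	tmp = amdgpu_mm_rreg(adev, reg, 0);	/* read the current value */
	tmp &= ~mask;				/* clear the field */
	tmp |= (val & mask);			/* merge in the new bits */
	amdgpu_mm_wreg(adev, reg, tmp, 0);	/* write it back */
}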
141u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
142{
143 if ((reg * 4) < adev->rio_mem_size)
144 return ioread32(adev->rio_mem + (reg * 4));
145 else {
146 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
147 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
148 }
149}
150
151void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
152{
153
154 if ((reg * 4) < adev->rio_mem_size)
155 iowrite32(v, adev->rio_mem + (reg * 4));
156 else {
157 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
158 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
159 }
160}
161
162/**
163 * amdgpu_mm_rdoorbell - read a doorbell dword
164 *
165 * @adev: amdgpu_device pointer
166 * @index: doorbell index
167 *
168 * Returns the value in the doorbell aperture at the
169 * requested doorbell index (CIK).
170 */
171u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
172{
173 if (index < adev->doorbell.num_doorbells) {
174 return readl(adev->doorbell.ptr + index);
175 } else {
176 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
177 return 0;
178 }
179}
180
181/**
182 * amdgpu_mm_wdoorbell - write a doorbell dword
183 *
184 * @adev: amdgpu_device pointer
185 * @index: doorbell index
186 * @v: value to write
187 *
188 * Writes @v to the doorbell aperture at the
189 * requested doorbell index (CIK).
190 */
191void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
192{
193 if (index < adev->doorbell.num_doorbells) {
194 writel(v, adev->doorbell.ptr + index);
195 } else {
196 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
197 }
198}
199
200/**
201 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
202 *
203 * @adev: amdgpu_device pointer
204 * @index: doorbell index
205 *
206 * Returns the value in the doorbell aperture at the
207 * requested doorbell index (VEGA10+).
208 */
209u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
210{
211 if (index < adev->doorbell.num_doorbells) {
212 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
213 } else {
214 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
215 return 0;
216 }
217}
218
219/**
220 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
221 *
222 * @adev: amdgpu_device pointer
223 * @index: doorbell index
224 * @v: value to write
225 *
226 * Writes @v to the doorbell aperture at the
227 * requested doorbell index (VEGA10+).
228 */
229void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
230{
231 if (index < adev->doorbell.num_doorbells) {
232 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
233 } else {
234 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
235 }
236}
237
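/*
 * Illustrative sketch, not part of the original file: ringing a doorbell
 * for a hypothetical ring. The function name, doorbell index and write
 * pointer are made up; real callers use the index assigned to their ring,
 * and the asic_type comparison is an assumption for the example. Both
 * helpers above already range-check the index against
 * adev->doorbell.num_doorbells.
 */
static void __maybe_unused amdgpu_example_kick_ring(struct amdgpu_device *adev,
						    u32 doorbell_index, u64 wptr)
{
	if (adev->asic_type >= CHIP_VEGA10)
		/* VEGA10+ rings use the 64-bit doorbell helper */
		amdgpu_mm_wdoorbell64(adev, doorbell_index, wptr);
	else
		/* older asics use the 32-bit doorbell helper */
		amdgpu_mm_wdoorbell(adev, doorbell_index, (u32)wptr);
}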
238/**
239 * amdgpu_invalid_rreg - dummy reg read function
240 *
241 * @adev: amdgpu device pointer
242 * @reg: offset of register
243 *
244 * Dummy register read function. Used for register blocks
245 * that certain asics don't have (all asics).
246 * Returns the value in the register.
247 */
248static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
249{
250 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
251 BUG();
252 return 0;
253}
254
255/**
256 * amdgpu_invalid_wreg - dummy reg write function
257 *
258 * @adev: amdgpu device pointer
259 * @reg: offset of register
260 * @v: value to write to the register
261 *
262 * Dummy register write function. Used for register blocks
263 * that certain asics don't have (all asics).
264 */
265static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
266{
267 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
268 reg, v);
269 BUG();
270}
271
272/**
273 * amdgpu_block_invalid_rreg - dummy reg read function
274 *
275 * @adev: amdgpu device pointer
276 * @block: offset of instance
277 * @reg: offset of register
278 *
279 * Dummy register read function. Used for register blocks
280 * that certain asics don't have (all asics).
281 * Returns the value in the register.
282 */
283static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
284 uint32_t block, uint32_t reg)
285{
286 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
287 reg, block);
288 BUG();
289 return 0;
290}
291
292/**
293 * amdgpu_block_invalid_wreg - dummy reg write function
294 *
295 * @adev: amdgpu device pointer
296 * @block: offset of instance
297 * @reg: offset of register
298 * @v: value to write to the register
299 *
300 * Dummy register write function. Used for register blocks
301 * that certain asics don't have (all asics).
302 */
303static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
304 uint32_t block,
305 uint32_t reg, uint32_t v)
306{
307 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
308 reg, block, v);
309 BUG();
310}
311
312static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
313{
314 int r;
315
316 if (adev->vram_scratch.robj == NULL) {
317 r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
857d913d 318 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
319 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
320 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
72d7668b 321 NULL, NULL, &adev->vram_scratch.robj);
322 if (r) {
323 return r;
324 }
325 }
326
327 r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
328 if (unlikely(r != 0))
329 return r;
330 r = amdgpu_bo_pin(adev->vram_scratch.robj,
331 AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
332 if (r) {
333 amdgpu_bo_unreserve(adev->vram_scratch.robj);
334 return r;
335 }
336 r = amdgpu_bo_kmap(adev->vram_scratch.robj,
337 (void **)&adev->vram_scratch.ptr);
338 if (r)
339 amdgpu_bo_unpin(adev->vram_scratch.robj);
340 amdgpu_bo_unreserve(adev->vram_scratch.robj);
341
342 return r;
343}
344
345static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
346{
347 int r;
348
349 if (adev->vram_scratch.robj == NULL) {
350 return;
351 }
8ab25b4f 352 r = amdgpu_bo_reserve(adev->vram_scratch.robj, true);
353 if (likely(r == 0)) {
354 amdgpu_bo_kunmap(adev->vram_scratch.robj);
355 amdgpu_bo_unpin(adev->vram_scratch.robj);
356 amdgpu_bo_unreserve(adev->vram_scratch.robj);
357 }
358 amdgpu_bo_unref(&adev->vram_scratch.robj);
359}
360
361/**
362 * amdgpu_program_register_sequence - program an array of registers.
363 *
364 * @adev: amdgpu_device pointer
365 * @registers: pointer to the register array
366 * @array_size: size of the register array
367 *
368 * Programs an array of registers with AND and OR masks.
369 * This is a helper for setting golden registers.
370 */
371void amdgpu_program_register_sequence(struct amdgpu_device *adev,
372 const u32 *registers,
373 const u32 array_size)
374{
375 u32 tmp, reg, and_mask, or_mask;
376 int i;
377
378 if (array_size % 3)
379 return;
380
381 for (i = 0; i < array_size; i +=3) {
382 reg = registers[i + 0];
383 and_mask = registers[i + 1];
384 or_mask = registers[i + 2];
385
386 if (and_mask == 0xffffffff) {
387 tmp = or_mask;
388 } else {
389 tmp = RREG32(reg);
390 tmp &= ~and_mask;
391 tmp |= or_mask;
392 }
393 WREG32(reg, tmp);
394 }
395}
396
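/*
 * Illustrative sketch, not part of the original file: the register array
 * consumed by amdgpu_program_register_sequence() is a flat list of
 * {offset, and_mask, or_mask} triplets. The offsets and masks below are
 * made-up placeholders; real tables live in the per-asic golden-settings
 * arrays.
 */
static const u32 example_golden_settings[] = {
	/* offset      and_mask     or_mask */
	0x0000315c, 0xffffffff, 0x00000010,	/* and_mask 0xffffffff: or_mask is written as-is */
	0x000098f8, 0x00ff0000, 0x00120000,	/* otherwise: read, clear and_mask bits, set or_mask */
};

static void __maybe_unused amdgpu_example_apply_golden(struct amdgpu_device *adev)
{
	amdgpu_program_register_sequence(adev, example_golden_settings,
					 ARRAY_SIZE(example_golden_settings));
}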
397void amdgpu_pci_config_reset(struct amdgpu_device *adev)
398{
399 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
400}
401
402/*
403 * GPU doorbell aperture helpers function.
404 */
405/**
406 * amdgpu_doorbell_init - Init doorbell driver information.
407 *
408 * @adev: amdgpu_device pointer
409 *
410 * Init doorbell driver information (CIK)
411 * Returns 0 on success, error on failure.
412 */
413static int amdgpu_doorbell_init(struct amdgpu_device *adev)
414{
415 /* doorbell bar mapping */
416 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
417 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
418
edf600da 419 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
420 AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
421 if (adev->doorbell.num_doorbells == 0)
422 return -EINVAL;
423
424 adev->doorbell.ptr = ioremap(adev->doorbell.base,
425 adev->doorbell.num_doorbells *
426 sizeof(u32));
427 if (adev->doorbell.ptr == NULL)
d38ceaf9 428 return -ENOMEM;
429
430 return 0;
431}
432
433/**
434 * amdgpu_doorbell_fini - Tear down doorbell driver information.
435 *
436 * @adev: amdgpu_device pointer
437 *
438 * Tear down doorbell driver information (CIK)
439 */
440static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
441{
442 iounmap(adev->doorbell.ptr);
443 adev->doorbell.ptr = NULL;
444}
445
446/**
447 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
448 * setup amdkfd
449 *
450 * @adev: amdgpu_device pointer
451 * @aperture_base: output returning doorbell aperture base physical address
452 * @aperture_size: output returning doorbell aperture size in bytes
453 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
454 *
455 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
456 * takes doorbells required for its own rings and reports the setup to amdkfd.
457 * amdgpu reserved doorbells are at the start of the doorbell aperture.
458 */
459void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
460 phys_addr_t *aperture_base,
461 size_t *aperture_size,
462 size_t *start_offset)
463{
464 /*
465 * The first num_doorbells are used by amdgpu.
466 * amdkfd takes whatever's left in the aperture.
467 */
468 if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
469 *aperture_base = adev->doorbell.base;
470 *aperture_size = adev->doorbell.size;
471 *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
472 } else {
473 *aperture_base = 0;
474 *aperture_size = 0;
475 *start_offset = 0;
476 }
477}
478
479/*
480 * amdgpu_wb_*()
481 * Writeback is the method by which the GPU updates special pages
482 * in memory with the status of certain GPU events (fences, ring pointers,
483 * etc.).
484 */
485
486/**
487 * amdgpu_wb_fini - Disable Writeback and free memory
488 *
489 * @adev: amdgpu_device pointer
490 *
491 * Disables Writeback and frees the Writeback memory (all asics).
492 * Used at driver shutdown.
493 */
494static void amdgpu_wb_fini(struct amdgpu_device *adev)
495{
496 if (adev->wb.wb_obj) {
497 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
498 &adev->wb.gpu_addr,
499 (void **)&adev->wb.wb);
500 adev->wb.wb_obj = NULL;
501 }
502}
503
504/**
505 * amdgpu_wb_init- Init Writeback driver info and allocate memory
506 *
507 * @adev: amdgpu_device pointer
508 *
509 * Initializes writeback and allocates the writeback memory (all asics).
510 * Used at driver startup.
511 * Returns 0 on success or a negative error code on failure.
512 */
513static int amdgpu_wb_init(struct amdgpu_device *adev)
514{
515 int r;
516
517 if (adev->wb.wb_obj == NULL) {
60a970a6 518 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
519 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
520 &adev->wb.wb_obj, &adev->wb.gpu_addr,
521 (void **)&adev->wb.wb);
522 if (r) {
523 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
524 return r;
525 }
526
527 adev->wb.num_wb = AMDGPU_MAX_WB;
528 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
529
530 /* clear wb memory */
60a970a6 531 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
532 }
533
534 return 0;
535}
536
537/**
538 * amdgpu_wb_get - Allocate a wb entry
539 *
540 * @adev: amdgpu_device pointer
541 * @wb: wb index
542 *
543 * Allocate a wb slot for use by the driver (all asics).
544 * Returns 0 on success or -EINVAL on failure.
545 */
546int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
547{
548 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
549 if (offset < adev->wb.num_wb) {
550 __set_bit(offset, adev->wb.used);
551 *wb = offset;
552 return 0;
553 } else {
554 return -EINVAL;
555 }
556}
557
558/**
559 * amdgpu_wb_get_64bit - Allocate a wb entry
560 *
561 * @adev: amdgpu_device pointer
562 * @wb: wb index
563 *
564 * Allocate a wb slot for use by the driver (all asics).
565 * Returns 0 on success or -EINVAL on failure.
566 */
567int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
568{
569 unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
570 adev->wb.num_wb, 0, 2, 7, 0);
571 if ((offset + 1) < adev->wb.num_wb) {
572 __set_bit(offset, adev->wb.used);
573 __set_bit(offset + 1, adev->wb.used);
574 *wb = offset;
575 return 0;
576 } else {
577 return -EINVAL;
578 }
579}
580
581/**
582 * amdgpu_wb_free - Free a wb entry
583 *
584 * @adev: amdgpu_device pointer
585 * @wb: wb index
586 *
587 * Free a wb slot allocated for use by the driver (all asics)
588 */
589void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
590{
591 if (wb < adev->wb.num_wb)
592 __clear_bit(wb, adev->wb.used);
593}
594
595/**
596 * amdgpu_wb_free_64bit - Free a wb entry
597 *
598 * @adev: amdgpu_device pointer
599 * @wb: wb index
600 *
601 * Free a wb slot allocated for use by the driver (all asics)
602 */
603void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
604{
605 if ((wb + 1) < adev->wb.num_wb) {
606 __clear_bit(wb, adev->wb.used);
607 __clear_bit(wb + 1, adev->wb.used);
608 }
609}
610
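/*
 * Illustrative sketch, not part of the original file: how a caller might
 * grab a writeback slot, derive its CPU and GPU addresses, and release it.
 * The function and variable names are hypothetical, and the addressing
 * (slot index scaled by sizeof(u32) against wb.gpu_addr / wb.wb) is an
 * assumption that follows the layout allocated in amdgpu_wb_init() above.
 * Rings that need a 64-bit slot use the _64bit variants instead.
 */
static int __maybe_unused amdgpu_example_use_wb(struct amdgpu_device *adev)
{
	u64 wb_gpu_addr;
	volatile uint32_t *wb_cpu_ptr;
	u32 wb_slot;
	int r;

	r = amdgpu_wb_get(adev, &wb_slot);
	if (r)
		return r;				/* -EINVAL: no free slot */

	wb_gpu_addr = adev->wb.gpu_addr + (wb_slot * 4);	/* GPU address of the slot */
	wb_cpu_ptr = &adev->wb.wb[wb_slot];			/* CPU view of the same slot */

	/* ... hand wb_gpu_addr to the hardware, poll *wb_cpu_ptr for updates ... */
	(void)wb_gpu_addr;
	(void)wb_cpu_ptr;

	amdgpu_wb_free(adev, wb_slot);
	return 0;
}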
611/**
612 * amdgpu_vram_location - try to find VRAM location
613 * @adev: amdgpu device structure holding all necessary informations
614 * @mc: memory controller structure holding memory informations
615 * @base: base address at which to put VRAM
616 *
617 * Function will try to place VRAM at the base address provided
618 * as parameter (which is so far either PCI aperture address or
619 * for IGP TOM base address).
620 *
621 * If there is not enough space to fit the invisible VRAM in the 32-bit
622 * address space then we limit the VRAM size to the aperture.
623 *
624 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
625 * this shouldn't be a problem as we are using the PCI aperture as a reference.
626 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
627 * not IGP.
628 *
629 * Note: we use mc_vram_size as on some board we need to program the mc to
630 * cover the whole aperture even if VRAM size is inferior to aperture size
631 * Novell bug 204882 + along with lots of ubuntu ones
632 *
633 * Note: when limiting vram it's safe to overwrite real_vram_size because
634 * we are not in the case where real_vram_size is smaller than mc_vram_size (i.e.
635 * not affected by the bogus hw of Novell bug 204882 along with lots of ubuntu
636 * ones)
637 *
638 * Note: IGP TOM addr should be the same as the aperture addr, we don't
639 * explicitly check for that though.
640 *
641 * FIXME: when reducing VRAM size align new size on power of 2.
642 */
643void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
644{
645 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
646
647 mc->vram_start = base;
648 if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
649 dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
650 mc->real_vram_size = mc->aper_size;
651 mc->mc_vram_size = mc->aper_size;
652 }
653 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
654 if (limit && limit < mc->real_vram_size)
655 mc->real_vram_size = limit;
656 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
657 mc->mc_vram_size >> 20, mc->vram_start,
658 mc->vram_end, mc->real_vram_size >> 20);
659}
660
661/**
662 * amdgpu_gtt_location - try to find GTT location
663 * @adev: amdgpu device structure holding all necessary informations
664 * @mc: memory controller structure holding memory informations
665 *
666 * Function will try to place GTT before or after VRAM.
667 *
668 * If the GTT size is bigger than the space left then we adjust the GTT size.
669 * Thus this function will never fail.
670 *
671 * FIXME: when reducing GTT size align new size on power of 2.
672 */
673void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
674{
675 u64 size_af, size_bf;
676
677 size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
678 size_bf = mc->vram_start & ~mc->gtt_base_align;
679 if (size_bf > size_af) {
680 if (mc->gtt_size > size_bf) {
681 dev_warn(adev->dev, "limiting GTT\n");
682 mc->gtt_size = size_bf;
683 }
9dc5a91e 684 mc->gtt_start = 0;
685 } else {
686 if (mc->gtt_size > size_af) {
687 dev_warn(adev->dev, "limiting GTT\n");
688 mc->gtt_size = size_af;
689 }
690 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
691 }
692 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
693 dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
694 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
695}
696
697/*
698 * GPU helpers function.
699 */
700/**
701 * amdgpu_need_post - check if the hw needs to be posted or not
702 *
703 * @adev: amdgpu_device pointer
704 *
705 * Check whether the asic needs to be posted: either it has not been
706 * initialized yet at driver startup, or a hw reset was performed.
707 * Returns true if a post is needed, false if not.
d38ceaf9 708 */
c836fec5 709bool amdgpu_need_post(struct amdgpu_device *adev)
710{
711 uint32_t reg;
712
713 if (adev->has_hw_reset) {
714 adev->has_hw_reset = false;
715 return true;
716 }
d38ceaf9 717 /* then check MEM_SIZE, in case the crtcs are off */
bbf282d8 718 reg = amdgpu_asic_get_config_memsize(adev);
d38ceaf9 719
f2713e8c 720 if ((reg != 0) && (reg != 0xffffffff))
c836fec5 721 return false;
d38ceaf9 722
c836fec5 723 return true;
724
725}
726
727static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
728{
729 if (amdgpu_sriov_vf(adev))
730 return false;
731
732 if (amdgpu_passthrough(adev)) {
733 /* for FIJI: In the whole-GPU pass-through virtualization case, after a VM reboot
734 * some old SMC firmware still needs the driver to do a vPost, otherwise the GPU hangs.
735 * SMC firmware versions above 22.15 don't have this flaw, so we force a
736 * vPost for SMC versions below 22.15
737 */
738 if (adev->asic_type == CHIP_FIJI) {
739 int err;
740 uint32_t fw_ver;
741 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
742 /* force vPost if an error occurred */
743 if (err)
744 return true;
745
746 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
747 if (fw_ver < 0x00160e00)
748 return true;
bec86378 749 }
bec86378 750 }
c836fec5 751 return amdgpu_need_post(adev);
752}
753
754/**
755 * amdgpu_dummy_page_init - init dummy page used by the driver
756 *
757 * @adev: amdgpu_device pointer
758 *
759 * Allocate the dummy page used by the driver (all asics).
760 * This dummy page is used by the driver as a filler for gart entries
761 * when pages are taken out of the GART
762 * Returns 0 on success, -ENOMEM on failure.
763 */
764int amdgpu_dummy_page_init(struct amdgpu_device *adev)
765{
766 if (adev->dummy_page.page)
767 return 0;
768 adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
769 if (adev->dummy_page.page == NULL)
770 return -ENOMEM;
771 adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
772 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
773 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
774 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
775 __free_page(adev->dummy_page.page);
776 adev->dummy_page.page = NULL;
777 return -ENOMEM;
778 }
779 return 0;
780}
781
782/**
783 * amdgpu_dummy_page_fini - free dummy page used by the driver
784 *
785 * @adev: amdgpu_device pointer
786 *
787 * Frees the dummy page used by the driver (all asics).
788 */
789void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
790{
791 if (adev->dummy_page.page == NULL)
792 return;
793 pci_unmap_page(adev->pdev, adev->dummy_page.addr,
794 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
795 __free_page(adev->dummy_page.page);
796 adev->dummy_page.page = NULL;
797}
798
799
800/* ATOM accessor methods */
801/*
802 * ATOM is an interpreted byte code stored in tables in the vbios. The
803 * driver registers callbacks to access registers and the interpreter
804 * in the driver parses the tables and executes them to program specific
805 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
806 * atombios.h, and atom.c
807 */
808
809/**
810 * cail_pll_read - read PLL register
811 *
812 * @info: atom card_info pointer
813 * @reg: PLL register offset
814 *
815 * Provides a PLL register accessor for the atom interpreter (r4xx+).
816 * Returns the value of the PLL register.
817 */
818static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
819{
820 return 0;
821}
822
823/**
824 * cail_pll_write - write PLL register
825 *
826 * @info: atom card_info pointer
827 * @reg: PLL register offset
828 * @val: value to write to the pll register
829 *
830 * Provides a PLL register accessor for the atom interpreter (r4xx+).
831 */
832static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
833{
834
835}
836
837/**
838 * cail_mc_read - read MC (Memory Controller) register
839 *
840 * @info: atom card_info pointer
841 * @reg: MC register offset
842 *
843 * Provides an MC register accessor for the atom interpreter (r4xx+).
844 * Returns the value of the MC register.
845 */
846static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
847{
848 return 0;
849}
850
851/**
852 * cail_mc_write - write MC (Memory Controller) register
853 *
854 * @info: atom card_info pointer
855 * @reg: MC register offset
856 * @val: value to write to the MC register
857 *
858 * Provides a MC register accessor for the atom interpreter (r4xx+).
859 */
860static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
861{
862
863}
864
865/**
866 * cail_reg_write - write MMIO register
867 *
868 * @info: atom card_info pointer
869 * @reg: MMIO register offset
870 * @val: value to write to the register
871 *
872 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
873 */
874static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
875{
876 struct amdgpu_device *adev = info->dev->dev_private;
877
878 WREG32(reg, val);
879}
880
881/**
882 * cail_reg_read - read MMIO register
883 *
884 * @info: atom card_info pointer
885 * @reg: MMIO register offset
886 *
887 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
888 * Returns the value of the MMIO register.
889 */
890static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
891{
892 struct amdgpu_device *adev = info->dev->dev_private;
893 uint32_t r;
894
895 r = RREG32(reg);
896 return r;
897}
898
899/**
900 * cail_ioreg_write - write IO register
901 *
902 * @info: atom card_info pointer
903 * @reg: IO register offset
904 * @val: value to write to the IO register
905 *
906 * Provides a IO register accessor for the atom interpreter (r4xx+).
907 */
908static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
909{
910 struct amdgpu_device *adev = info->dev->dev_private;
911
912 WREG32_IO(reg, val);
913}
914
915/**
916 * cail_ioreg_read - read IO register
917 *
918 * @info: atom card_info pointer
919 * @reg: IO register offset
920 *
921 * Provides an IO register accessor for the atom interpreter (r4xx+).
922 * Returns the value of the IO register.
923 */
924static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
925{
926 struct amdgpu_device *adev = info->dev->dev_private;
927 uint32_t r;
928
929 r = RREG32_IO(reg);
930 return r;
931}
932
933/**
934 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
935 *
936 * @adev: amdgpu_device pointer
937 *
938 * Frees the driver info and register access callbacks for the ATOM
939 * interpreter (r4xx+).
940 * Called at driver shutdown.
941 */
942static void amdgpu_atombios_fini(struct amdgpu_device *adev)
943{
89e0ec9f 944 if (adev->mode_info.atom_context) {
d38ceaf9 945 kfree(adev->mode_info.atom_context->scratch);
946 kfree(adev->mode_info.atom_context->iio);
947 }
948 kfree(adev->mode_info.atom_context);
949 adev->mode_info.atom_context = NULL;
950 kfree(adev->mode_info.atom_card_info);
951 adev->mode_info.atom_card_info = NULL;
952}
953
954/**
955 * amdgpu_atombios_init - init the driver info and callbacks for atombios
956 *
957 * @adev: amdgpu_device pointer
958 *
959 * Initializes the driver info and register access callbacks for the
960 * ATOM interpreter (r4xx+).
961 * Returns 0 on success, -ENOMEM on failure.
962 * Called at driver startup.
963 */
964static int amdgpu_atombios_init(struct amdgpu_device *adev)
965{
966 struct card_info *atom_card_info =
967 kzalloc(sizeof(struct card_info), GFP_KERNEL);
968
969 if (!atom_card_info)
970 return -ENOMEM;
971
972 adev->mode_info.atom_card_info = atom_card_info;
973 atom_card_info->dev = adev->ddev;
974 atom_card_info->reg_read = cail_reg_read;
975 atom_card_info->reg_write = cail_reg_write;
976 /* needed for iio ops */
977 if (adev->rio_mem) {
978 atom_card_info->ioreg_read = cail_ioreg_read;
979 atom_card_info->ioreg_write = cail_ioreg_write;
980 } else {
b64a18c5 981 DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
982 atom_card_info->ioreg_read = cail_reg_read;
983 atom_card_info->ioreg_write = cail_reg_write;
984 }
985 atom_card_info->mc_read = cail_mc_read;
986 atom_card_info->mc_write = cail_mc_write;
987 atom_card_info->pll_read = cail_pll_read;
988 atom_card_info->pll_write = cail_pll_write;
989
990 adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
991 if (!adev->mode_info.atom_context) {
992 amdgpu_atombios_fini(adev);
993 return -ENOMEM;
994 }
995
996 mutex_init(&adev->mode_info.atom_context->mutex);
997 if (adev->is_atom_fw) {
998 amdgpu_atomfirmware_scratch_regs_init(adev);
999 amdgpu_atomfirmware_allocate_fb_scratch(adev);
1000 } else {
1001 amdgpu_atombios_scratch_regs_init(adev);
1002 amdgpu_atombios_allocate_fb_scratch(adev);
1003 }
1004 return 0;
1005}
1006
1007/* if we get transitioned to only one device, take VGA back */
1008/**
1009 * amdgpu_vga_set_decode - enable/disable vga decode
1010 *
1011 * @cookie: amdgpu_device pointer
1012 * @state: enable/disable vga decode
1013 *
1014 * Enable/disable vga decode (all asics).
1015 * Returns VGA resource flags.
1016 */
1017static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
1018{
1019 struct amdgpu_device *adev = cookie;
1020 amdgpu_asic_set_vga_state(adev, state);
1021 if (state)
1022 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1023 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1024 else
1025 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1026}
1027
1028/**
1029 * amdgpu_check_pot_argument - check that argument is a power of two
1030 *
1031 * @arg: value to check
1032 *
1033 * Validates that a certain argument is a power of two (all asics).
1034 * Returns true if argument is valid.
1035 */
1036static bool amdgpu_check_pot_argument(int arg)
1037{
1038 return (arg & (arg - 1)) == 0;
1039}
1040
bab4fee7 1041static void amdgpu_check_block_size(struct amdgpu_device *adev)
1042{
1043 /* defines number of bits in page table versus page directory,
1044 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1045 * page table and the remaining bits are in the page directory */
1046 if (amdgpu_vm_block_size == -1)
1047 return;
a1adf8be 1048
bab4fee7 1049 if (amdgpu_vm_block_size < 9) {
1050 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1051 amdgpu_vm_block_size);
bab4fee7 1052 goto def_value;
1053 }
1054
1055 if (amdgpu_vm_block_size > 24 ||
1056 (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
1057 dev_warn(adev->dev, "VM page table size (%d) too large\n",
1058 amdgpu_vm_block_size);
bab4fee7 1059 goto def_value;
a1adf8be 1060 }
1061
1062 return;
1063
1064def_value:
1065 amdgpu_vm_block_size = -1;
1066}
1067
1068static void amdgpu_check_vm_size(struct amdgpu_device *adev)
1069{
1070 if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
1071 dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
1072 amdgpu_vm_size);
1073 goto def_value;
1074 }
1075
1076 if (amdgpu_vm_size < 1) {
1077 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1078 amdgpu_vm_size);
1079 goto def_value;
1080 }
1081
1082 /*
1083 * Max GPUVM size for Cayman, SI, CI VI are 40 bits.
1084 */
1085 if (amdgpu_vm_size > 1024) {
1086 dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
1087 amdgpu_vm_size);
1088 goto def_value;
1089 }
1090
1091 return;
1092
1093def_value:
bab4fee7 1094 amdgpu_vm_size = -1;
1095}
1096
1097/**
1098 * amdgpu_check_arguments - validate module params
1099 *
1100 * @adev: amdgpu_device pointer
1101 *
1102 * Validates certain module parameters and updates
1103 * the associated values used by the driver (all asics).
1104 */
1105static void amdgpu_check_arguments(struct amdgpu_device *adev)
1106{
1107 if (amdgpu_sched_jobs < 4) {
1108 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1109 amdgpu_sched_jobs);
1110 amdgpu_sched_jobs = 4;
1111 } else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)){
1112 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1113 amdgpu_sched_jobs);
1114 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1115 }
1116
1117 if (amdgpu_gart_size != -1) {
c4e1a13a 1118 /* gtt size must be greater or equal to 32M */
1119 if (amdgpu_gart_size < 32) {
1120 dev_warn(adev->dev, "gart size (%d) too small\n",
1121 amdgpu_gart_size);
1122 amdgpu_gart_size = -1;
1123 }
1124 }
1125
83ca145d 1126 amdgpu_check_vm_size(adev);
d38ceaf9 1127
bab4fee7 1128 amdgpu_check_block_size(adev);
6a7f76e7 1129
526bae37 1130 if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
1131 !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
1132 dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
1133 amdgpu_vram_page_split);
1134 amdgpu_vram_page_split = 1024;
1135 }
1136}
1137
1138/**
1139 * amdgpu_switcheroo_set_state - set switcheroo state
1140 *
1141 * @pdev: pci dev pointer
1694467b 1142 * @state: vga_switcheroo state
1143 *
1144 * Callback for the switcheroo driver. Suspends or resumes the
1145 * asics before or after it is powered up using ACPI methods.
1146 */
1147static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1148{
1149 struct drm_device *dev = pci_get_drvdata(pdev);
1150
1151 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1152 return;
1153
1154 if (state == VGA_SWITCHEROO_ON) {
1155 unsigned d3_delay = dev->pdev->d3_delay;
1156
7ca85295 1157 pr_info("amdgpu: switched on\n");
1158 /* don't suspend or resume card normally */
1159 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1160
810ddc3a 1161 amdgpu_device_resume(dev, true, true);
1162
1163 dev->pdev->d3_delay = d3_delay;
1164
1165 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1166 drm_kms_helper_poll_enable(dev);
1167 } else {
7ca85295 1168 pr_info("amdgpu: switched off\n");
1169 drm_kms_helper_poll_disable(dev);
1170 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
810ddc3a 1171 amdgpu_device_suspend(dev, true, true);
1172 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1173 }
1174}
1175
1176/**
1177 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1178 *
1179 * @pdev: pci dev pointer
1180 *
1181 * Callback for the switcheroo driver. Check if the switcheroo
1182 * state can be changed.
1183 * Returns true if the state can be changed, false if not.
1184 */
1185static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1186{
1187 struct drm_device *dev = pci_get_drvdata(pdev);
1188
1189 /*
1190 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1191 * locking inversion with the driver load path. And the access here is
1192 * completely racy anyway. So don't bother with locking for now.
1193 */
1194 return dev->open_count == 0;
1195}
1196
1197static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1198 .set_gpu_state = amdgpu_switcheroo_set_state,
1199 .reprobe = NULL,
1200 .can_switch = amdgpu_switcheroo_can_switch,
1201};
1202
1203int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
5fc3aeeb 1204 enum amd_ip_block_type block_type,
1205 enum amd_clockgating_state state)
1206{
1207 int i, r = 0;
1208
1209 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1210 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1211 continue;
1212 if (adev->ip_blocks[i].version->type != block_type)
1213 continue;
1214 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1215 continue;
1216 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1217 (void *)adev, state);
1218 if (r)
1219 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1220 adev->ip_blocks[i].version->funcs->name, r);
1221 }
1222 return r;
1223}
1224
1225int amdgpu_set_powergating_state(struct amdgpu_device *adev,
5fc3aeeb 1226 enum amd_ip_block_type block_type,
1227 enum amd_powergating_state state)
1228{
1229 int i, r = 0;
1230
1231 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1232 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1233 continue;
1234 if (adev->ip_blocks[i].version->type != block_type)
1235 continue;
1236 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1237 continue;
1238 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1239 (void *)adev, state);
1240 if (r)
1241 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1242 adev->ip_blocks[i].version->funcs->name, r);
1243 }
1244 return r;
1245}
1246
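/*
 * Illustrative sketch, not part of the original file: callers pass an IP
 * block type and a target state, and the two walkers above fan the request
 * out to every matching block. The SMC clockgating ungate mirrors the call
 * made later in amdgpu_suspend(); treating the result as fatal here is a
 * hypothetical choice made only for the example.
 */
static int __maybe_unused amdgpu_example_wake_smc(struct amdgpu_device *adev)
{
	int r;

	/* ungate SMC clockgating before poking the block */
	r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
					 AMD_CG_STATE_UNGATE);
	if (r)
		return r;

	/* and make sure the block is not power gated either */
	return amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
					    AMD_PG_STATE_UNGATE);
}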
1247void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
1248{
1249 int i;
1250
1251 for (i = 0; i < adev->num_ip_blocks; i++) {
1252 if (!adev->ip_blocks[i].status.valid)
1253 continue;
1254 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1255 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1256 }
1257}
1258
1259int amdgpu_wait_for_idle(struct amdgpu_device *adev,
1260 enum amd_ip_block_type block_type)
1261{
1262 int i, r;
1263
1264 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1265 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1266 continue;
1267 if (adev->ip_blocks[i].version->type == block_type) {
1268 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1269 if (r)
1270 return r;
1271 break;
1272 }
1273 }
1274 return 0;
1275
1276}
1277
1278bool amdgpu_is_idle(struct amdgpu_device *adev,
1279 enum amd_ip_block_type block_type)
1280{
1281 int i;
1282
1283 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1284 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1285 continue;
1286 if (adev->ip_blocks[i].version->type == block_type)
1287 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1288 }
1289 return true;
1290
1291}
1292
1293struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
1294 enum amd_ip_block_type type)
1295{
1296 int i;
1297
1298 for (i = 0; i < adev->num_ip_blocks; i++)
a1255107 1299 if (adev->ip_blocks[i].version->type == type)
1300 return &adev->ip_blocks[i];
1301
1302 return NULL;
1303}
1304
1305/**
1306 * amdgpu_ip_block_version_cmp
1307 *
1308 * @adev: amdgpu_device pointer
5fc3aeeb 1309 * @type: enum amd_ip_block_type
1310 * @major: major version
1311 * @minor: minor version
1312 *
1313 * return 0 if equal or greater
1314 * return 1 if smaller or the ip_block doesn't exist
1315 */
1316int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
5fc3aeeb 1317 enum amd_ip_block_type type,
1318 u32 major, u32 minor)
1319{
a1255107 1320 struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
d38ceaf9 1321
1322 if (ip_block && ((ip_block->version->major > major) ||
1323 ((ip_block->version->major == major) &&
1324 (ip_block->version->minor >= minor))))
1325 return 0;
1326
1327 return 1;
1328}
1329
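/*
 * Illustrative sketch, not part of the original file: gating a feature on a
 * minimum IP block version with the compare helper above. The block type
 * and version numbers are arbitrary examples.
 */
static bool __maybe_unused amdgpu_example_gmc_is_v8(struct amdgpu_device *adev)
{
	/* 0 means the GMC block exists and is at least version 8.0 */
	return amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GMC, 8, 0) == 0;
}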
1330/**
1331 * amdgpu_ip_block_add
1332 *
1333 * @adev: amdgpu_device pointer
1334 * @ip_block_version: pointer to the IP to add
1335 *
1336 * Adds the IP block driver information to the collection of IPs
1337 * on the asic.
1338 */
1339int amdgpu_ip_block_add(struct amdgpu_device *adev,
1340 const struct amdgpu_ip_block_version *ip_block_version)
1341{
1342 if (!ip_block_version)
1343 return -EINVAL;
1344
1345 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1346
1347 return 0;
1348}
1349
483ef985 1350static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1351{
1352 adev->enable_virtual_display = false;
1353
1354 if (amdgpu_virtual_display) {
1355 struct drm_device *ddev = adev->ddev;
1356 const char *pci_address_name = pci_name(ddev->pdev);
0f66356d 1357 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1358
1359 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1360 pciaddstr_tmp = pciaddstr;
1361 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1362 pciaddname = strsep(&pciaddname_tmp, ",");
1363 if (!strcmp("all", pciaddname)
1364 || !strcmp(pci_address_name, pciaddname)) {
1365 long num_crtc;
1366 int res = -1;
1367
9accf2fd 1368 adev->enable_virtual_display = true;
1369
1370 if (pciaddname_tmp)
1371 res = kstrtol(pciaddname_tmp, 10,
1372 &num_crtc);
1373
1374 if (!res) {
1375 if (num_crtc < 1)
1376 num_crtc = 1;
1377 if (num_crtc > 6)
1378 num_crtc = 6;
1379 adev->mode_info.num_crtc = num_crtc;
1380 } else {
1381 adev->mode_info.num_crtc = 1;
1382 }
1383 break;
1384 }
1385 }
1386
1387 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1388 amdgpu_virtual_display, pci_address_name,
1389 adev->enable_virtual_display, adev->mode_info.num_crtc);
1390
1391 kfree(pciaddstr);
1392 }
1393}
1394
1395static int amdgpu_early_init(struct amdgpu_device *adev)
1396{
aaa36a97 1397 int i, r;
d38ceaf9 1398
483ef985 1399 amdgpu_device_enable_virtual_display(adev);
a6be7570 1400
d38ceaf9 1401 switch (adev->asic_type) {
1402 case CHIP_TOPAZ:
1403 case CHIP_TONGA:
48299f95 1404 case CHIP_FIJI:
1405 case CHIP_POLARIS11:
1406 case CHIP_POLARIS10:
c4642a47 1407 case CHIP_POLARIS12:
aaa36a97 1408 case CHIP_CARRIZO:
1409 case CHIP_STONEY:
1410 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
1411 adev->family = AMDGPU_FAMILY_CZ;
1412 else
1413 adev->family = AMDGPU_FAMILY_VI;
1414
1415 r = vi_set_ip_blocks(adev);
1416 if (r)
1417 return r;
1418 break;
1419#ifdef CONFIG_DRM_AMDGPU_SI
1420 case CHIP_VERDE:
1421 case CHIP_TAHITI:
1422 case CHIP_PITCAIRN:
1423 case CHIP_OLAND:
1424 case CHIP_HAINAN:
295d0daf 1425 adev->family = AMDGPU_FAMILY_SI;
1426 r = si_set_ip_blocks(adev);
1427 if (r)
1428 return r;
1429 break;
1430#endif
1431#ifdef CONFIG_DRM_AMDGPU_CIK
1432 case CHIP_BONAIRE:
1433 case CHIP_HAWAII:
1434 case CHIP_KAVERI:
1435 case CHIP_KABINI:
1436 case CHIP_MULLINS:
1437 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1438 adev->family = AMDGPU_FAMILY_CI;
1439 else
1440 adev->family = AMDGPU_FAMILY_KV;
1441
1442 r = cik_set_ip_blocks(adev);
1443 if (r)
1444 return r;
1445 break;
1446#endif
1447 case CHIP_VEGA10:
1448 adev->family = AMDGPU_FAMILY_AI;
1449
1450 r = soc15_set_ip_blocks(adev);
1451 if (r)
1452 return r;
1453 break;
1454 default:
1455 /* FIXME: not supported yet */
1456 return -EINVAL;
1457 }
1458
1459 if (amdgpu_sriov_vf(adev)) {
1460 r = amdgpu_virt_request_full_gpu(adev, true);
1461 if (r)
1462 return r;
1463 }
1464
1465 for (i = 0; i < adev->num_ip_blocks; i++) {
1466 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1467 DRM_ERROR("disabled ip block: %d\n", i);
a1255107 1468 adev->ip_blocks[i].status.valid = false;
d38ceaf9 1469 } else {
1470 if (adev->ip_blocks[i].version->funcs->early_init) {
1471 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2c1a2784 1472 if (r == -ENOENT) {
a1255107 1473 adev->ip_blocks[i].status.valid = false;
2c1a2784 1474 } else if (r) {
1475 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1476 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1477 return r;
2c1a2784 1478 } else {
a1255107 1479 adev->ip_blocks[i].status.valid = true;
2c1a2784 1480 }
974e6b64 1481 } else {
a1255107 1482 adev->ip_blocks[i].status.valid = true;
d38ceaf9 1483 }
1484 }
1485 }
1486
1487 adev->cg_flags &= amdgpu_cg_mask;
1488 adev->pg_flags &= amdgpu_pg_mask;
1489
1490 return 0;
1491}
1492
1493static int amdgpu_init(struct amdgpu_device *adev)
1494{
1495 int i, r;
1496
1497 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1498 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1499 continue;
a1255107 1500 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2c1a2784 1501 if (r) {
1502 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1503 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1504 return r;
2c1a2784 1505 }
a1255107 1506 adev->ip_blocks[i].status.sw = true;
d38ceaf9 1507 /* need to do gmc hw init early so we can allocate gpu mem */
a1255107 1508 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
d38ceaf9 1509 r = amdgpu_vram_scratch_init(adev);
1510 if (r) {
1511 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
d38ceaf9 1512 return r;
2c1a2784 1513 }
a1255107 1514 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1515 if (r) {
1516 DRM_ERROR("hw_init %d failed %d\n", i, r);
d38ceaf9 1517 return r;
2c1a2784 1518 }
d38ceaf9 1519 r = amdgpu_wb_init(adev);
1520 if (r) {
1521 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
d38ceaf9 1522 return r;
2c1a2784 1523 }
a1255107 1524 adev->ip_blocks[i].status.hw = true;
1525
1526 /* right after GMC hw init, we create CSA */
1527 if (amdgpu_sriov_vf(adev)) {
1528 r = amdgpu_allocate_static_csa(adev);
1529 if (r) {
1530 DRM_ERROR("allocate CSA failed %d\n", r);
1531 return r;
1532 }
1533 }
1534 }
1535 }
1536
1537 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1538 if (!adev->ip_blocks[i].status.sw)
1539 continue;
1540 /* gmc hw init is done early */
a1255107 1541 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
d38ceaf9 1542 continue;
a1255107 1543 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784 1544 if (r) {
1545 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1546 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1547 return r;
2c1a2784 1548 }
a1255107 1549 adev->ip_blocks[i].status.hw = true;
1550 }
1551
1552 return 0;
1553}
1554
1555static int amdgpu_late_init(struct amdgpu_device *adev)
1556{
1557 int i = 0, r;
1558
1559 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1560 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1561 continue;
1562 if (adev->ip_blocks[i].version->funcs->late_init) {
1563 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2c1a2784 1564 if (r) {
1565 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1566 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1567 return r;
2c1a2784 1568 }
a1255107 1569 adev->ip_blocks[i].status.late_initialized = true;
d38ceaf9 1570 }
4a446d55 1571 /* skip CG for VCE/UVD, it's handled specially */
1572 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1573 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
4a446d55 1574 /* enable clockgating to save power */
1575 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1576 AMD_CG_STATE_GATE);
1577 if (r) {
1578 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
a1255107 1579 adev->ip_blocks[i].version->funcs->name, r);
1580 return r;
1581 }
b0b00ff1 1582 }
1583 }
1584
1585 return 0;
1586}
1587
1588static int amdgpu_fini(struct amdgpu_device *adev)
1589{
1590 int i, r;
1591
1592 /* need to disable SMC first */
1593 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1594 if (!adev->ip_blocks[i].status.hw)
3e96dbfd 1595 continue;
a1255107 1596 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3e96dbfd 1597 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1598 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1599 AMD_CG_STATE_UNGATE);
1600 if (r) {
1601 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
a1255107 1602 adev->ip_blocks[i].version->funcs->name, r);
1603 return r;
1604 }
a1255107 1605 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
1606 /* XXX handle errors */
1607 if (r) {
1608 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
a1255107 1609 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd 1610 }
a1255107 1611 adev->ip_blocks[i].status.hw = false;
1612 break;
1613 }
1614 }
1615
d38ceaf9 1616 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1617 if (!adev->ip_blocks[i].status.hw)
d38ceaf9 1618 continue;
a1255107 1619 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1620 amdgpu_wb_fini(adev);
1621 amdgpu_vram_scratch_fini(adev);
1622 }
1623
1624 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1625 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1626 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1627 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1628 AMD_CG_STATE_UNGATE);
1629 if (r) {
1630 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1631 adev->ip_blocks[i].version->funcs->name, r);
1632 return r;
1633 }
2c1a2784 1634 }
8201a67a 1635
a1255107 1636 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
d38ceaf9 1637 /* XXX handle errors */
2c1a2784 1638 if (r) {
1639 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1640 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1641 }
8201a67a 1642
a1255107 1643 adev->ip_blocks[i].status.hw = false;
1644 }
1645
1646 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1647 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 1648 continue;
a1255107 1649 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
d38ceaf9 1650 /* XXX handle errors */
2c1a2784 1651 if (r) {
1652 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1653 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1654 }
1655 adev->ip_blocks[i].status.sw = false;
1656 adev->ip_blocks[i].status.valid = false;
1657 }
1658
a6dcfd9c 1659 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1660 if (!adev->ip_blocks[i].status.late_initialized)
8a2eef1d 1661 continue;
1662 if (adev->ip_blocks[i].version->funcs->late_fini)
1663 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1664 adev->ip_blocks[i].status.late_initialized = false;
1665 }
1666
3149d9da 1667 if (amdgpu_sriov_vf(adev)) {
2493664f 1668 amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
1669 amdgpu_virt_release_full_gpu(adev, false);
1670 }
2493664f 1671
1672 return 0;
1673}
1674
faefba95 1675int amdgpu_suspend(struct amdgpu_device *adev)
1676{
1677 int i, r;
1678
1679 if (amdgpu_sriov_vf(adev))
1680 amdgpu_virt_request_full_gpu(adev, false);
1681
1682 /* ungate SMC block first */
1683 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1684 AMD_CG_STATE_UNGATE);
1685 if (r) {
1686 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r);
1687 }
1688
d38ceaf9 1689 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1690 if (!adev->ip_blocks[i].status.valid)
1691 continue;
1692 /* ungate blocks so that suspend can properly shut them down */
c5a93a28 1693 if (i != AMD_IP_BLOCK_TYPE_SMC) {
1694 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1695 AMD_CG_STATE_UNGATE);
c5a93a28 1696 if (r) {
1697 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1698 adev->ip_blocks[i].version->funcs->name, r);
c5a93a28 1699 }
2c1a2784 1700 }
d38ceaf9 1701 /* XXX handle errors */
a1255107 1702 r = adev->ip_blocks[i].version->funcs->suspend(adev);
d38ceaf9 1703 /* XXX handle errors */
2c1a2784 1704 if (r) {
1705 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1706 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1707 }
1708 }
1709
1710 if (amdgpu_sriov_vf(adev))
1711 amdgpu_virt_release_full_gpu(adev, false);
1712
1713 return 0;
1714}
1715
e4f0fdcc 1716static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
1717{
1718 int i, r;
1719
1720 for (i = 0; i < adev->num_ip_blocks; i++) {
1721 if (!adev->ip_blocks[i].status.valid)
1722 continue;
1723
1724 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1725 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1726 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
e4f0fdcc 1727 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1728
1729 if (r) {
1730 DRM_ERROR("resume of IP block <%s> failed %d\n",
1731 adev->ip_blocks[i].version->funcs->name, r);
1732 return r;
1733 }
1734 }
1735
1736 return 0;
1737}
1738
e4f0fdcc 1739static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
1740{
1741 int i, r;
1742
1743 for (i = 0; i < adev->num_ip_blocks; i++) {
1744 if (!adev->ip_blocks[i].status.valid)
1745 continue;
1746
1747 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1748 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1749 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH )
1750 continue;
1751
e4f0fdcc 1752 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1753 if (r) {
1754 DRM_ERROR("resume of IP block <%s> failed %d\n",
1755 adev->ip_blocks[i].version->funcs->name, r);
1756 return r;
1757 }
1758 }
1759
1760 return 0;
1761}
1762
1763static int amdgpu_resume(struct amdgpu_device *adev)
1764{
1765 int i, r;
1766
1767 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1768 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1769 continue;
a1255107 1770 r = adev->ip_blocks[i].version->funcs->resume(adev);
2c1a2784 1771 if (r) {
1772 DRM_ERROR("resume of IP block <%s> failed %d\n",
1773 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1774 return r;
2c1a2784 1775 }
1776 }
1777
1778 return 0;
1779}
1780
4e99a44e 1781static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 1782{
1783 if (adev->is_atom_fw) {
1784 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
1785 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1786 } else {
1787 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
1788 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1789 }
1790}
1791
1792/**
1793 * amdgpu_device_init - initialize the driver
1794 *
1795 * @adev: amdgpu_device pointer
1796 * @ddev: drm dev pointer
1797 * @pdev: pci dev pointer
1798 * @flags: driver flags
1799 *
1800 * Initializes the driver info and hw (all asics).
1801 * Returns 0 for success or an error on failure.
1802 * Called at driver startup.
1803 */
1804int amdgpu_device_init(struct amdgpu_device *adev,
1805 struct drm_device *ddev,
1806 struct pci_dev *pdev,
1807 uint32_t flags)
1808{
1809 int r, i;
1810 bool runtime = false;
95844d20 1811 u32 max_MBps;
1812
1813 adev->shutdown = false;
1814 adev->dev = &pdev->dev;
1815 adev->ddev = ddev;
1816 adev->pdev = pdev;
1817 adev->flags = flags;
2f7d10b3 1818 adev->asic_type = flags & AMD_ASIC_MASK;
1819 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
1820 adev->mc.gtt_size = 512 * 1024 * 1024;
1821 adev->accel_working = false;
1822 adev->num_rings = 0;
1823 adev->mman.buffer_funcs = NULL;
1824 adev->mman.buffer_funcs_ring = NULL;
1825 adev->vm_manager.vm_pte_funcs = NULL;
2d55e45a 1826 adev->vm_manager.vm_pte_num_rings = 0;
d38ceaf9 1827 adev->gart.gart_funcs = NULL;
f54d1867 1828 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
1829
1830 adev->smc_rreg = &amdgpu_invalid_rreg;
1831 adev->smc_wreg = &amdgpu_invalid_wreg;
1832 adev->pcie_rreg = &amdgpu_invalid_rreg;
1833 adev->pcie_wreg = &amdgpu_invalid_wreg;
1834 adev->pciep_rreg = &amdgpu_invalid_rreg;
1835 adev->pciep_wreg = &amdgpu_invalid_wreg;
1836 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
1837 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
1838 adev->didt_rreg = &amdgpu_invalid_rreg;
1839 adev->didt_wreg = &amdgpu_invalid_wreg;
1840 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
1841 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
1842 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
1843 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1844
ccdbb20a 1845
1846 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1847 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
1848 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
1849
1850 /* mutex initializations are all done here so we
1851 * can recall functions without locking issues */
d38ceaf9 1852 atomic_set(&adev->irq.ih.lock, 0);
0e5ca0d1 1853 mutex_init(&adev->firmware.mutex);
1854 mutex_init(&adev->pm.mutex);
1855 mutex_init(&adev->gfx.gpu_clock_mutex);
1856 mutex_init(&adev->srbm_mutex);
1857 mutex_init(&adev->grbm_idx_mutex);
1858 mutex_init(&adev->mn_lock);
1859 hash_init(adev->mn_hash);
1860
1861 amdgpu_check_arguments(adev);
1862
1863 /* Registers mapping */
1864 /* TODO: block userspace mapping of io register */
1865 spin_lock_init(&adev->mmio_idx_lock);
1866 spin_lock_init(&adev->smc_idx_lock);
1867 spin_lock_init(&adev->pcie_idx_lock);
1868 spin_lock_init(&adev->uvd_ctx_idx_lock);
1869 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 1870 spin_lock_init(&adev->gc_cac_idx_lock);
d38ceaf9 1871 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 1872 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 1873
1874 INIT_LIST_HEAD(&adev->shadow_list);
1875 mutex_init(&adev->shadow_list_lock);
1876
1877 INIT_LIST_HEAD(&adev->gtt_list);
1878 spin_lock_init(&adev->gtt_list_lock);
1879
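	/* Register MMIO lives in a different BAR depending on the family:
	 * SI parts (everything below BONAIRE) expose it in BAR 2, while
	 * CIK and newer parts use BAR 5.
	 */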
1880 if (adev->asic_type >= CHIP_BONAIRE) {
1881 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
1882 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
1883 } else {
1884 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
1885 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
1886 }
d38ceaf9 1887
1888 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
1889 if (adev->rmmio == NULL) {
1890 return -ENOMEM;
1891 }
1892 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
1893 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
1894
1895 if (adev->asic_type >= CHIP_BONAIRE)
1896 /* doorbell bar mapping */
1897 amdgpu_doorbell_init(adev);
1898
1899 /* io port mapping */
1900 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1901 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
1902 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
1903 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
1904 break;
1905 }
1906 }
1907 if (adev->rio_mem == NULL)
b64a18c5 1908 DRM_INFO("PCI I/O BAR is not found.\n");
1909
1910 /* early init functions */
1911 r = amdgpu_early_init(adev);
1912 if (r)
1913 return r;
1914
1915 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
1916 /* this will fail for cards that aren't VGA class devices, just
1917 * ignore it */
1918 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
1919
1920 if (amdgpu_runtime_pm == 1)
1921 runtime = true;
e9bef455 1922 if (amdgpu_device_is_px(ddev))
d38ceaf9 1923 runtime = true;
1924 if (!pci_is_thunderbolt_attached(adev->pdev))
1925 vga_switcheroo_register_client(adev->pdev,
1926 &amdgpu_switcheroo_ops, runtime);
1927 if (runtime)
1928 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
1929
1930 /* Read BIOS */
1931 if (!amdgpu_get_bios(adev)) {
1932 r = -EINVAL;
1933 goto failed;
1934 }
f7e9e9fe 1935
d38ceaf9 1936 r = amdgpu_atombios_init(adev);
1937 if (r) {
1938 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
83ba126a 1939 goto failed;
2c1a2784 1940 }
d38ceaf9 1941
1942 /* detect if we are running with an SR-IOV vBIOS */
1943 amdgpu_device_detect_sriov_bios(adev);
048765ad 1944
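	/* "Posting" runs the AtomBIOS ASIC init tables to bring the chip to a
	 * known state; amdgpu_vpost_needed() skips it when the vBIOS has
	 * already posted the card (e.g. the primary GPU at boot).
	 */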
d38ceaf9 1945 /* Post card if necessary */
bec86378 1946 if (amdgpu_vpost_needed(adev)) {
d38ceaf9 1947 if (!adev->bios) {
bec86378 1948 dev_err(adev->dev, "no vBIOS found\n");
1949 r = -EINVAL;
1950 goto failed;
d38ceaf9 1951 }
bec86378 1952 DRM_INFO("GPU posting now...\n");
1953 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
1954 if (r) {
1955 dev_err(adev->dev, "gpu post error!\n");
1956 goto failed;
1957 }
1958 } else {
1959 DRM_INFO("GPU post is not needed\n");
1960 }
1961
1962 if (!adev->is_atom_fw) {
1963 /* Initialize clocks */
1964 r = amdgpu_atombios_get_clock_info(adev);
1965 if (r) {
1966 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
1967 return r;
1968 }
1969 /* init i2c buses */
1970 amdgpu_atombios_i2c_init(adev);
2c1a2784 1971 }
1972
1973 /* Fence driver */
1974 r = amdgpu_fence_driver_init(adev);
1975 if (r) {
1976 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
83ba126a 1977 goto failed;
2c1a2784 1978 }
1979
1980 /* init the mode config */
1981 drm_mode_config_init(adev->ddev);
1982
1983 r = amdgpu_init(adev);
1984 if (r) {
2c1a2784 1985 dev_err(adev->dev, "amdgpu_init failed\n");
d38ceaf9 1986 amdgpu_fini(adev);
83ba126a 1987 goto failed;
1988 }
1989
1990 adev->accel_working = true;
1991
1992 /* Initialize the buffer migration limit. */
1993 if (amdgpu_moverate >= 0)
1994 max_MBps = amdgpu_moverate;
1995 else
1996 max_MBps = 8; /* Allow 8 MB/s. */
1997 /* Get a log2 for easy divisions. */
1998 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
1999
2000 r = amdgpu_ib_pool_init(adev);
2001 if (r) {
2002 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
83ba126a 2003 goto failed;
2004 }
2005
2006 r = amdgpu_ib_ring_tests(adev);
2007 if (r)
2008 DRM_ERROR("ib ring test failed (%d).\n", r);
2009
2010 amdgpu_fbdev_init(adev);
2011
d38ceaf9 2012 r = amdgpu_gem_debugfs_init(adev);
3f14e623 2013 if (r)
d38ceaf9 2014 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
2015
2016 r = amdgpu_debugfs_regs_init(adev);
3f14e623 2017 if (r)
d38ceaf9 2018 DRM_ERROR("registering register debugfs failed (%d).\n", r);
d38ceaf9 2019
50ab2533 2020 r = amdgpu_debugfs_firmware_init(adev);
3f14e623 2021 if (r)
50ab2533 2022 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
50ab2533 2023
2024 if ((amdgpu_testing & 1)) {
2025 if (adev->accel_working)
2026 amdgpu_test_moves(adev);
2027 else
2028 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2029 }
2030 if (amdgpu_benchmarking) {
2031 if (adev->accel_working)
2032 amdgpu_benchmark(adev, amdgpu_benchmarking);
2033 else
2034 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2035 }
2036
2037 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2038 * explicit gating rather than handling it automatically.
2039 */
2040 r = amdgpu_late_init(adev);
2041 if (r) {
2042 dev_err(adev->dev, "amdgpu_late_init failed\n");
83ba126a 2043 goto failed;
2c1a2784 2044 }
2045
2046 return 0;
2047
2048failed:
2049 if (runtime)
2050 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2051 return r;
2052}
2053
2054/**
2055 * amdgpu_device_fini - tear down the driver
2056 *
2057 * @adev: amdgpu_device pointer
2058 *
2059 * Tear down the driver info (all asics).
2060 * Called at driver shutdown.
2061 */
2062void amdgpu_device_fini(struct amdgpu_device *adev)
2063{
2064 int r;
2065
2066 DRM_INFO("amdgpu: finishing device.\n");
2067 adev->shutdown = true;
2068 if (adev->mode_info.mode_config_initialized)
2069 drm_crtc_force_disable_all(adev->ddev);
2070 /* evict vram memory */
2071 amdgpu_bo_evict_vram(adev);
2072 amdgpu_ib_pool_fini(adev);
2073 amdgpu_fence_driver_fini(adev);
2074 amdgpu_fbdev_fini(adev);
2075 r = amdgpu_fini(adev);
2076 adev->accel_working = false;
2077 /* free i2c buses */
2078 amdgpu_i2c_fini(adev);
2079 amdgpu_atombios_fini(adev);
2080 kfree(adev->bios);
2081 adev->bios = NULL;
2082 if (!pci_is_thunderbolt_attached(adev->pdev))
2083 vga_switcheroo_unregister_client(adev->pdev);
2084 if (adev->flags & AMD_IS_PX)
2085 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2086 vga_client_register(adev->pdev, NULL, NULL, NULL);
2087 if (adev->rio_mem)
2088 pci_iounmap(adev->pdev, adev->rio_mem);
2089 adev->rio_mem = NULL;
2090 iounmap(adev->rmmio);
2091 adev->rmmio = NULL;
2092 if (adev->asic_type >= CHIP_BONAIRE)
2093 amdgpu_doorbell_fini(adev);
d38ceaf9 2094 amdgpu_debugfs_regs_cleanup(adev);
2095}
2096
2097
2098/*
2099 * Suspend & resume.
2100 */
2101/**
810ddc3a 2102 * amdgpu_device_suspend - initiate device suspend
2103 *
2104 * @dev: drm dev pointer
2105 * @suspend: whether to put the PCI device into the D3hot power state
2106 *
2107 * Puts the hw in the suspend state (all asics).
2108 * Returns 0 for success or an error on failure.
2109 * Called at driver suspend.
2110 */
810ddc3a 2111int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
2112{
2113 struct amdgpu_device *adev;
2114 struct drm_crtc *crtc;
2115 struct drm_connector *connector;
5ceb54c6 2116 int r;
2117
2118 if (dev == NULL || dev->dev_private == NULL) {
2119 return -ENODEV;
2120 }
2121
2122 adev = dev->dev_private;
2123
2124 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2125 return 0;
2126
2127 drm_kms_helper_poll_disable(dev);
2128
2129 /* turn off display hw */
4c7fbc39 2130 drm_modeset_lock_all(dev);
2131 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2132 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2133 }
4c7fbc39 2134 drm_modeset_unlock_all(dev);
d38ceaf9 2135
756e6880 2136 /* unpin the front buffers and cursors */
d38ceaf9 2137 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
756e6880 2138 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2139 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2140 struct amdgpu_bo *robj;
2141
2142 if (amdgpu_crtc->cursor_bo) {
2143 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2144 r = amdgpu_bo_reserve(aobj, true);
2145 if (r == 0) {
2146 amdgpu_bo_unpin(aobj);
2147 amdgpu_bo_unreserve(aobj);
2148 }
2149 }
2150
2151 if (rfb == NULL || rfb->obj == NULL) {
2152 continue;
2153 }
2154 robj = gem_to_amdgpu_bo(rfb->obj);
2155 /* don't unpin kernel fb objects */
2156 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
7a6901d7 2157 r = amdgpu_bo_reserve(robj, true);
2158 if (r == 0) {
2159 amdgpu_bo_unpin(robj);
2160 amdgpu_bo_unreserve(robj);
2161 }
2162 }
2163 }
2164 /* evict vram memory */
2165 amdgpu_bo_evict_vram(adev);
2166
5ceb54c6 2167 amdgpu_fence_driver_suspend(adev);
2168
2169 r = amdgpu_suspend(adev);
2170
2171 /* evict remaining vram memory
2172 * This second call to evict vram is to evict the gart page table
2173 * using the CPU.
2174 */
2175 amdgpu_bo_evict_vram(adev);
2176
2177 if (adev->is_atom_fw)
2178 amdgpu_atomfirmware_scratch_regs_save(adev);
2179 else
2180 amdgpu_atombios_scratch_regs_save(adev);
2181 pci_save_state(dev->pdev);
2182 if (suspend) {
2183 /* Shut down the device */
2184 pci_disable_device(dev->pdev);
2185 pci_set_power_state(dev->pdev, PCI_D3hot);
74b0b157 2186 } else {
2187 r = amdgpu_asic_reset(adev);
2188 if (r)
2189 DRM_ERROR("amdgpu asic reset failed\n");
2190 }
2191
2192 if (fbcon) {
2193 console_lock();
2194 amdgpu_fbdev_set_suspend(adev, 1);
2195 console_unlock();
2196 }
2197 return 0;
2198}
2199
2200/**
810ddc3a 2201 * amdgpu_device_resume - initiate device resume
2202 *
2203 * @dev: drm dev pointer
2204 *
2205 * Bring the hw back to operating state (all asics).
2206 * Returns 0 for success or an error on failure.
2207 * Called at driver resume.
2208 */
810ddc3a 2209int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
2210{
2211 struct drm_connector *connector;
2212 struct amdgpu_device *adev = dev->dev_private;
756e6880 2213 struct drm_crtc *crtc;
03161a6e 2214 int r = 0;
2215
2216 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2217 return 0;
2218
74b0b157 2219 if (fbcon)
d38ceaf9 2220 console_lock();
74b0b157 2221
2222 if (resume) {
2223 pci_set_power_state(dev->pdev, PCI_D0);
2224 pci_restore_state(dev->pdev);
74b0b157 2225 r = pci_enable_device(dev->pdev);
2226 if (r)
2227 goto unlock;
d38ceaf9 2228 }
2229 if (adev->is_atom_fw)
2230 amdgpu_atomfirmware_scratch_regs_restore(adev);
2231 else
2232 amdgpu_atombios_scratch_regs_restore(adev);
2233
2234 /* post card */
c836fec5 2235 if (amdgpu_need_post(adev)) {
74b0b157 2236 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2237 if (r)
2238 DRM_ERROR("amdgpu asic init failed\n");
2239 }
2240
2241 r = amdgpu_resume(adev);
e6707218 2242 if (r) {
ca198528 2243 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
03161a6e 2244 goto unlock;
e6707218 2245 }
2246 amdgpu_fence_driver_resume(adev);
2247
2248 if (resume) {
2249 r = amdgpu_ib_ring_tests(adev);
2250 if (r)
2251 DRM_ERROR("ib ring test failed (%d).\n", r);
2252 }
2253
2254 r = amdgpu_late_init(adev);
2255 if (r)
2256 goto unlock;
d38ceaf9 2257
2258 /* pin cursors */
2259 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2260 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2261
2262 if (amdgpu_crtc->cursor_bo) {
2263 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2264 r = amdgpu_bo_reserve(aobj, true);
2265 if (r == 0) {
2266 r = amdgpu_bo_pin(aobj,
2267 AMDGPU_GEM_DOMAIN_VRAM,
2268 &amdgpu_crtc->cursor_addr);
2269 if (r != 0)
2270 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2271 amdgpu_bo_unreserve(aobj);
2272 }
2273 }
2274 }
2275
2276 /* blat the mode back in */
2277 if (fbcon) {
2278 drm_helper_resume_force_mode(dev);
2279 /* turn on display hw */
4c7fbc39 2280 drm_modeset_lock_all(dev);
2281 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2282 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2283 }
4c7fbc39 2284 drm_modeset_unlock_all(dev);
2285 }
2286
2287 drm_kms_helper_poll_enable(dev);
2288
2289 /*
2290 * Most of the connector probing functions try to acquire runtime pm
2291 * refs to ensure that the GPU is powered on when connector polling is
2292 * performed. Since we're calling this from a runtime PM callback,
2293 * trying to acquire rpm refs will cause us to deadlock.
2294 *
2295 * Since we're guaranteed to be holding the rpm lock, it's safe to
2296 * temporarily disable the rpm helpers so this doesn't deadlock us.
2297 */
2298#ifdef CONFIG_PM
2299 dev->dev->power.disable_depth++;
2300#endif
54fb2a5c 2301 drm_helper_hpd_irq_event(dev);
2302#ifdef CONFIG_PM
2303 dev->dev->power.disable_depth--;
2304#endif
d38ceaf9 2305
03161a6e 2306 if (fbcon)
d38ceaf9 2307 amdgpu_fbdev_set_suspend(adev, 0);
2308
2309unlock:
2310 if (fbcon)
d38ceaf9 2311 console_unlock();
d38ceaf9 2312
03161a6e 2313 return r;
2314}
2315
2316static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2317{
2318 int i;
2319 bool asic_hang = false;
2320
2321 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2322 if (!adev->ip_blocks[i].status.valid)
63fbf42f 2323 continue;
2324 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2325 adev->ip_blocks[i].status.hang =
2326 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2327 if (adev->ip_blocks[i].status.hang) {
2328 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
2329 asic_hang = true;
2330 }
2331 }
2332 return asic_hang;
2333}
2334
4d446656 2335static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
2336{
2337 int i, r = 0;
2338
2339 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2340 if (!adev->ip_blocks[i].status.valid)
d31a501e 2341 continue;
2342 if (adev->ip_blocks[i].status.hang &&
2343 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2344 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
2345 if (r)
2346 return r;
2347 }
2348 }
2349
2350 return 0;
2351}
2352
2353static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2354{
2355 int i;
2356
2357 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2358 if (!adev->ip_blocks[i].status.valid)
da146d3b 2359 continue;
2360 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2361 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2362 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
2363 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
2364 if (adev->ip_blocks[i].status.hang) {
2365 DRM_INFO("Some block need full reset!\n");
2366 return true;
2367 }
2368 }
2369 }
2370 return false;
2371}
2372
2373static int amdgpu_soft_reset(struct amdgpu_device *adev)
2374{
2375 int i, r = 0;
2376
2377 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2378 if (!adev->ip_blocks[i].status.valid)
35d782fe 2379 continue;
2380 if (adev->ip_blocks[i].status.hang &&
2381 adev->ip_blocks[i].version->funcs->soft_reset) {
2382 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
2383 if (r)
2384 return r;
2385 }
2386 }
2387
2388 return 0;
2389}
2390
2391static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2392{
2393 int i, r = 0;
2394
2395 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2396 if (!adev->ip_blocks[i].status.valid)
35d782fe 2397 continue;
2398 if (adev->ip_blocks[i].status.hang &&
2399 adev->ip_blocks[i].version->funcs->post_soft_reset)
2400 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
2401 if (r)
2402 return r;
2403 }
2404
2405 return 0;
2406}
2407
2408bool amdgpu_need_backup(struct amdgpu_device *adev)
2409{
2410 if (adev->flags & AMD_IS_APU)
2411 return false;
2412
2413 return amdgpu_lockup_timeout > 0 ? true : false;
2414}
2415
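/* Restore a buffer object's VRAM contents from its GTT shadow copy after a
 * reset. BOs that have been evicted out of VRAM are skipped; the copy is
 * queued on @ring and the resulting fence is handed back through @fence.
 */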
2416static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2417 struct amdgpu_ring *ring,
2418 struct amdgpu_bo *bo,
f54d1867 2419 struct dma_fence **fence)
2420{
2421 uint32_t domain;
2422 int r;
2423
2424 if (!bo->shadow)
2425 return 0;
2426
1d284797 2427 r = amdgpu_bo_reserve(bo, true);
2428 if (r)
2429 return r;
2430 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2431 /* if bo has been evicted, then no need to recover */
2432 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
2433 r = amdgpu_bo_validate(bo->shadow);
2434 if (r) {
2435 DRM_ERROR("bo validate failed!\n");
2436 goto err;
2437 }
2438
2439 r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem);
2440 if (r) {
2441 DRM_ERROR("%p bind failed\n", bo->shadow);
2442 goto err;
2443 }
2444
23d2e504 2445 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
53cdccd5 2446 NULL, fence, true);
2447 if (r) {
2448 DRM_ERROR("recover page table failed!\n");
2449 goto err;
2450 }
2451 }
53cdccd5 2452err:
2453 amdgpu_bo_unreserve(bo);
2454 return r;
2455}
2456
2457/**
2458 * amdgpu_sriov_gpu_reset - reset the asic
2459 *
2460 * @adev: amdgpu device pointer
2461 * @voluntary: true if the reset is requested by the guest,
2462 * false if it is initiated by the hypervisor
2463 *
2464 * Attempt to reset the GPU if it has hung (all asics),
2465 * for the SR-IOV case.
2466 * Returns 0 for success or an error on failure.
2467 */
2468int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
2469{
2470 int i, r = 0;
2471 int resched;
2472 struct amdgpu_bo *bo, *tmp;
2473 struct amdgpu_ring *ring;
2474 struct dma_fence *fence = NULL, *next = NULL;
2475
147b5983 2476 mutex_lock(&adev->virt.lock_reset);
a90ad3c2 2477 atomic_inc(&adev->gpu_reset_counter);
1fb37a3d 2478 adev->gfx.in_reset = true;
2479
2480 /* block TTM */
2481 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2482
2483 /* block scheduler */
2484 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2485 ring = adev->rings[i];
2486
2487 if (!ring || !ring->sched.thread)
2488 continue;
2489
2490 kthread_park(ring->sched.thread);
2491 amd_sched_hw_job_reset(&ring->sched);
2492 }
2493
2494 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2495 amdgpu_fence_driver_force_completion(adev);
2496
2497 /* request to take full control of GPU before re-initialization */
2498 if (voluntary)
2499 amdgpu_virt_reset_gpu(adev);
2500 else
2501 amdgpu_virt_request_full_gpu(adev, true);
2502
2503
2504 /* Resume IP prior to SMC */
e4f0fdcc 2505 amdgpu_sriov_reinit_early(adev);
2506
2507 /* we need to recover the gart before running the SMC/CP/SDMA resume */
2508 amdgpu_ttm_recover_gart(adev);
2509
2510 /* now we are okay to resume SMC/CP/SDMA */
e4f0fdcc 2511 amdgpu_sriov_reinit_late(adev);
2512
2513 amdgpu_irq_gpu_reset_resume_helper(adev);
2514
2515 if (amdgpu_ib_ring_tests(adev))
2516 dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
2517
2518 /* release full control of GPU after ib test */
2519 amdgpu_virt_release_full_gpu(adev, true);
2520
2521 DRM_INFO("recover vram bo from shadow\n");
2522
2523 ring = adev->mman.buffer_funcs_ring;
2524 mutex_lock(&adev->shadow_list_lock);
2525 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
236763d3 2526 next = NULL;
2527 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2528 if (fence) {
2529 r = dma_fence_wait(fence, false);
2530 if (r) {
2531 WARN(r, "recovery from shadow isn't completed\n");
2532 break;
2533 }
2534 }
2535
2536 dma_fence_put(fence);
2537 fence = next;
2538 }
2539 mutex_unlock(&adev->shadow_list_lock);
2540
2541 if (fence) {
2542 r = dma_fence_wait(fence, false);
2543 if (r)
2544 WARN(r, "recovery from shadow isn't completed\n");
2545 }
2546 dma_fence_put(fence);
2547
2548 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2549 struct amdgpu_ring *ring = adev->rings[i];
2550 if (!ring || !ring->sched.thread)
2551 continue;
2552
2553 amd_sched_job_recovery(&ring->sched);
2554 kthread_unpark(ring->sched.thread);
2555 }
2556
2557 drm_helper_resume_force_mode(adev->ddev);
2558 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2559 if (r) {
2560 /* bad news, how to tell it to userspace ? */
2561 dev_info(adev->dev, "GPU reset failed\n");
2562 }
2563
1fb37a3d 2564 adev->gfx.in_reset = false;
147b5983 2565 mutex_unlock(&adev->virt.lock_reset);
2566 return r;
2567}
2568
2569/**
2570 * amdgpu_gpu_reset - reset the asic
2571 *
2572 * @adev: amdgpu device pointer
2573 *
2574 * Attempt to reset the GPU if it has hung (all asics).
2575 * Returns 0 for success or an error on failure.
2576 */
2577int amdgpu_gpu_reset(struct amdgpu_device *adev)
2578{
2579 int i, r;
2580 int resched;
35d782fe 2581 bool need_full_reset;
d38ceaf9 2582
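	/* Reset flow: VFs take the SR-IOV path; otherwise verify that a hang
	 * was really detected, park the schedulers, try a per-block soft
	 * reset first, and only fall back to a full suspend/ASIC reset/resume
	 * cycle (including GART and VM page table recovery from the shadow
	 * BOs) when soft reset is not enough.
	 */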
fb140b29 2583 if (amdgpu_sriov_vf(adev))
a90ad3c2 2584 return amdgpu_sriov_gpu_reset(adev, true);
fb140b29 2585
2586 if (!amdgpu_check_soft_reset(adev)) {
2587 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2588 return 0;
2589 }
d38ceaf9 2590
d94aed5a 2591 atomic_inc(&adev->gpu_reset_counter);
d38ceaf9 2592
2593 /* block TTM */
2594 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2595
2596 /* block scheduler */
2597 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2598 struct amdgpu_ring *ring = adev->rings[i];
2599
51687759 2600 if (!ring || !ring->sched.thread)
2601 continue;
2602 kthread_park(ring->sched.thread);
aa1c8900 2603 amd_sched_hw_job_reset(&ring->sched);
0875dc9e 2604 }
2605 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2606 amdgpu_fence_driver_force_completion(adev);
d38ceaf9 2607
35d782fe 2608 need_full_reset = amdgpu_need_full_reset(adev);
d38ceaf9 2609
2610 if (!need_full_reset) {
2611 amdgpu_pre_soft_reset(adev);
2612 r = amdgpu_soft_reset(adev);
2613 amdgpu_post_soft_reset(adev);
2614 if (r || amdgpu_check_soft_reset(adev)) {
2615 DRM_INFO("soft reset failed, will fallback to full reset!\n");
2616 need_full_reset = true;
2617 }
2618 }
2619
35d782fe 2620 if (need_full_reset) {
35d782fe 2621 r = amdgpu_suspend(adev);
bfa99269 2622
2623retry:
2624 /* Disable fb access */
2625 if (adev->mode_info.num_crtc) {
2626 struct amdgpu_mode_mc_save save;
2627 amdgpu_display_stop_mc_access(adev, &save);
2628 amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
2629 }
2630 if (adev->is_atom_fw)
2631 amdgpu_atomfirmware_scratch_regs_save(adev);
2632 else
2633 amdgpu_atombios_scratch_regs_save(adev);
35d782fe 2634 r = amdgpu_asic_reset(adev);
2635 if (adev->is_atom_fw)
2636 amdgpu_atomfirmware_scratch_regs_restore(adev);
2637 else
2638 amdgpu_atombios_scratch_regs_restore(adev);
2639 /* post card */
2640 amdgpu_atom_asic_init(adev->mode_info.atom_context);
2641
2642 if (!r) {
2643 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
2644 r = amdgpu_resume(adev);
2645 }
d38ceaf9 2646 }
d38ceaf9 2647 if (!r) {
e72cfd58 2648 amdgpu_irq_gpu_reset_resume_helper(adev);
2649 if (need_full_reset && amdgpu_need_backup(adev)) {
2650 r = amdgpu_ttm_recover_gart(adev);
2651 if (r)
2652 DRM_ERROR("gart recovery failed!!!\n");
2653 }
2654 r = amdgpu_ib_ring_tests(adev);
2655 if (r) {
2656 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
40019dc4 2657 r = amdgpu_suspend(adev);
53cdccd5 2658 need_full_reset = true;
40019dc4 2659 goto retry;
1f465087 2660 }
2661 /**
2662 * recover vm page tables, since we cannot depend on VRAM being
2663 * consistent after a full gpu reset.
2664 */
2665 if (need_full_reset && amdgpu_need_backup(adev)) {
2666 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2667 struct amdgpu_bo *bo, *tmp;
f54d1867 2668 struct dma_fence *fence = NULL, *next = NULL;
2669
2670 DRM_INFO("recover vram bo from shadow\n");
2671 mutex_lock(&adev->shadow_list_lock);
2672 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
236763d3 2673 next = NULL;
2674 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2675 if (fence) {
f54d1867 2676 r = dma_fence_wait(fence, false);
53cdccd5 2677 if (r) {
1d7b17b0 2678 WARN(r, "recovery from shadow isn't completed\n");
2679 break;
2680 }
2681 }
1f465087 2682
f54d1867 2683 dma_fence_put(fence);
2684 fence = next;
2685 }
2686 mutex_unlock(&adev->shadow_list_lock);
2687 if (fence) {
f54d1867 2688 r = dma_fence_wait(fence, false);
53cdccd5 2689 if (r)
1d7b17b0 2690 WARN(r, "recovery from shadow isn't completed\n");
53cdccd5 2691 }
f54d1867 2692 dma_fence_put(fence);
53cdccd5 2693 }
2694 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2695 struct amdgpu_ring *ring = adev->rings[i];
2696
2697 if (!ring || !ring->sched.thread)
d38ceaf9 2698 continue;
53cdccd5 2699
aa1c8900 2700 amd_sched_job_recovery(&ring->sched);
0875dc9e 2701 kthread_unpark(ring->sched.thread);
d38ceaf9 2702 }
d38ceaf9 2703 } else {
2200edac 2704 dev_err(adev->dev, "asic resume failed (%d).\n", r);
d38ceaf9 2705 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
51687759 2706 if (adev->rings[i] && adev->rings[i]->sched.thread) {
0875dc9e 2707 kthread_unpark(adev->rings[i]->sched.thread);
0875dc9e 2708 }
2709 }
2710 }
2711
2712 drm_helper_resume_force_mode(adev->ddev);
2713
2714 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2715 if (r) {
2716 /* bad news, how to tell it to userspace ? */
2717 dev_info(adev->dev, "GPU reset failed\n");
2718 }
2719
2720 return r;
2721}
2722
2723void amdgpu_get_pcie_info(struct amdgpu_device *adev)
2724{
2725 u32 mask;
2726 int ret;
2727
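	/* Resolve the supported PCIe gen and lane masks in priority order:
	 * explicit module parameters first, fixed defaults for devices on a
	 * root bus (covers APUs), otherwise whatever the DRM PCIe helpers
	 * report for the link.
	 */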
2728 if (amdgpu_pcie_gen_cap)
2729 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 2730
2731 if (amdgpu_pcie_lane_cap)
2732 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 2733
2734 /* covers APUs as well */
2735 if (pci_is_root_bus(adev->pdev->bus)) {
2736 if (adev->pm.pcie_gen_mask == 0)
2737 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2738 if (adev->pm.pcie_mlw_mask == 0)
2739 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 2740 return;
cd474ba0 2741 }
d0dd7f0c 2742
2743 if (adev->pm.pcie_gen_mask == 0) {
2744 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
2745 if (!ret) {
2746 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
2747 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
2748 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
2749
2750 if (mask & DRM_PCIE_SPEED_25)
2751 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
2752 if (mask & DRM_PCIE_SPEED_50)
2753 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
2754 if (mask & DRM_PCIE_SPEED_80)
2755 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
2756 } else {
2757 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2758 }
2759 }
2760 if (adev->pm.pcie_mlw_mask == 0) {
2761 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
2762 if (!ret) {
2763 switch (mask) {
2764 case 32:
2765 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
2766 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2767 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2768 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2769 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2770 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2771 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2772 break;
2773 case 16:
2774 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2775 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2776 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2777 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2778 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2779 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2780 break;
2781 case 12:
2782 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2783 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2784 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2785 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2786 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2787 break;
2788 case 8:
2789 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2790 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2791 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2792 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2793 break;
2794 case 4:
2795 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2796 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2797 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2798 break;
2799 case 2:
2800 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2801 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2802 break;
2803 case 1:
2804 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
2805 break;
2806 default:
2807 break;
2808 }
2809 } else {
2810 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
2811 }
2812 }
2813}
2814
2815/*
2816 * Debugfs
2817 */
2818int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
06ab6832 2819 const struct drm_info_list *files,
2820 unsigned nfiles)
2821{
2822 unsigned i;
2823
2824 for (i = 0; i < adev->debugfs_count; i++) {
2825 if (adev->debugfs[i].files == files) {
2826 /* Already registered */
2827 return 0;
2828 }
2829 }
2830
2831 i = adev->debugfs_count + 1;
2832 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
2833 DRM_ERROR("Reached maximum number of debugfs components.\n");
2834 DRM_ERROR("Report so we increase "
2835 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
2836 return -EINVAL;
2837 }
2838 adev->debugfs[adev->debugfs_count].files = files;
2839 adev->debugfs[adev->debugfs_count].num_files = nfiles;
2840 adev->debugfs_count = i;
2841#if defined(CONFIG_DEBUG_FS)
2842 drm_debugfs_create_files(files, nfiles,
2843 adev->ddev->primary->debugfs_root,
2844 adev->ddev->primary);
2845#endif
2846 return 0;
2847}
2848
2849#if defined(CONFIG_DEBUG_FS)
2850
2851static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
2852 size_t size, loff_t *pos)
2853{
45063097 2854 struct amdgpu_device *adev = file_inode(f)->i_private;
2855 ssize_t result = 0;
2856 int r;
bd12267d 2857 bool pm_pg_lock, use_bank;
56628159 2858 unsigned instance_bank, sh_bank, se_bank;
2859
2860 if (size & 0x3 || *pos & 0x3)
2861 return -EINVAL;
2862
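	/* The file offset encodes more than the register address:
	 *   bits  0..21  byte offset of the register in the MMIO aperture
	 *   bit      23  take the PM mutex (power-gated register access)
	 *   bits 24..33  SE index       (0x3FF selects all)
	 *   bits 34..43  SH index       (0x3FF selects all)
	 *   bits 44..53  instance index (0x3FF selects all)
	 *   bit      62  apply the SE/SH/instance bank selection above
	 */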
2863 /* are we reading registers for which a PG lock is necessary? */
2864 pm_pg_lock = (*pos >> 23) & 1;
2865
2866 if (*pos & (1ULL << 62)) {
2867 se_bank = (*pos >> 24) & 0x3FF;
2868 sh_bank = (*pos >> 34) & 0x3FF;
2869 instance_bank = (*pos >> 44) & 0x3FF;
2870
2871 if (se_bank == 0x3FF)
2872 se_bank = 0xFFFFFFFF;
2873 if (sh_bank == 0x3FF)
2874 sh_bank = 0xFFFFFFFF;
2875 if (instance_bank == 0x3FF)
2876 instance_bank = 0xFFFFFFFF;
56628159 2877 use_bank = 1;
2878 } else {
2879 use_bank = 0;
2880 }
2881
801a6aa9 2882 *pos &= (1UL << 22) - 1;
bd12267d 2883
56628159 2884 if (use_bank) {
2885 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
2886 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
2887 return -EINVAL;
2888 mutex_lock(&adev->grbm_idx_mutex);
2889 amdgpu_gfx_select_se_sh(adev, se_bank,
2890 sh_bank, instance_bank);
2891 }
2892
2893 if (pm_pg_lock)
2894 mutex_lock(&adev->pm.mutex);
2895
2896 while (size) {
2897 uint32_t value;
2898
2899 if (*pos > adev->rmmio_size)
56628159 2900 goto end;
2901
2902 value = RREG32(*pos >> 2);
2903 r = put_user(value, (uint32_t *)buf);
2904 if (r) {
2905 result = r;
2906 goto end;
2907 }
2908
2909 result += 4;
2910 buf += 4;
2911 *pos += 4;
2912 size -= 4;
2913 }
2914
2915end:
2916 if (use_bank) {
2917 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2918 mutex_unlock(&adev->grbm_idx_mutex);
2919 }
2920
2921 if (pm_pg_lock)
2922 mutex_unlock(&adev->pm.mutex);
2923
2924 return result;
2925}
2926
2927static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
2928 size_t size, loff_t *pos)
2929{
45063097 2930 struct amdgpu_device *adev = file_inode(f)->i_private;
2931 ssize_t result = 0;
2932 int r;
2933 bool pm_pg_lock, use_bank;
2934 unsigned instance_bank, sh_bank, se_bank;
2935
2936 if (size & 0x3 || *pos & 0x3)
2937 return -EINVAL;
2938
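	/* same offset encoding as amdgpu_debugfs_regs_read() above */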
2939 /* are we writing registers for which a PG lock is necessary? */
2940 pm_pg_lock = (*pos >> 23) & 1;
2941
2942 if (*pos & (1ULL << 62)) {
2943 se_bank = (*pos >> 24) & 0x3FF;
2944 sh_bank = (*pos >> 34) & 0x3FF;
2945 instance_bank = (*pos >> 44) & 0x3FF;
2946
2947 if (se_bank == 0x3FF)
2948 se_bank = 0xFFFFFFFF;
2949 if (sh_bank == 0x3FF)
2950 sh_bank = 0xFFFFFFFF;
2951 if (instance_bank == 0x3FF)
2952 instance_bank = 0xFFFFFFFF;
2953 use_bank = 1;
2954 } else {
2955 use_bank = 0;
2956 }
2957
801a6aa9 2958 *pos &= (1UL << 22) - 1;
2959
2960 if (use_bank) {
2961 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
2962 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
2963 return -EINVAL;
2964 mutex_lock(&adev->grbm_idx_mutex);
2965 amdgpu_gfx_select_se_sh(adev, se_bank,
2966 sh_bank, instance_bank);
2967 }
2968
2969 if (pm_pg_lock)
2970 mutex_lock(&adev->pm.mutex);
2971
2972 while (size) {
2973 uint32_t value;
2974
2975 if (*pos > adev->rmmio_size)
2976 return result;
2977
2978 r = get_user(value, (uint32_t *)buf);
2979 if (r)
2980 return r;
2981
2982 WREG32(*pos >> 2, value);
2983
2984 result += 4;
2985 buf += 4;
2986 *pos += 4;
2987 size -= 4;
2988 }
2989
2990 if (use_bank) {
2991 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2992 mutex_unlock(&adev->grbm_idx_mutex);
2993 }
2994
2995 if (pm_pg_lock)
2996 mutex_unlock(&adev->pm.mutex);
2997
2998 return result;
2999}
3000
3001static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
3002 size_t size, loff_t *pos)
3003{
45063097 3004 struct amdgpu_device *adev = file_inode(f)->i_private;
3005 ssize_t result = 0;
3006 int r;
3007
3008 if (size & 0x3 || *pos & 0x3)
3009 return -EINVAL;
3010
3011 while (size) {
3012 uint32_t value;
3013
3014 value = RREG32_PCIE(*pos >> 2);
3015 r = put_user(value, (uint32_t *)buf);
3016 if (r)
3017 return r;
3018
3019 result += 4;
3020 buf += 4;
3021 *pos += 4;
3022 size -= 4;
3023 }
3024
3025 return result;
3026}
3027
3028static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
3029 size_t size, loff_t *pos)
3030{
45063097 3031 struct amdgpu_device *adev = file_inode(f)->i_private;
3032 ssize_t result = 0;
3033 int r;
3034
3035 if (size & 0x3 || *pos & 0x3)
3036 return -EINVAL;
3037
3038 while (size) {
3039 uint32_t value;
3040
3041 r = get_user(value, (uint32_t *)buf);
3042 if (r)
3043 return r;
3044
3045 WREG32_PCIE(*pos >> 2, value);
3046
3047 result += 4;
3048 buf += 4;
3049 *pos += 4;
3050 size -= 4;
3051 }
3052
3053 return result;
3054}
3055
3056static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
3057 size_t size, loff_t *pos)
3058{
45063097 3059 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3060 ssize_t result = 0;
3061 int r;
3062
3063 if (size & 0x3 || *pos & 0x3)
3064 return -EINVAL;
3065
3066 while (size) {
3067 uint32_t value;
3068
3069 value = RREG32_DIDT(*pos >> 2);
3070 r = put_user(value, (uint32_t *)buf);
3071 if (r)
3072 return r;
3073
3074 result += 4;
3075 buf += 4;
3076 *pos += 4;
3077 size -= 4;
3078 }
3079
3080 return result;
3081}
3082
3083static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
3084 size_t size, loff_t *pos)
3085{
45063097 3086 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3087 ssize_t result = 0;
3088 int r;
3089
3090 if (size & 0x3 || *pos & 0x3)
3091 return -EINVAL;
3092
3093 while (size) {
3094 uint32_t value;
3095
3096 r = get_user(value, (uint32_t *)buf);
3097 if (r)
3098 return r;
3099
3100 WREG32_DIDT(*pos >> 2, value);
3101
3102 result += 4;
3103 buf += 4;
3104 *pos += 4;
3105 size -= 4;
3106 }
3107
3108 return result;
3109}
3110
3111static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
3112 size_t size, loff_t *pos)
3113{
45063097 3114 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3115 ssize_t result = 0;
3116 int r;
3117
3118 if (size & 0x3 || *pos & 0x3)
3119 return -EINVAL;
3120
3121 while (size) {
3122 uint32_t value;
3123
6fc0deaf 3124 value = RREG32_SMC(*pos);
3125 r = put_user(value, (uint32_t *)buf);
3126 if (r)
3127 return r;
3128
3129 result += 4;
3130 buf += 4;
3131 *pos += 4;
3132 size -= 4;
3133 }
3134
3135 return result;
3136}
3137
3138static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
3139 size_t size, loff_t *pos)
3140{
45063097 3141 struct amdgpu_device *adev = file_inode(f)->i_private;
3142 ssize_t result = 0;
3143 int r;
3144
3145 if (size & 0x3 || *pos & 0x3)
3146 return -EINVAL;
3147
3148 while (size) {
3149 uint32_t value;
3150
3151 r = get_user(value, (uint32_t *)buf);
3152 if (r)
3153 return r;
3154
6fc0deaf 3155 WREG32_SMC(*pos, value);
3156
3157 result += 4;
3158 buf += 4;
3159 *pos += 4;
3160 size -= 4;
3161 }
3162
3163 return result;
3164}
3165
3166static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
3167 size_t size, loff_t *pos)
3168{
45063097 3169 struct amdgpu_device *adev = file_inode(f)->i_private;
3170 ssize_t result = 0;
3171 int r;
3172 uint32_t *config, no_regs = 0;
3173
3174 if (size & 0x3 || *pos & 0x3)
3175 return -EINVAL;
3176
ecab7668 3177 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
3178 if (!config)
3179 return -ENOMEM;
3180
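	/* The output is a flat array of dwords: a format version followed by
	 * the gfx config values, with newer versions appending fields at the
	 * end (see the rev== markers below).
	 */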
3181 /* version, increment each time something is added */
9a999359 3182 config[no_regs++] = 3;
3183 config[no_regs++] = adev->gfx.config.max_shader_engines;
3184 config[no_regs++] = adev->gfx.config.max_tile_pipes;
3185 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
3186 config[no_regs++] = adev->gfx.config.max_sh_per_se;
3187 config[no_regs++] = adev->gfx.config.max_backends_per_se;
3188 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
3189 config[no_regs++] = adev->gfx.config.max_gprs;
3190 config[no_regs++] = adev->gfx.config.max_gs_threads;
3191 config[no_regs++] = adev->gfx.config.max_hw_contexts;
3192 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
3193 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
3194 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
3195 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
3196 config[no_regs++] = adev->gfx.config.num_tile_pipes;
3197 config[no_regs++] = adev->gfx.config.backend_enable_mask;
3198 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
3199 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
3200 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
3201 config[no_regs++] = adev->gfx.config.num_gpus;
3202 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
3203 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
3204 config[no_regs++] = adev->gfx.config.gb_addr_config;
3205 config[no_regs++] = adev->gfx.config.num_rbs;
3206
3207 /* rev==1 */
3208 config[no_regs++] = adev->rev_id;
3209 config[no_regs++] = adev->pg_flags;
3210 config[no_regs++] = adev->cg_flags;
3211
3212 /* rev==2 */
3213 config[no_regs++] = adev->family;
3214 config[no_regs++] = adev->external_rev_id;
3215
3216 /* rev==3 */
3217 config[no_regs++] = adev->pdev->device;
3218 config[no_regs++] = adev->pdev->revision;
3219 config[no_regs++] = adev->pdev->subsystem_device;
3220 config[no_regs++] = adev->pdev->subsystem_vendor;
3221
3222 while (size && (*pos < no_regs * 4)) {
3223 uint32_t value;
3224
3225 value = config[*pos >> 2];
3226 r = put_user(value, (uint32_t *)buf);
3227 if (r) {
3228 kfree(config);
3229 return r;
3230 }
3231
3232 result += 4;
3233 buf += 4;
3234 *pos += 4;
3235 size -= 4;
3236 }
3237
3238 kfree(config);
3239 return result;
3240}
3241
3242static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
3243 size_t size, loff_t *pos)
3244{
45063097 3245 struct amdgpu_device *adev = file_inode(f)->i_private;
3246 int idx, x, outsize, r, valuesize;
3247 uint32_t values[16];
f2cdaf20 3248
9f8df7d7 3249 if (size & 3 || *pos & 0x3)
3250 return -EINVAL;
3251
3252 if (amdgpu_dpm == 0)
3253 return -EINVAL;
3254
3255 /* convert offset to sensor number */
3256 idx = *pos >> 2;
3257
9f8df7d7 3258 valuesize = sizeof(values);
f2cdaf20 3259 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
9f8df7d7 3260 r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
3261 else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
3262 r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
3263 &valuesize);
3264 else
3265 return -EINVAL;
3266
3267 if (size > valuesize)
3268 return -EINVAL;
3269
3270 outsize = 0;
3271 x = 0;
3272 if (!r) {
3273 while (size) {
3274 r = put_user(values[x++], (int32_t *)buf);
3275 buf += 4;
3276 size -= 4;
3277 outsize += 4;
3278 }
3279 }
f2cdaf20 3280
9f8df7d7 3281 return !r ? outsize : r;
f2cdaf20 3282}
1e051413 3283
3284static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
3285 size_t size, loff_t *pos)
3286{
3287 struct amdgpu_device *adev = f->f_inode->i_private;
3288 int r, x;
3289 ssize_t result=0;
472259f0 3290 uint32_t offset, se, sh, cu, wave, simd, data[32];
3291
3292 if (size & 3 || *pos & 3)
3293 return -EINVAL;
3294
3295 /* decode offset */
3296 offset = (*pos & 0x7F);
3297 se = ((*pos >> 7) & 0xFF);
3298 sh = ((*pos >> 15) & 0xFF);
3299 cu = ((*pos >> 23) & 0xFF);
3300 wave = ((*pos >> 31) & 0xFF);
3301 simd = ((*pos >> 37) & 0xFF);
3302
3303 /* switch to the specific se/sh/cu */
3304 mutex_lock(&adev->grbm_idx_mutex);
3305 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3306
3307 x = 0;
3308 if (adev->gfx.funcs->read_wave_data)
3309 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
3310
3311 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3312 mutex_unlock(&adev->grbm_idx_mutex);
3313
3314 if (!x)
3315 return -EINVAL;
3316
472259f0 3317 while (size && (offset < x * 4)) {
3318 uint32_t value;
3319
472259f0 3320 value = data[offset >> 2];
3321 r = put_user(value, (uint32_t *)buf);
3322 if (r)
3323 return r;
3324
3325 result += 4;
3326 buf += 4;
472259f0 3327 offset += 4;
3328 size -= 4;
3329 }
3330
3331 return result;
3332}
3333
3334static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
3335 size_t size, loff_t *pos)
3336{
3337 struct amdgpu_device *adev = f->f_inode->i_private;
3338 int r;
3339 ssize_t result = 0;
3340 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
3341
3342 if (size & 3 || *pos & 3)
3343 return -EINVAL;
3344
3345 /* decode offset */
3346 offset = (*pos & 0xFFF); /* in dwords */
3347 se = ((*pos >> 12) & 0xFF);
3348 sh = ((*pos >> 20) & 0xFF);
3349 cu = ((*pos >> 28) & 0xFF);
3350 wave = ((*pos >> 36) & 0xFF);
3351 simd = ((*pos >> 44) & 0xFF);
3352 thread = ((*pos >> 52) & 0xFF);
3353 bank = ((*pos >> 60) & 1);
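	/* bank 0 reads the selected thread's VGPRs, bank 1 the wave's SGPRs */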
3354
3355 data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
3356 if (!data)
3357 return -ENOMEM;
3358
3359 /* switch to the specific se/sh/cu */
3360 mutex_lock(&adev->grbm_idx_mutex);
3361 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3362
3363 if (bank == 0) {
3364 if (adev->gfx.funcs->read_wave_vgprs)
3365 adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
3366 } else {
3367 if (adev->gfx.funcs->read_wave_sgprs)
3368 adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
3369 }
3370
3371 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3372 mutex_unlock(&adev->grbm_idx_mutex);
3373
3374 while (size) {
3375 uint32_t value;
3376
3377 value = data[offset++];
3378 r = put_user(value, (uint32_t *)buf);
3379 if (r) {
3380 result = r;
3381 goto err;
3382 }
3383
3384 result += 4;
3385 buf += 4;
3386 size -= 4;
3387 }
3388
3389err:
3390 kfree(data);
3391 return result;
3392}
3393
3394static const struct file_operations amdgpu_debugfs_regs_fops = {
3395 .owner = THIS_MODULE,
3396 .read = amdgpu_debugfs_regs_read,
3397 .write = amdgpu_debugfs_regs_write,
3398 .llseek = default_llseek
3399};
3400static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3401 .owner = THIS_MODULE,
3402 .read = amdgpu_debugfs_regs_didt_read,
3403 .write = amdgpu_debugfs_regs_didt_write,
3404 .llseek = default_llseek
3405};
3406static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3407 .owner = THIS_MODULE,
3408 .read = amdgpu_debugfs_regs_pcie_read,
3409 .write = amdgpu_debugfs_regs_pcie_write,
3410 .llseek = default_llseek
3411};
3412static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3413 .owner = THIS_MODULE,
3414 .read = amdgpu_debugfs_regs_smc_read,
3415 .write = amdgpu_debugfs_regs_smc_write,
3416 .llseek = default_llseek
3417};
3418
3419static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3420 .owner = THIS_MODULE,
3421 .read = amdgpu_debugfs_gca_config_read,
3422 .llseek = default_llseek
3423};
3424
3425static const struct file_operations amdgpu_debugfs_sensors_fops = {
3426 .owner = THIS_MODULE,
3427 .read = amdgpu_debugfs_sensor_read,
3428 .llseek = default_llseek
3429};
3430
3431static const struct file_operations amdgpu_debugfs_wave_fops = {
3432 .owner = THIS_MODULE,
3433 .read = amdgpu_debugfs_wave_read,
3434 .llseek = default_llseek
3435};
3436static const struct file_operations amdgpu_debugfs_gpr_fops = {
3437 .owner = THIS_MODULE,
3438 .read = amdgpu_debugfs_gpr_read,
3439 .llseek = default_llseek
3440};
273d7aa1 3441
3442static const struct file_operations *debugfs_regs[] = {
3443 &amdgpu_debugfs_regs_fops,
3444 &amdgpu_debugfs_regs_didt_fops,
3445 &amdgpu_debugfs_regs_pcie_fops,
3446 &amdgpu_debugfs_regs_smc_fops,
1e051413 3447 &amdgpu_debugfs_gca_config_fops,
f2cdaf20 3448 &amdgpu_debugfs_sensors_fops,
273d7aa1 3449 &amdgpu_debugfs_wave_fops,
c5a60ce8 3450 &amdgpu_debugfs_gpr_fops,
3451};
3452
3453static const char *debugfs_regs_names[] = {
3454 "amdgpu_regs",
3455 "amdgpu_regs_didt",
3456 "amdgpu_regs_pcie",
3457 "amdgpu_regs_smc",
1e051413 3458 "amdgpu_gca_config",
f2cdaf20 3459 "amdgpu_sensors",
273d7aa1 3460 "amdgpu_wave",
c5a60ce8 3461 "amdgpu_gpr",
adcec288 3462};
3463
3464static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3465{
3466 struct drm_minor *minor = adev->ddev->primary;
3467 struct dentry *ent, *root = minor->debugfs_root;
3468 unsigned i, j;
3469
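	/* Create each debug file under the primary DRM minor's debugfs
	 * directory; the plain register file also advertises the MMIO
	 * aperture size as its file size.
	 */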
3470 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3471 ent = debugfs_create_file(debugfs_regs_names[i],
3472 S_IFREG | S_IRUGO, root,
3473 adev, debugfs_regs[i]);
3474 if (IS_ERR(ent)) {
3475 for (j = 0; j < i; j++) {
3476 debugfs_remove(adev->debugfs_regs[j]);
3477 adev->debugfs_regs[j] = NULL;
3478 }
3479 return PTR_ERR(ent);
3480 }
d38ceaf9 3481
3482 if (!i)
3483 i_size_write(ent->d_inode, adev->rmmio_size);
3484 adev->debugfs_regs[i] = ent;
3485 }
3486
3487 return 0;
3488}
3489
3490static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
3491{
3492 unsigned i;
3493
3494 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3495 if (adev->debugfs_regs[i]) {
3496 debugfs_remove(adev->debugfs_regs[i]);
3497 adev->debugfs_regs[i] = NULL;
3498 }
3499 }
3500}
3501
3502int amdgpu_debugfs_init(struct drm_minor *minor)
3503{
3504 return 0;
3505}
3506#else
3507static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3508{
3509 return 0;
3510}
3511static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
d38ceaf9 3512#endif