drm/amdgpu: use a 64bit interval tree for VM management v2
linux-2.6-block.git: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
0875dc9e 28#include <linux/kthread.h>
29#include <linux/console.h>
30#include <linux/slab.h>
31#include <linux/debugfs.h>
32#include <drm/drmP.h>
33#include <drm/drm_crtc_helper.h>
34#include <drm/amdgpu_drm.h>
35#include <linux/vgaarb.h>
36#include <linux/vga_switcheroo.h>
37#include <linux/efi.h>
38#include "amdgpu.h"
f4b373f4 39#include "amdgpu_trace.h"
40#include "amdgpu_i2c.h"
41#include "atom.h"
42#include "amdgpu_atombios.h"
a5bde2f9 43#include "amdgpu_atomfirmware.h"
d0dd7f0c 44#include "amd_pcie.h"
45#ifdef CONFIG_DRM_AMDGPU_SI
46#include "si.h"
47#endif
48#ifdef CONFIG_DRM_AMDGPU_CIK
49#include "cik.h"
50#endif
aaa36a97 51#include "vi.h"
460826e6 52#include "soc15.h"
d38ceaf9 53#include "bif/bif_4_1_d.h"
9accf2fd 54#include <linux/pci.h>
bec86378 55#include <linux/firmware.h>
d1aff8ec 56#include "amdgpu_pm.h"
57
58static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
59static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
60
61static const char *amdgpu_asic_name[] = {
62 "TAHITI",
63 "PITCAIRN",
64 "VERDE",
65 "OLAND",
66 "HAINAN",
67 "BONAIRE",
68 "KAVERI",
69 "KABINI",
70 "HAWAII",
71 "MULLINS",
72 "TOPAZ",
73 "TONGA",
48299f95 74 "FIJI",
d38ceaf9 75 "CARRIZO",
139f4917 76 "STONEY",
77 "POLARIS10",
78 "POLARIS11",
c4642a47 79 "POLARIS12",
d4196f01 80 "VEGA10",
81 "LAST",
82};
83
84bool amdgpu_device_is_px(struct drm_device *dev)
85{
86 struct amdgpu_device *adev = dev->dev_private;
87
2f7d10b3 88 if (adev->flags & AMD_IS_PX)
89 return true;
90 return false;
91}
92
93/*
94 * MMIO register access helper functions.
95 */
96uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
15d72fd7 97 uint32_t acc_flags)
d38ceaf9 98{
99 uint32_t ret;
100
15d72fd7 101 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
102 BUG_ON(in_interrupt());
103 return amdgpu_virt_kiq_rreg(adev, reg);
104 }
105
15d72fd7 106 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
f4b373f4 107 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
108 else {
109 unsigned long flags;
110
111 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
112 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
113 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
114 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
d38ceaf9 115 }
116 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
117 return ret;
118}
119
120void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
15d72fd7 121 uint32_t acc_flags)
d38ceaf9 122{
f4b373f4 123 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
4e99a44e 124
15d72fd7 125 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
126 BUG_ON(in_interrupt());
127 return amdgpu_virt_kiq_wreg(adev, reg, v);
128 }
129
15d72fd7 130 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
131 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
132 else {
133 unsigned long flags;
134
135 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
136 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
137 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
138 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
139 }
140}
141
142u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
143{
144 if ((reg * 4) < adev->rio_mem_size)
145 return ioread32(adev->rio_mem + (reg * 4));
146 else {
147 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
148 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
149 }
150}
151
152void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
153{
154
155 if ((reg * 4) < adev->rio_mem_size)
156 iowrite32(v, adev->rio_mem + (reg * 4));
157 else {
158 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
159 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
160 }
161}
162
163/**
164 * amdgpu_mm_rdoorbell - read a doorbell dword
165 *
166 * @adev: amdgpu_device pointer
167 * @index: doorbell index
168 *
169 * Returns the value in the doorbell aperture at the
170 * requested doorbell index (CIK).
171 */
172u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
173{
174 if (index < adev->doorbell.num_doorbells) {
175 return readl(adev->doorbell.ptr + index);
176 } else {
177 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
178 return 0;
179 }
180}
181
182/**
183 * amdgpu_mm_wdoorbell - write a doorbell dword
184 *
185 * @adev: amdgpu_device pointer
186 * @index: doorbell index
187 * @v: value to write
188 *
189 * Writes @v to the doorbell aperture at the
190 * requested doorbell index (CIK).
191 */
192void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
193{
194 if (index < adev->doorbell.num_doorbells) {
195 writel(v, adev->doorbell.ptr + index);
196 } else {
197 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
198 }
199}
200
201/**
202 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
203 *
204 * @adev: amdgpu_device pointer
205 * @index: doorbell index
206 *
207 * Returns the value in the doorbell aperture at the
208 * requested doorbell index (VEGA10+).
209 */
210u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
211{
212 if (index < adev->doorbell.num_doorbells) {
213 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
214 } else {
215 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
216 return 0;
217 }
218}
219
220/**
221 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
222 *
223 * @adev: amdgpu_device pointer
224 * @index: doorbell index
225 * @v: value to write
226 *
227 * Writes @v to the doorbell aperture at the
228 * requested doorbell index (VEGA10+).
229 */
230void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
231{
232 if (index < adev->doorbell.num_doorbells) {
233 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
234 } else {
235 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
236 }
237}
238
239/**
240 * amdgpu_invalid_rreg - dummy reg read function
241 *
242 * @adev: amdgpu device pointer
243 * @reg: offset of register
244 *
245 * Dummy register read function. Used for register blocks
246 * that certain asics don't have (all asics).
247 * Returns the value in the register.
248 */
249static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
250{
251 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
252 BUG();
253 return 0;
254}
255
256/**
257 * amdgpu_invalid_wreg - dummy reg write function
258 *
259 * @adev: amdgpu device pointer
260 * @reg: offset of register
261 * @v: value to write to the register
262 *
 263 * Dummy register write function. Used for register blocks
264 * that certain asics don't have (all asics).
265 */
266static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
267{
268 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
269 reg, v);
270 BUG();
271}
272
273/**
274 * amdgpu_block_invalid_rreg - dummy reg read function
275 *
276 * @adev: amdgpu device pointer
277 * @block: offset of instance
278 * @reg: offset of register
279 *
280 * Dummy register read function. Used for register blocks
281 * that certain asics don't have (all asics).
282 * Returns the value in the register.
283 */
284static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
285 uint32_t block, uint32_t reg)
286{
287 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
288 reg, block);
289 BUG();
290 return 0;
291}
292
293/**
294 * amdgpu_block_invalid_wreg - dummy reg write function
295 *
296 * @adev: amdgpu device pointer
297 * @block: offset of instance
298 * @reg: offset of register
299 * @v: value to write to the register
300 *
 301 * Dummy register write function. Used for register blocks
302 * that certain asics don't have (all asics).
303 */
304static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
305 uint32_t block,
306 uint32_t reg, uint32_t v)
307{
308 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
309 reg, block, v);
310 BUG();
311}
312
313static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
314{
315 int r;
316
317 if (adev->vram_scratch.robj == NULL) {
318 r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
857d913d 319 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
320 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
321 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
72d7668b 322 NULL, NULL, &adev->vram_scratch.robj);
323 if (r) {
324 return r;
325 }
326 }
327
328 r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
329 if (unlikely(r != 0))
330 return r;
331 r = amdgpu_bo_pin(adev->vram_scratch.robj,
332 AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
333 if (r) {
334 amdgpu_bo_unreserve(adev->vram_scratch.robj);
335 return r;
336 }
337 r = amdgpu_bo_kmap(adev->vram_scratch.robj,
338 (void **)&adev->vram_scratch.ptr);
339 if (r)
340 amdgpu_bo_unpin(adev->vram_scratch.robj);
341 amdgpu_bo_unreserve(adev->vram_scratch.robj);
342
343 return r;
344}
345
346static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
347{
348 int r;
349
350 if (adev->vram_scratch.robj == NULL) {
351 return;
352 }
353 r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
354 if (likely(r == 0)) {
355 amdgpu_bo_kunmap(adev->vram_scratch.robj);
356 amdgpu_bo_unpin(adev->vram_scratch.robj);
357 amdgpu_bo_unreserve(adev->vram_scratch.robj);
358 }
359 amdgpu_bo_unref(&adev->vram_scratch.robj);
360}
361
362/**
363 * amdgpu_program_register_sequence - program an array of registers.
364 *
365 * @adev: amdgpu_device pointer
366 * @registers: pointer to the register array
367 * @array_size: size of the register array
368 *
 369 * Programs an array of registers with AND and OR masks.
370 * This is a helper for setting golden registers.
371 */
372void amdgpu_program_register_sequence(struct amdgpu_device *adev,
373 const u32 *registers,
374 const u32 array_size)
375{
376 u32 tmp, reg, and_mask, or_mask;
377 int i;
378
379 if (array_size % 3)
380 return;
381
382 for (i = 0; i < array_size; i +=3) {
383 reg = registers[i + 0];
384 and_mask = registers[i + 1];
385 or_mask = registers[i + 2];
386
387 if (and_mask == 0xffffffff) {
388 tmp = or_mask;
389 } else {
390 tmp = RREG32(reg);
391 tmp &= ~and_mask;
392 tmp |= or_mask;
393 }
394 WREG32(reg, tmp);
395 }
396}
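/*
 * Illustrative sketch (hypothetical, not from the original file): how a
 * golden register table is laid out for amdgpu_program_register_sequence().
 * Each entry is a {register, and_mask, or_mask} triple; an and_mask of
 * 0xffffffff writes or_mask directly, anything else is a read-modify-write
 * that clears the and_mask bits and then ORs in or_mask. The offsets below
 * are placeholders, not real golden settings.
 */
static const u32 example_golden_settings[] =
{
	0x1234, 0xffffffff, 0x00000001,	/* direct write: reg 0x1234 = 0x1 */
	0x5678, 0x0000000f, 0x00000002,	/* RMW: clear the low nibble, OR in 0x2 */
};

static void example_apply_golden_settings(struct amdgpu_device *adev)
{
	amdgpu_program_register_sequence(adev, example_golden_settings,
					 ARRAY_SIZE(example_golden_settings));
}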
397
398void amdgpu_pci_config_reset(struct amdgpu_device *adev)
399{
400 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
401}
402
403/*
 404 * GPU doorbell aperture helper functions.
405 */
406/**
407 * amdgpu_doorbell_init - Init doorbell driver information.
408 *
409 * @adev: amdgpu_device pointer
410 *
411 * Init doorbell driver information (CIK)
412 * Returns 0 on success, error on failure.
413 */
414static int amdgpu_doorbell_init(struct amdgpu_device *adev)
415{
416 /* doorbell bar mapping */
417 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
418 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
419
edf600da 420 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
421 AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
422 if (adev->doorbell.num_doorbells == 0)
423 return -EINVAL;
424
425 adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32));
426 if (adev->doorbell.ptr == NULL) {
427 return -ENOMEM;
428 }
429 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base);
430 DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size);
431
432 return 0;
433}
434
435/**
436 * amdgpu_doorbell_fini - Tear down doorbell driver information.
437 *
438 * @adev: amdgpu_device pointer
439 *
440 * Tear down doorbell driver information (CIK)
441 */
442static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
443{
444 iounmap(adev->doorbell.ptr);
445 adev->doorbell.ptr = NULL;
446}
447
448/**
449 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
450 * setup amdkfd
451 *
452 * @adev: amdgpu_device pointer
453 * @aperture_base: output returning doorbell aperture base physical address
454 * @aperture_size: output returning doorbell aperture size in bytes
455 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
456 *
457 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
458 * takes doorbells required for its own rings and reports the setup to amdkfd.
459 * amdgpu reserved doorbells are at the start of the doorbell aperture.
460 */
461void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
462 phys_addr_t *aperture_base,
463 size_t *aperture_size,
464 size_t *start_offset)
465{
466 /*
467 * The first num_doorbells are used by amdgpu.
468 * amdkfd takes whatever's left in the aperture.
469 */
470 if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
471 *aperture_base = adev->doorbell.base;
472 *aperture_size = adev->doorbell.size;
473 *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
474 } else {
475 *aperture_base = 0;
476 *aperture_size = 0;
477 *start_offset = 0;
478 }
479}
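/*
 * Worked example (illustrative, with assumed numbers, not from the original
 * file): with a 2MB doorbell BAR and num_doorbells = 0x400, amdkfd is
 * reported the full aperture (aperture_base = BAR base, aperture_size = 2MB)
 * and told that the first start_offset = 0x400 * 4 = 4KB of it is reserved
 * for amdgpu's own rings.
 */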
480
481/*
482 * amdgpu_wb_*()
 483 * Writeback is the method by which the GPU updates special pages
484 * in memory with the status of certain GPU events (fences, ring pointers,
485 * etc.).
486 */
487
488/**
489 * amdgpu_wb_fini - Disable Writeback and free memory
490 *
491 * @adev: amdgpu_device pointer
492 *
493 * Disables Writeback and frees the Writeback memory (all asics).
494 * Used at driver shutdown.
495 */
496static void amdgpu_wb_fini(struct amdgpu_device *adev)
497{
498 if (adev->wb.wb_obj) {
499 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
500 &adev->wb.gpu_addr,
501 (void **)&adev->wb.wb);
502 adev->wb.wb_obj = NULL;
503 }
504}
505
506/**
507 * amdgpu_wb_init- Init Writeback driver info and allocate memory
508 *
509 * @adev: amdgpu_device pointer
510 *
 511 * Initializes writeback and allocates the writeback memory (all asics).
 512 * Used at driver startup.
 513 * Returns 0 on success or a negative error code on failure.
514 */
515static int amdgpu_wb_init(struct amdgpu_device *adev)
516{
517 int r;
518
519 if (adev->wb.wb_obj == NULL) {
60a970a6 520 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
521 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
522 &adev->wb.wb_obj, &adev->wb.gpu_addr,
523 (void **)&adev->wb.wb);
524 if (r) {
525 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
526 return r;
527 }
528
529 adev->wb.num_wb = AMDGPU_MAX_WB;
530 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
531
532 /* clear wb memory */
60a970a6 533 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
534 }
535
536 return 0;
537}
538
539/**
540 * amdgpu_wb_get - Allocate a wb entry
541 *
542 * @adev: amdgpu_device pointer
543 * @wb: wb index
544 *
545 * Allocate a wb slot for use by the driver (all asics).
546 * Returns 0 on success or -EINVAL on failure.
547 */
548int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
549{
550 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
551 if (offset < adev->wb.num_wb) {
552 __set_bit(offset, adev->wb.used);
553 *wb = offset;
554 return 0;
555 } else {
556 return -EINVAL;
557 }
558}
559
560/**
561 * amdgpu_wb_get_64bit - Allocate a wb entry
562 *
563 * @adev: amdgpu_device pointer
564 * @wb: wb index
565 *
566 * Allocate a wb slot for use by the driver (all asics).
567 * Returns 0 on success or -EINVAL on failure.
568 */
569int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
570{
571 unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
572 adev->wb.num_wb, 0, 2, 7, 0);
573 if ((offset + 1) < adev->wb.num_wb) {
574 __set_bit(offset, adev->wb.used);
575 __set_bit(offset + 1, adev->wb.used);
576 *wb = offset;
577 return 0;
578 } else {
579 return -EINVAL;
580 }
581}
582
583/**
584 * amdgpu_wb_free - Free a wb entry
585 *
586 * @adev: amdgpu_device pointer
587 * @wb: wb index
588 *
589 * Free a wb slot allocated for use by the driver (all asics)
590 */
591void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
592{
593 if (wb < adev->wb.num_wb)
594 __clear_bit(wb, adev->wb.used);
595}
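/*
 * Illustrative sketch (hypothetical, not from the original file): typical
 * writeback usage by a ring. A slot index is allocated with amdgpu_wb_get(),
 * the CPU reads the value through adev->wb.wb[] and the GPU writes it via
 * the corresponding offset from adev->wb.gpu_addr. The caller-side names
 * (rptr_offs, rptr_gpu_addr) are made up for the example.
 */
static int example_setup_rptr_writeback(struct amdgpu_device *adev,
					 u32 *rptr_offs, u64 *rptr_gpu_addr)
{
	int r = amdgpu_wb_get(adev, rptr_offs);

	if (r) {
		dev_err(adev->dev, "no free writeback slot (%d)\n", r);
		return r;
	}
	/* each slot is one 32-bit dword inside the writeback BO */
	*rptr_gpu_addr = adev->wb.gpu_addr + (*rptr_offs * 4);
	return 0;
}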
596
597/**
598 * amdgpu_wb_free_64bit - Free a wb entry
599 *
600 * @adev: amdgpu_device pointer
601 * @wb: wb index
602 *
603 * Free a wb slot allocated for use by the driver (all asics)
604 */
605void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
606{
607 if ((wb + 1) < adev->wb.num_wb) {
608 __clear_bit(wb, adev->wb.used);
609 __clear_bit(wb + 1, adev->wb.used);
610 }
611}
612
613/**
614 * amdgpu_vram_location - try to find VRAM location
615 * @adev: amdgpu device structure holding all necessary informations
616 * @mc: memory controller structure holding memory informations
617 * @base: base address at which to put VRAM
618 *
 619 * Function will try to place VRAM at the base address provided
620 * as parameter (which is so far either PCI aperture address or
621 * for IGP TOM base address).
622 *
 623 * If there is not enough space to fit the invisible VRAM in the 32bits
624 * address space then we limit the VRAM size to the aperture.
625 *
626 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
627 * this shouldn't be a problem as we are using the PCI aperture as a reference.
628 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
629 * not IGP.
630 *
631 * Note: we use mc_vram_size as on some board we need to program the mc to
632 * cover the whole aperture even if VRAM size is inferior to aperture size
633 * Novell bug 204882 + along with lots of ubuntu ones
634 *
 635 * Note: when limiting vram it's safe to overwrite real_vram_size because
 636 * we are not in a case where real_vram_size is smaller than mc_vram_size (ie
 637 * not affected by the bogus hw of Novell bug 204882 along with lots of ubuntu
 638 * ones)
639 *
640 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 641 * explicitly check for that though.
642 *
643 * FIXME: when reducing VRAM size align new size on power of 2.
644 */
645void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
646{
647 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
648
649 mc->vram_start = base;
650 if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
651 dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
652 mc->real_vram_size = mc->aper_size;
653 mc->mc_vram_size = mc->aper_size;
654 }
655 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
656 if (limit && limit < mc->real_vram_size)
657 mc->real_vram_size = limit;
658 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
659 mc->mc_vram_size >> 20, mc->vram_start,
660 mc->vram_end, mc->real_vram_size >> 20);
661}
662
663/**
664 * amdgpu_gtt_location - try to find GTT location
665 * @adev: amdgpu device structure holding all necessary informations
666 * @mc: memory controller structure holding memory informations
667 *
 668 * Function will try to place GTT before or after VRAM.
 669 *
 670 * If GTT size is bigger than the space left then we adjust GTT size.
 671 * Thus this function will never fail.
672 *
673 * FIXME: when reducing GTT size align new size on power of 2.
674 */
675void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
676{
677 u64 size_af, size_bf;
678
679 size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
680 size_bf = mc->vram_start & ~mc->gtt_base_align;
681 if (size_bf > size_af) {
682 if (mc->gtt_size > size_bf) {
683 dev_warn(adev->dev, "limiting GTT\n");
684 mc->gtt_size = size_bf;
685 }
9dc5a91e 686 mc->gtt_start = 0;
687 } else {
688 if (mc->gtt_size > size_af) {
689 dev_warn(adev->dev, "limiting GTT\n");
690 mc->gtt_size = size_af;
691 }
692 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
693 }
694 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
695 dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
696 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
697}
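/*
 * Worked example (illustrative, with assumed numbers, not from the original
 * file): with 8GB of VRAM placed at base 0, amdgpu_vram_location() sets
 * vram_start = 0x0 and vram_end = 0x1FFFFFFFF. Assuming gtt_base_align is 0,
 * amdgpu_gtt_location() then sees more room above VRAM than below it
 * (size_af > size_bf), so gtt_start = vram_end + 1 = 0x200000000 and
 * gtt_end = gtt_start + gtt_size - 1.
 */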
698
699/*
 700 * GPU helper functions.
701 */
702/**
 703 * amdgpu_need_post - check if the hw needs to be posted or not
704 *
705 * @adev: amdgpu_device pointer
706 *
 707 * Check whether the asic has been initialized (all asics) at driver startup,
 708 * or whether a post is needed because a hw reset was performed.
 709 * Returns true if post is needed, false if not.
d38ceaf9 710 */
c836fec5 711bool amdgpu_need_post(struct amdgpu_device *adev)
712{
713 uint32_t reg;
714
715 if (adev->has_hw_reset) {
716 adev->has_hw_reset = false;
717 return true;
718 }
d38ceaf9 719 /* then check MEM_SIZE, in case the crtcs are off */
bbf282d8 720 reg = amdgpu_asic_get_config_memsize(adev);
d38ceaf9 721
f2713e8c 722 if ((reg != 0) && (reg != 0xffffffff))
c836fec5 723 return false;
d38ceaf9 724
c836fec5 725 return true;
726
727}
728
729static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
730{
731 if (amdgpu_sriov_vf(adev))
732 return false;
733
734 if (amdgpu_passthrough(adev)) {
 735 /* for FIJI: In the whole-GPU pass-through virtualization case, after VM reboot
 736 * some old SMC firmware still needs the driver to do a vPost, otherwise the GPU
 737 * hangs. SMC firmware versions above 22.15 don't have this flaw, so we force
 738 * vPost to be executed for SMC versions below 22.15
739 */
740 if (adev->asic_type == CHIP_FIJI) {
741 int err;
742 uint32_t fw_ver;
743 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
 744 /* force vPost if an error occurred */
745 if (err)
746 return true;
747
748 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
749 if (fw_ver < 0x00160e00)
750 return true;
bec86378 751 }
bec86378 752 }
c836fec5 753 return amdgpu_need_post(adev);
754}
755
756/**
757 * amdgpu_dummy_page_init - init dummy page used by the driver
758 *
759 * @adev: amdgpu_device pointer
760 *
761 * Allocate the dummy page used by the driver (all asics).
762 * This dummy page is used by the driver as a filler for gart entries
 763 * when pages are taken out of the GART.
 764 * Returns 0 on success, -ENOMEM on failure.
765 */
766int amdgpu_dummy_page_init(struct amdgpu_device *adev)
767{
768 if (adev->dummy_page.page)
769 return 0;
770 adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
771 if (adev->dummy_page.page == NULL)
772 return -ENOMEM;
773 adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
774 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
775 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
776 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
777 __free_page(adev->dummy_page.page);
778 adev->dummy_page.page = NULL;
779 return -ENOMEM;
780 }
781 return 0;
782}
783
784/**
785 * amdgpu_dummy_page_fini - free dummy page used by the driver
786 *
787 * @adev: amdgpu_device pointer
788 *
789 * Frees the dummy page used by the driver (all asics).
790 */
791void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
792{
793 if (adev->dummy_page.page == NULL)
794 return;
795 pci_unmap_page(adev->pdev, adev->dummy_page.addr,
796 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
797 __free_page(adev->dummy_page.page);
798 adev->dummy_page.page = NULL;
799}
800
801
802/* ATOM accessor methods */
803/*
804 * ATOM is an interpreted byte code stored in tables in the vbios. The
805 * driver registers callbacks to access registers and the interpreter
 806 * in the driver parses the tables and executes them to program specific
807 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
808 * atombios.h, and atom.c
809 */
810
811/**
812 * cail_pll_read - read PLL register
813 *
814 * @info: atom card_info pointer
815 * @reg: PLL register offset
816 *
817 * Provides a PLL register accessor for the atom interpreter (r4xx+).
818 * Returns the value of the PLL register.
819 */
820static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
821{
822 return 0;
823}
824
825/**
826 * cail_pll_write - write PLL register
827 *
828 * @info: atom card_info pointer
829 * @reg: PLL register offset
830 * @val: value to write to the pll register
831 *
832 * Provides a PLL register accessor for the atom interpreter (r4xx+).
833 */
834static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
835{
836
837}
838
839/**
840 * cail_mc_read - read MC (Memory Controller) register
841 *
842 * @info: atom card_info pointer
843 * @reg: MC register offset
844 *
845 * Provides an MC register accessor for the atom interpreter (r4xx+).
846 * Returns the value of the MC register.
847 */
848static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
849{
850 return 0;
851}
852
853/**
854 * cail_mc_write - write MC (Memory Controller) register
855 *
856 * @info: atom card_info pointer
857 * @reg: MC register offset
 858 * @val: value to write to the MC register
859 *
860 * Provides a MC register accessor for the atom interpreter (r4xx+).
861 */
862static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
863{
864
865}
866
867/**
868 * cail_reg_write - write MMIO register
869 *
870 * @info: atom card_info pointer
871 * @reg: MMIO register offset
 872 * @val: value to write to the MMIO register
873 *
874 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
875 */
876static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
877{
878 struct amdgpu_device *adev = info->dev->dev_private;
879
880 WREG32(reg, val);
881}
882
883/**
884 * cail_reg_read - read MMIO register
885 *
886 * @info: atom card_info pointer
887 * @reg: MMIO register offset
888 *
889 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
890 * Returns the value of the MMIO register.
891 */
892static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
893{
894 struct amdgpu_device *adev = info->dev->dev_private;
895 uint32_t r;
896
897 r = RREG32(reg);
898 return r;
899}
900
901/**
902 * cail_ioreg_write - write IO register
903 *
904 * @info: atom card_info pointer
905 * @reg: IO register offset
 906 * @val: value to write to the IO register
907 *
 908 * Provides an IO register accessor for the atom interpreter (r4xx+).
909 */
910static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
911{
912 struct amdgpu_device *adev = info->dev->dev_private;
913
914 WREG32_IO(reg, val);
915}
916
917/**
918 * cail_ioreg_read - read IO register
919 *
920 * @info: atom card_info pointer
921 * @reg: IO register offset
922 *
923 * Provides an IO register accessor for the atom interpreter (r4xx+).
924 * Returns the value of the IO register.
925 */
926static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
927{
928 struct amdgpu_device *adev = info->dev->dev_private;
929 uint32_t r;
930
931 r = RREG32_IO(reg);
932 return r;
933}
934
935/**
936 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
937 *
938 * @adev: amdgpu_device pointer
939 *
940 * Frees the driver info and register access callbacks for the ATOM
941 * interpreter (r4xx+).
942 * Called at driver shutdown.
943 */
944static void amdgpu_atombios_fini(struct amdgpu_device *adev)
945{
89e0ec9f 946 if (adev->mode_info.atom_context) {
d38ceaf9 947 kfree(adev->mode_info.atom_context->scratch);
948 kfree(adev->mode_info.atom_context->iio);
949 }
950 kfree(adev->mode_info.atom_context);
951 adev->mode_info.atom_context = NULL;
952 kfree(adev->mode_info.atom_card_info);
953 adev->mode_info.atom_card_info = NULL;
954}
955
956/**
957 * amdgpu_atombios_init - init the driver info and callbacks for atombios
958 *
959 * @adev: amdgpu_device pointer
960 *
961 * Initializes the driver info and register access callbacks for the
962 * ATOM interpreter (r4xx+).
 963 * Returns 0 on success, -ENOMEM on failure.
964 * Called at driver startup.
965 */
966static int amdgpu_atombios_init(struct amdgpu_device *adev)
967{
968 struct card_info *atom_card_info =
969 kzalloc(sizeof(struct card_info), GFP_KERNEL);
970
971 if (!atom_card_info)
972 return -ENOMEM;
973
974 adev->mode_info.atom_card_info = atom_card_info;
975 atom_card_info->dev = adev->ddev;
976 atom_card_info->reg_read = cail_reg_read;
977 atom_card_info->reg_write = cail_reg_write;
978 /* needed for iio ops */
979 if (adev->rio_mem) {
980 atom_card_info->ioreg_read = cail_ioreg_read;
981 atom_card_info->ioreg_write = cail_ioreg_write;
982 } else {
b64a18c5 983 DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
984 atom_card_info->ioreg_read = cail_reg_read;
985 atom_card_info->ioreg_write = cail_reg_write;
986 }
987 atom_card_info->mc_read = cail_mc_read;
988 atom_card_info->mc_write = cail_mc_write;
989 atom_card_info->pll_read = cail_pll_read;
990 atom_card_info->pll_write = cail_pll_write;
991
992 adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
993 if (!adev->mode_info.atom_context) {
994 amdgpu_atombios_fini(adev);
995 return -ENOMEM;
996 }
997
998 mutex_init(&adev->mode_info.atom_context->mutex);
999 if (adev->is_atom_fw) {
1000 amdgpu_atomfirmware_scratch_regs_init(adev);
1001 amdgpu_atomfirmware_allocate_fb_scratch(adev);
1002 } else {
1003 amdgpu_atombios_scratch_regs_init(adev);
1004 amdgpu_atombios_allocate_fb_scratch(adev);
1005 }
1006 return 0;
1007}
1008
1009/* if we get transitioned to only one device, take VGA back */
1010/**
1011 * amdgpu_vga_set_decode - enable/disable vga decode
1012 *
1013 * @cookie: amdgpu_device pointer
1014 * @state: enable/disable vga decode
1015 *
1016 * Enable/disable vga decode (all asics).
1017 * Returns VGA resource flags.
1018 */
1019static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
1020{
1021 struct amdgpu_device *adev = cookie;
1022 amdgpu_asic_set_vga_state(adev, state);
1023 if (state)
1024 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1025 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1026 else
1027 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1028}
1029
1030/**
1031 * amdgpu_check_pot_argument - check that argument is a power of two
1032 *
1033 * @arg: value to check
1034 *
1035 * Validates that a certain argument is a power of two (all asics).
1036 * Returns true if argument is valid.
1037 */
1038static bool amdgpu_check_pot_argument(int arg)
1039{
1040 return (arg & (arg - 1)) == 0;
1041}
1042
1043static void amdgpu_get_block_size(struct amdgpu_device *adev)
1044{
f7effef8 1045 /* from AI, asic starts to support multiple level VMPT */
9ceaeeaf 1046 if (adev->asic_type >= CHIP_VEGA10) {
f7effef8 1047 if (amdgpu_vm_block_size != 9)
1048 dev_warn(adev->dev,
1049 "Multi-VMPT limits block size to one page!\n");
1050 amdgpu_vm_block_size = 9;
1051 return;
1052 }
1053 /* defines number of bits in page table versus page directory,
1054 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1055 * page table and the remaining bits are in the page directory */
1056 if (amdgpu_vm_block_size == -1) {
1057
1058 /* Total bits covered by PD + PTs */
1059 unsigned bits = ilog2(amdgpu_vm_size) + 18;
1060
1061 /* Make sure the PD is 4K in size up to 8GB address space.
1062 Above that split equal between PD and PTs */
1063 if (amdgpu_vm_size <= 8)
1064 amdgpu_vm_block_size = bits - 9;
1065 else
1066 amdgpu_vm_block_size = (bits + 3) / 2;
1067
1068 } else if (amdgpu_vm_block_size < 9) {
1069 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1070 amdgpu_vm_block_size);
1071 amdgpu_vm_block_size = 9;
1072 }
1073
1074 if (amdgpu_vm_block_size > 24 ||
1075 (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
1076 dev_warn(adev->dev, "VM page table size (%d) too large\n",
1077 amdgpu_vm_block_size);
1078 amdgpu_vm_block_size = 9;
1079 }
1080}
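/*
 * Worked example (illustrative, not from the original file) for the default
 * block-size computation above on pre-Vega parts: with amdgpu_vm_size = 64
 * (GB) and amdgpu_vm_block_size = -1, bits = ilog2(64) + 18 = 24. Since the
 * VM space is larger than 8GB the bits are split between page directory and
 * page tables, giving amdgpu_vm_block_size = (24 + 3) / 2 = 13, i.e. 13 bits
 * (8192 entries) per page table and the remaining 11 bits in the directory.
 */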
1081
1082/**
1083 * amdgpu_check_arguments - validate module params
1084 *
1085 * @adev: amdgpu_device pointer
1086 *
1087 * Validates certain module parameters and updates
1088 * the associated values used by the driver (all asics).
1089 */
1090static void amdgpu_check_arguments(struct amdgpu_device *adev)
1091{
1092 if (amdgpu_sched_jobs < 4) {
1093 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1094 amdgpu_sched_jobs);
1095 amdgpu_sched_jobs = 4;
1096 } else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)){
1097 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1098 amdgpu_sched_jobs);
1099 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1100 }
1101
1102 if (amdgpu_gart_size != -1) {
c4e1a13a 1103 /* gtt size must be greater or equal to 32M */
1104 if (amdgpu_gart_size < 32) {
1105 dev_warn(adev->dev, "gart size (%d) too small\n",
1106 amdgpu_gart_size);
1107 amdgpu_gart_size = -1;
1108 }
1109 }
1110
1111 if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
1112 dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
1113 amdgpu_vm_size);
8dacc127 1114 amdgpu_vm_size = 8;
1115 }
1116
1117 if (amdgpu_vm_size < 1) {
1118 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1119 amdgpu_vm_size);
8dacc127 1120 amdgpu_vm_size = 8;
1121 }
1122
1123 /*
1124 * Max GPUVM size for Cayman, SI and CI are 40 bits.
1125 */
1126 if (amdgpu_vm_size > 1024) {
1127 dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
1128 amdgpu_vm_size);
8dacc127 1129 amdgpu_vm_size = 8;
1130 }
1131
a1adf8be 1132 amdgpu_get_block_size(adev);
6a7f76e7 1133
526bae37 1134 if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
1135 !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
1136 dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
1137 amdgpu_vram_page_split);
1138 amdgpu_vram_page_split = 1024;
1139 }
1140}
1141
1142/**
1143 * amdgpu_switcheroo_set_state - set switcheroo state
1144 *
1145 * @pdev: pci dev pointer
1694467b 1146 * @state: vga_switcheroo state
1147 *
1148 * Callback for the switcheroo driver. Suspends or resumes the
 1149 * asics before or after it is powered up using ACPI methods.
1150 */
1151static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1152{
1153 struct drm_device *dev = pci_get_drvdata(pdev);
1154
1155 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1156 return;
1157
1158 if (state == VGA_SWITCHEROO_ON) {
1159 unsigned d3_delay = dev->pdev->d3_delay;
1160
7ca85295 1161 pr_info("amdgpu: switched on\n");
1162 /* don't suspend or resume card normally */
1163 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1164
810ddc3a 1165 amdgpu_device_resume(dev, true, true);
1166
1167 dev->pdev->d3_delay = d3_delay;
1168
1169 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1170 drm_kms_helper_poll_enable(dev);
1171 } else {
7ca85295 1172 pr_info("amdgpu: switched off\n");
1173 drm_kms_helper_poll_disable(dev);
1174 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
810ddc3a 1175 amdgpu_device_suspend(dev, true, true);
1176 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1177 }
1178}
1179
1180/**
1181 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1182 *
1183 * @pdev: pci dev pointer
1184 *
 1185 * Callback for the switcheroo driver. Check if the switcheroo
1186 * state can be changed.
1187 * Returns true if the state can be changed, false if not.
1188 */
1189static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1190{
1191 struct drm_device *dev = pci_get_drvdata(pdev);
1192
1193 /*
1194 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1195 * locking inversion with the driver load path. And the access here is
1196 * completely racy anyway. So don't bother with locking for now.
1197 */
1198 return dev->open_count == 0;
1199}
1200
1201static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1202 .set_gpu_state = amdgpu_switcheroo_set_state,
1203 .reprobe = NULL,
1204 .can_switch = amdgpu_switcheroo_can_switch,
1205};
1206
1207int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
5fc3aeeb 1208 enum amd_ip_block_type block_type,
1209 enum amd_clockgating_state state)
1210{
1211 int i, r = 0;
1212
1213 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1214 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1215 continue;
1216 if (adev->ip_blocks[i].version->type != block_type)
1217 continue;
1218 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1219 continue;
1220 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1221 (void *)adev, state);
1222 if (r)
1223 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1224 adev->ip_blocks[i].version->funcs->name, r);
1225 }
1226 return r;
1227}
1228
1229int amdgpu_set_powergating_state(struct amdgpu_device *adev,
5fc3aeeb 1230 enum amd_ip_block_type block_type,
1231 enum amd_powergating_state state)
1232{
1233 int i, r = 0;
1234
1235 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1236 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1237 continue;
1238 if (adev->ip_blocks[i].version->type != block_type)
1239 continue;
1240 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1241 continue;
1242 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1243 (void *)adev, state);
1244 if (r)
1245 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1246 adev->ip_blocks[i].version->funcs->name, r);
1247 }
1248 return r;
1249}
1250
1251void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
1252{
1253 int i;
1254
1255 for (i = 0; i < adev->num_ip_blocks; i++) {
1256 if (!adev->ip_blocks[i].status.valid)
1257 continue;
1258 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1259 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1260 }
1261}
1262
1263int amdgpu_wait_for_idle(struct amdgpu_device *adev,
1264 enum amd_ip_block_type block_type)
1265{
1266 int i, r;
1267
1268 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1269 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1270 continue;
1271 if (adev->ip_blocks[i].version->type == block_type) {
1272 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1273 if (r)
1274 return r;
1275 break;
1276 }
1277 }
1278 return 0;
1279
1280}
1281
1282bool amdgpu_is_idle(struct amdgpu_device *adev,
1283 enum amd_ip_block_type block_type)
1284{
1285 int i;
1286
1287 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1288 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1289 continue;
1290 if (adev->ip_blocks[i].version->type == block_type)
1291 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1292 }
1293 return true;
1294
1295}
1296
1297struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
1298 enum amd_ip_block_type type)
1299{
1300 int i;
1301
1302 for (i = 0; i < adev->num_ip_blocks; i++)
a1255107 1303 if (adev->ip_blocks[i].version->type == type)
1304 return &adev->ip_blocks[i];
1305
1306 return NULL;
1307}
1308
1309/**
1310 * amdgpu_ip_block_version_cmp
1311 *
1312 * @adev: amdgpu_device pointer
5fc3aeeb 1313 * @type: enum amd_ip_block_type
1314 * @major: major version
1315 * @minor: minor version
1316 *
1317 * return 0 if equal or greater
1318 * return 1 if smaller or the ip_block doesn't exist
1319 */
1320int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
5fc3aeeb 1321 enum amd_ip_block_type type,
1322 u32 major, u32 minor)
1323{
a1255107 1324 struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
d38ceaf9 1325
1326 if (ip_block && ((ip_block->version->major > major) ||
1327 ((ip_block->version->major == major) &&
1328 (ip_block->version->minor >= minor))))
1329 return 0;
1330
1331 return 1;
1332}
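/*
 * Illustrative sketch (hypothetical, not from the original file): gating a
 * feature on a minimum IP block version. A return value of 0 means the
 * installed block is at least the requested major.minor.
 */
static bool example_has_new_enough_smu(struct amdgpu_device *adev)
{
	return amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC, 7, 1) == 0;
}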
1333
1334/**
1335 * amdgpu_ip_block_add
1336 *
1337 * @adev: amdgpu_device pointer
1338 * @ip_block_version: pointer to the IP to add
1339 *
1340 * Adds the IP block driver information to the collection of IPs
1341 * on the asic.
1342 */
1343int amdgpu_ip_block_add(struct amdgpu_device *adev,
1344 const struct amdgpu_ip_block_version *ip_block_version)
1345{
1346 if (!ip_block_version)
1347 return -EINVAL;
1348
1349 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1350
1351 return 0;
1352}
1353
483ef985 1354static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1355{
1356 adev->enable_virtual_display = false;
1357
1358 if (amdgpu_virtual_display) {
1359 struct drm_device *ddev = adev->ddev;
1360 const char *pci_address_name = pci_name(ddev->pdev);
0f66356d 1361 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1362
1363 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1364 pciaddstr_tmp = pciaddstr;
1365 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1366 pciaddname = strsep(&pciaddname_tmp, ",");
1367 if (!strcmp("all", pciaddname)
1368 || !strcmp(pci_address_name, pciaddname)) {
1369 long num_crtc;
1370 int res = -1;
1371
9accf2fd 1372 adev->enable_virtual_display = true;
1373
1374 if (pciaddname_tmp)
1375 res = kstrtol(pciaddname_tmp, 10,
1376 &num_crtc);
1377
1378 if (!res) {
1379 if (num_crtc < 1)
1380 num_crtc = 1;
1381 if (num_crtc > 6)
1382 num_crtc = 6;
1383 adev->mode_info.num_crtc = num_crtc;
1384 } else {
1385 adev->mode_info.num_crtc = 1;
1386 }
1387 break;
1388 }
1389 }
1390
1391 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1392 amdgpu_virtual_display, pci_address_name,
1393 adev->enable_virtual_display, adev->mode_info.num_crtc);
1394
1395 kfree(pciaddstr);
1396 }
1397}
1398
1399static int amdgpu_early_init(struct amdgpu_device *adev)
1400{
aaa36a97 1401 int i, r;
d38ceaf9 1402
483ef985 1403 amdgpu_device_enable_virtual_display(adev);
a6be7570 1404
d38ceaf9 1405 switch (adev->asic_type) {
1406 case CHIP_TOPAZ:
1407 case CHIP_TONGA:
48299f95 1408 case CHIP_FIJI:
1409 case CHIP_POLARIS11:
1410 case CHIP_POLARIS10:
c4642a47 1411 case CHIP_POLARIS12:
aaa36a97 1412 case CHIP_CARRIZO:
1413 case CHIP_STONEY:
1414 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
1415 adev->family = AMDGPU_FAMILY_CZ;
1416 else
1417 adev->family = AMDGPU_FAMILY_VI;
1418
1419 r = vi_set_ip_blocks(adev);
1420 if (r)
1421 return r;
1422 break;
1423#ifdef CONFIG_DRM_AMDGPU_SI
1424 case CHIP_VERDE:
1425 case CHIP_TAHITI:
1426 case CHIP_PITCAIRN:
1427 case CHIP_OLAND:
1428 case CHIP_HAINAN:
295d0daf 1429 adev->family = AMDGPU_FAMILY_SI;
1430 r = si_set_ip_blocks(adev);
1431 if (r)
1432 return r;
1433 break;
1434#endif
1435#ifdef CONFIG_DRM_AMDGPU_CIK
1436 case CHIP_BONAIRE:
1437 case CHIP_HAWAII:
1438 case CHIP_KAVERI:
1439 case CHIP_KABINI:
1440 case CHIP_MULLINS:
1441 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1442 adev->family = AMDGPU_FAMILY_CI;
1443 else
1444 adev->family = AMDGPU_FAMILY_KV;
1445
1446 r = cik_set_ip_blocks(adev);
1447 if (r)
1448 return r;
1449 break;
1450#endif
1451 case CHIP_VEGA10:
1452 adev->family = AMDGPU_FAMILY_AI;
1453
1454 r = soc15_set_ip_blocks(adev);
1455 if (r)
1456 return r;
1457 break;
1458 default:
1459 /* FIXME: not supported yet */
1460 return -EINVAL;
1461 }
1462
1463 if (amdgpu_sriov_vf(adev)) {
1464 r = amdgpu_virt_request_full_gpu(adev, true);
1465 if (r)
1466 return r;
1467 }
1468
1469 for (i = 0; i < adev->num_ip_blocks; i++) {
1470 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1471 DRM_ERROR("disabled ip block: %d\n", i);
a1255107 1472 adev->ip_blocks[i].status.valid = false;
d38ceaf9 1473 } else {
1474 if (adev->ip_blocks[i].version->funcs->early_init) {
1475 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2c1a2784 1476 if (r == -ENOENT) {
a1255107 1477 adev->ip_blocks[i].status.valid = false;
2c1a2784 1478 } else if (r) {
1479 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1480 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1481 return r;
2c1a2784 1482 } else {
a1255107 1483 adev->ip_blocks[i].status.valid = true;
2c1a2784 1484 }
974e6b64 1485 } else {
a1255107 1486 adev->ip_blocks[i].status.valid = true;
d38ceaf9 1487 }
1488 }
1489 }
1490
1491 adev->cg_flags &= amdgpu_cg_mask;
1492 adev->pg_flags &= amdgpu_pg_mask;
1493
1494 return 0;
1495}
1496
1497static int amdgpu_init(struct amdgpu_device *adev)
1498{
1499 int i, r;
1500
1501 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1502 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1503 continue;
a1255107 1504 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2c1a2784 1505 if (r) {
1506 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1507 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1508 return r;
2c1a2784 1509 }
a1255107 1510 adev->ip_blocks[i].status.sw = true;
d38ceaf9 1511 /* need to do gmc hw init early so we can allocate gpu mem */
a1255107 1512 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
d38ceaf9 1513 r = amdgpu_vram_scratch_init(adev);
1514 if (r) {
1515 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
d38ceaf9 1516 return r;
2c1a2784 1517 }
a1255107 1518 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1519 if (r) {
1520 DRM_ERROR("hw_init %d failed %d\n", i, r);
d38ceaf9 1521 return r;
2c1a2784 1522 }
d38ceaf9 1523 r = amdgpu_wb_init(adev);
1524 if (r) {
1525 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
d38ceaf9 1526 return r;
2c1a2784 1527 }
a1255107 1528 adev->ip_blocks[i].status.hw = true;
1529
1530 /* right after GMC hw init, we create CSA */
1531 if (amdgpu_sriov_vf(adev)) {
1532 r = amdgpu_allocate_static_csa(adev);
1533 if (r) {
1534 DRM_ERROR("allocate CSA failed %d\n", r);
1535 return r;
1536 }
1537 }
1538 }
1539 }
1540
1541 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1542 if (!adev->ip_blocks[i].status.sw)
1543 continue;
1544 /* gmc hw init is done early */
a1255107 1545 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
d38ceaf9 1546 continue;
a1255107 1547 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784 1548 if (r) {
1549 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1550 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1551 return r;
2c1a2784 1552 }
a1255107 1553 adev->ip_blocks[i].status.hw = true;
1554 }
1555
1556 return 0;
1557}
1558
1559static int amdgpu_late_init(struct amdgpu_device *adev)
1560{
1561 int i = 0, r;
1562
1563 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1564 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1565 continue;
1566 if (adev->ip_blocks[i].version->funcs->late_init) {
1567 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2c1a2784 1568 if (r) {
1569 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1570 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1571 return r;
2c1a2784 1572 }
a1255107 1573 adev->ip_blocks[i].status.late_initialized = true;
d38ceaf9 1574 }
4a446d55 1575 /* skip CG for VCE/UVD, it's handled specially */
1576 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1577 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
4a446d55 1578 /* enable clockgating to save power */
1579 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1580 AMD_CG_STATE_GATE);
1581 if (r) {
1582 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
a1255107 1583 adev->ip_blocks[i].version->funcs->name, r);
1584 return r;
1585 }
b0b00ff1 1586 }
1587 }
1588
1589 amdgpu_dpm_enable_uvd(adev, false);
1590 amdgpu_dpm_enable_vce(adev, false);
1591
1592 return 0;
1593}
1594
1595static int amdgpu_fini(struct amdgpu_device *adev)
1596{
1597 int i, r;
1598
1599 /* need to disable SMC first */
1600 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1601 if (!adev->ip_blocks[i].status.hw)
3e96dbfd 1602 continue;
a1255107 1603 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3e96dbfd 1604 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1605 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1606 AMD_CG_STATE_UNGATE);
1607 if (r) {
1608 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
a1255107 1609 adev->ip_blocks[i].version->funcs->name, r);
1610 return r;
1611 }
a1255107 1612 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
1613 /* XXX handle errors */
1614 if (r) {
1615 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
a1255107 1616 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd 1617 }
a1255107 1618 adev->ip_blocks[i].status.hw = false;
1619 break;
1620 }
1621 }
1622
d38ceaf9 1623 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1624 if (!adev->ip_blocks[i].status.hw)
d38ceaf9 1625 continue;
a1255107 1626 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1627 amdgpu_wb_fini(adev);
1628 amdgpu_vram_scratch_fini(adev);
1629 }
1630
1631 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1632 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1633 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1634 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1635 AMD_CG_STATE_UNGATE);
1636 if (r) {
1637 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1638 adev->ip_blocks[i].version->funcs->name, r);
1639 return r;
1640 }
2c1a2784 1641 }
8201a67a 1642
a1255107 1643 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
d38ceaf9 1644 /* XXX handle errors */
2c1a2784 1645 if (r) {
1646 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1647 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1648 }
8201a67a 1649
a1255107 1650 adev->ip_blocks[i].status.hw = false;
1651 }
1652
1653 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1654 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 1655 continue;
a1255107 1656 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
d38ceaf9 1657 /* XXX handle errors */
2c1a2784 1658 if (r) {
1659 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1660 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1661 }
1662 adev->ip_blocks[i].status.sw = false;
1663 adev->ip_blocks[i].status.valid = false;
1664 }
1665
a6dcfd9c 1666 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1667 if (!adev->ip_blocks[i].status.late_initialized)
8a2eef1d 1668 continue;
1669 if (adev->ip_blocks[i].version->funcs->late_fini)
1670 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1671 adev->ip_blocks[i].status.late_initialized = false;
1672 }
1673
3149d9da 1674 if (amdgpu_sriov_vf(adev)) {
2493664f 1675 amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
1676 amdgpu_virt_release_full_gpu(adev, false);
1677 }
2493664f 1678
1679 return 0;
1680}
1681
faefba95 1682int amdgpu_suspend(struct amdgpu_device *adev)
1683{
1684 int i, r;
1685
1686 if (amdgpu_sriov_vf(adev))
1687 amdgpu_virt_request_full_gpu(adev, false);
1688
1689 /* ungate SMC block first */
1690 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1691 AMD_CG_STATE_UNGATE);
1692 if (r) {
1693 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r);
1694 }
1695
d38ceaf9 1696 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1697 if (!adev->ip_blocks[i].status.valid)
1698 continue;
1699 /* ungate blocks so that suspend can properly shut them down */
c5a93a28 1700 if (i != AMD_IP_BLOCK_TYPE_SMC) {
1701 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1702 AMD_CG_STATE_UNGATE);
c5a93a28 1703 if (r) {
1704 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1705 adev->ip_blocks[i].version->funcs->name, r);
c5a93a28 1706 }
2c1a2784 1707 }
d38ceaf9 1708 /* XXX handle errors */
a1255107 1709 r = adev->ip_blocks[i].version->funcs->suspend(adev);
d38ceaf9 1710 /* XXX handle errors */
2c1a2784 1711 if (r) {
1712 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1713 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1714 }
1715 }
1716
1717 if (amdgpu_sriov_vf(adev))
1718 amdgpu_virt_release_full_gpu(adev, false);
1719
1720 return 0;
1721}
1722
e4f0fdcc 1723static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
1724{
1725 int i, r;
1726
1727 for (i = 0; i < adev->num_ip_blocks; i++) {
1728 if (!adev->ip_blocks[i].status.valid)
1729 continue;
1730
1731 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1732 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1733 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
e4f0fdcc 1734 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1735
1736 if (r) {
1737 DRM_ERROR("resume of IP block <%s> failed %d\n",
1738 adev->ip_blocks[i].version->funcs->name, r);
1739 return r;
1740 }
1741 }
1742
1743 return 0;
1744}
1745
e4f0fdcc 1746static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
1747{
1748 int i, r;
1749
1750 for (i = 0; i < adev->num_ip_blocks; i++) {
1751 if (!adev->ip_blocks[i].status.valid)
1752 continue;
1753
1754 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1755 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1756 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH )
1757 continue;
1758
e4f0fdcc 1759 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1760 if (r) {
1761 DRM_ERROR("resume of IP block <%s> failed %d\n",
1762 adev->ip_blocks[i].version->funcs->name, r);
1763 return r;
1764 }
1765 }
1766
1767 return 0;
1768}
1769
1770static int amdgpu_resume(struct amdgpu_device *adev)
1771{
1772 int i, r;
1773
1774 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1775 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1776 continue;
a1255107 1777 r = adev->ip_blocks[i].version->funcs->resume(adev);
2c1a2784 1778 if (r) {
1779 DRM_ERROR("resume of IP block <%s> failed %d\n",
1780 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1781 return r;
2c1a2784 1782 }
1783 }
1784
1785 return 0;
1786}
1787
4e99a44e 1788static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 1789{
a5bde2f9
AD
1790 if (adev->is_atom_fw) {
1791 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
1792 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1793 } else {
1794 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
1795 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1796 }
048765ad
AR
1797}
1798
d38ceaf9
AD
1799/**
1800 * amdgpu_device_init - initialize the driver
1801 *
1802 * @adev: amdgpu_device pointer
1803 * @ddev: drm dev pointer
1804 * @pdev: pci dev pointer
1805 * @flags: driver flags
1806 *
1807 * Initializes the driver info and hw (all asics).
1808 * Returns 0 for success or an error on failure.
1809 * Called at driver startup.
1810 */
1811int amdgpu_device_init(struct amdgpu_device *adev,
1812 struct drm_device *ddev,
1813 struct pci_dev *pdev,
1814 uint32_t flags)
1815{
1816 int r, i;
1817 bool runtime = false;
95844d20 1818 u32 max_MBps;
d38ceaf9
AD
1819
1820 adev->shutdown = false;
1821 adev->dev = &pdev->dev;
1822 adev->ddev = ddev;
1823 adev->pdev = pdev;
1824 adev->flags = flags;
2f7d10b3 1825 adev->asic_type = flags & AMD_ASIC_MASK;
d38ceaf9
AD
1826 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
1827 adev->mc.gtt_size = 512 * 1024 * 1024;
1828 adev->accel_working = false;
1829 adev->num_rings = 0;
1830 adev->mman.buffer_funcs = NULL;
1831 adev->mman.buffer_funcs_ring = NULL;
1832 adev->vm_manager.vm_pte_funcs = NULL;
2d55e45a 1833 adev->vm_manager.vm_pte_num_rings = 0;
d38ceaf9 1834 adev->gart.gart_funcs = NULL;
f54d1867 1835 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
d38ceaf9
AD
1836
1837 adev->smc_rreg = &amdgpu_invalid_rreg;
1838 adev->smc_wreg = &amdgpu_invalid_wreg;
1839 adev->pcie_rreg = &amdgpu_invalid_rreg;
1840 adev->pcie_wreg = &amdgpu_invalid_wreg;
36b9a952
HR
1841 adev->pciep_rreg = &amdgpu_invalid_rreg;
1842 adev->pciep_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
1843 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
1844 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
1845 adev->didt_rreg = &amdgpu_invalid_rreg;
1846 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
1847 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
1848 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
1849 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
1850 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1851
ccdbb20a 1852
3e39ab90
AD
1853 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1854 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
1855 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
1856
1857 /* mutex initialization is all done here so we
1858 * can call these functions without locking issues */
8d0a7cea 1859 mutex_init(&adev->vm_manager.lock);
d38ceaf9 1860 atomic_set(&adev->irq.ih.lock, 0);
0e5ca0d1 1861 mutex_init(&adev->firmware.mutex);
d38ceaf9
AD
1862 mutex_init(&adev->pm.mutex);
1863 mutex_init(&adev->gfx.gpu_clock_mutex);
1864 mutex_init(&adev->srbm_mutex);
1865 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9
AD
1866 mutex_init(&adev->mn_lock);
1867 hash_init(adev->mn_hash);
1868
1869 amdgpu_check_arguments(adev);
1870
1871 /* Registers mapping */
1872 /* TODO: block userspace mapping of io register */
1873 spin_lock_init(&adev->mmio_idx_lock);
1874 spin_lock_init(&adev->smc_idx_lock);
1875 spin_lock_init(&adev->pcie_idx_lock);
1876 spin_lock_init(&adev->uvd_ctx_idx_lock);
1877 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 1878 spin_lock_init(&adev->gc_cac_idx_lock);
d38ceaf9 1879 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 1880 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 1881
0c4e7fa5
CZ
1882 INIT_LIST_HEAD(&adev->shadow_list);
1883 mutex_init(&adev->shadow_list_lock);
1884
5c1354bd
CZ
1885 INIT_LIST_HEAD(&adev->gtt_list);
1886 spin_lock_init(&adev->gtt_list_lock);
1887
da69c161
KW
1888 if (adev->asic_type >= CHIP_BONAIRE) {
1889 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
1890 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
1891 } else {
1892 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
1893 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
1894 }
d38ceaf9 1895
d38ceaf9
AD
1896 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
1897 if (adev->rmmio == NULL) {
1898 return -ENOMEM;
1899 }
1900 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
1901 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
1902
da69c161
KW
1903 if (adev->asic_type >= CHIP_BONAIRE)
1904 /* doorbell bar mapping */
1905 amdgpu_doorbell_init(adev);
d38ceaf9
AD
1906
1907 /* io port mapping */
1908 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1909 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
1910 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
1911 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
1912 break;
1913 }
1914 }
1915 if (adev->rio_mem == NULL)
b64a18c5 1916 DRM_INFO("PCI I/O BAR is not found.\n");
d38ceaf9
AD
1917
1918 /* early init functions */
1919 r = amdgpu_early_init(adev);
1920 if (r)
1921 return r;
1922
1923 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
1924 /* this will fail for cards that aren't VGA class devices, just
1925 * ignore it */
1926 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
1927
1928 if (amdgpu_runtime_pm == 1)
1929 runtime = true;
e9bef455 1930 if (amdgpu_device_is_px(ddev))
d38ceaf9 1931 runtime = true;
84c8b22e
LW
1932 if (!pci_is_thunderbolt_attached(adev->pdev))
1933 vga_switcheroo_register_client(adev->pdev,
1934 &amdgpu_switcheroo_ops, runtime);
d38ceaf9
AD
1935 if (runtime)
1936 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
1937
1938 /* Read BIOS */
83ba126a
AD
1939 if (!amdgpu_get_bios(adev)) {
1940 r = -EINVAL;
1941 goto failed;
1942 }
f7e9e9fe 1943
d38ceaf9 1944 r = amdgpu_atombios_init(adev);
2c1a2784
AD
1945 if (r) {
1946 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
83ba126a 1947 goto failed;
2c1a2784 1948 }
d38ceaf9 1949
4e99a44e
ML
1950 /* detect if we are running with an SRIOV vbios */
1951 amdgpu_device_detect_sriov_bios(adev);
048765ad 1952
d38ceaf9 1953 /* Post card if necessary */
bec86378 1954 if (amdgpu_vpost_needed(adev)) {
d38ceaf9 1955 if (!adev->bios) {
bec86378 1956 dev_err(adev->dev, "no vBIOS found\n");
83ba126a
AD
1957 r = -EINVAL;
1958 goto failed;
d38ceaf9 1959 }
bec86378 1960 DRM_INFO("GPU posting now...\n");
4e99a44e
ML
1961 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
1962 if (r) {
1963 dev_err(adev->dev, "gpu post error!\n");
1964 goto failed;
1965 }
1966 } else {
1967 DRM_INFO("GPU post is not needed\n");
d38ceaf9
AD
1968 }
1969
a5bde2f9
AD
1970 if (!adev->is_atom_fw) {
1971 /* Initialize clocks */
1972 r = amdgpu_atombios_get_clock_info(adev);
1973 if (r) {
1974 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
1975 return r;
1976 }
1977 /* init i2c buses */
1978 amdgpu_atombios_i2c_init(adev);
2c1a2784 1979 }
d38ceaf9
AD
1980
1981 /* Fence driver */
1982 r = amdgpu_fence_driver_init(adev);
2c1a2784
AD
1983 if (r) {
1984 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
83ba126a 1985 goto failed;
2c1a2784 1986 }
d38ceaf9
AD
1987
1988 /* init the mode config */
1989 drm_mode_config_init(adev->ddev);
1990
1991 r = amdgpu_init(adev);
1992 if (r) {
2c1a2784 1993 dev_err(adev->dev, "amdgpu_init failed\n");
d38ceaf9 1994 amdgpu_fini(adev);
83ba126a 1995 goto failed;
d38ceaf9
AD
1996 }
1997
1998 adev->accel_working = true;
1999
95844d20
MO
2000 /* Initialize the buffer migration limit. */
2001 if (amdgpu_moverate >= 0)
2002 max_MBps = amdgpu_moverate;
2003 else
2004 max_MBps = 8; /* Allow 8 MB/s. */
2005 /* Get a log2 for easy divisions. */
2006 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
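	/*
	 * Worked example (illustrative only): since 1 MB/s is roughly
	 * 1 byte/us, keeping log2 of the rate lets callers turn a time
	 * budget into a byte budget with a shift, e.g. max_MBps == 8
	 * gives log2_max_MBps == 3, so a 100 us window allows roughly
	 * 100 << 3 == 800 bytes of buffer migration.
	 */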
2007
d38ceaf9
AD
2008 r = amdgpu_ib_pool_init(adev);
2009 if (r) {
2010 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
83ba126a 2011 goto failed;
d38ceaf9
AD
2012 }
2013
2014 r = amdgpu_ib_ring_tests(adev);
2015 if (r)
2016 DRM_ERROR("ib ring test failed (%d).\n", r);
2017
9bc92b9c
ML
2018 amdgpu_fbdev_init(adev);
2019
d38ceaf9 2020 r = amdgpu_gem_debugfs_init(adev);
3f14e623 2021 if (r)
d38ceaf9 2022 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
d38ceaf9
AD
2023
2024 r = amdgpu_debugfs_regs_init(adev);
3f14e623 2025 if (r)
d38ceaf9 2026 DRM_ERROR("registering register debugfs failed (%d).\n", r);
d38ceaf9 2027
50ab2533 2028 r = amdgpu_debugfs_firmware_init(adev);
3f14e623 2029 if (r)
50ab2533 2030 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
50ab2533 2031
d38ceaf9
AD
2032 if ((amdgpu_testing & 1)) {
2033 if (adev->accel_working)
2034 amdgpu_test_moves(adev);
2035 else
2036 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2037 }
d38ceaf9
AD
2038 if (amdgpu_benchmarking) {
2039 if (adev->accel_working)
2040 amdgpu_benchmark(adev, amdgpu_benchmarking);
2041 else
2042 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2043 }
2044
2045 /* enable clockgating, etc., after ib tests, since some blocks require
2046 * explicit gating rather than handling it automatically.
2047 */
2048 r = amdgpu_late_init(adev);
2c1a2784
AD
2049 if (r) {
2050 dev_err(adev->dev, "amdgpu_late_init failed\n");
83ba126a 2051 goto failed;
2c1a2784 2052 }
d38ceaf9
AD
2053
2054 return 0;
83ba126a
AD
2055
2056failed:
2057 if (runtime)
2058 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2059 return r;
d38ceaf9
AD
2060}
2061
d38ceaf9
AD
2062/**
2063 * amdgpu_device_fini - tear down the driver
2064 *
2065 * @adev: amdgpu_device pointer
2066 *
2067 * Tear down the driver info (all asics).
2068 * Called at driver shutdown.
2069 */
2070void amdgpu_device_fini(struct amdgpu_device *adev)
2071{
2072 int r;
2073
2074 DRM_INFO("amdgpu: finishing device.\n");
2075 adev->shutdown = true;
a951ed85 2076 drm_crtc_force_disable_all(adev->ddev);
d38ceaf9
AD
2077 /* evict vram memory */
2078 amdgpu_bo_evict_vram(adev);
2079 amdgpu_ib_pool_fini(adev);
2080 amdgpu_fence_driver_fini(adev);
2081 amdgpu_fbdev_fini(adev);
2082 r = amdgpu_fini(adev);
d38ceaf9
AD
2083 adev->accel_working = false;
2084 /* free i2c buses */
2085 amdgpu_i2c_fini(adev);
2086 amdgpu_atombios_fini(adev);
2087 kfree(adev->bios);
2088 adev->bios = NULL;
84c8b22e
LW
2089 if (!pci_is_thunderbolt_attached(adev->pdev))
2090 vga_switcheroo_unregister_client(adev->pdev);
83ba126a
AD
2091 if (adev->flags & AMD_IS_PX)
2092 vga_switcheroo_fini_domain_pm_ops(adev->dev);
d38ceaf9
AD
2093 vga_client_register(adev->pdev, NULL, NULL, NULL);
2094 if (adev->rio_mem)
2095 pci_iounmap(adev->pdev, adev->rio_mem);
2096 adev->rio_mem = NULL;
2097 iounmap(adev->rmmio);
2098 adev->rmmio = NULL;
da69c161
KW
2099 if (adev->asic_type >= CHIP_BONAIRE)
2100 amdgpu_doorbell_fini(adev);
d38ceaf9 2101 amdgpu_debugfs_regs_cleanup(adev);
d38ceaf9
AD
2102}
2103
2104
2105/*
2106 * Suspend & resume.
2107 */
2108/**
810ddc3a 2109 * amdgpu_device_suspend - initiate device suspend
d38ceaf9
AD
2110 *
2111 * @dev: drm dev pointer
2112 * @suspend: true to put the device into a low power PCI state
2113 *
2114 * Puts the hw in the suspend state (all asics).
2115 * Returns 0 for success or an error on failure.
2116 * Called at driver suspend.
2117 */
810ddc3a 2118int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
d38ceaf9
AD
2119{
2120 struct amdgpu_device *adev;
2121 struct drm_crtc *crtc;
2122 struct drm_connector *connector;
5ceb54c6 2123 int r;
d38ceaf9
AD
2124
2125 if (dev == NULL || dev->dev_private == NULL) {
2126 return -ENODEV;
2127 }
2128
2129 adev = dev->dev_private;
2130
2131 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2132 return 0;
2133
2134 drm_kms_helper_poll_disable(dev);
2135
2136 /* turn off display hw */
4c7fbc39 2137 drm_modeset_lock_all(dev);
d38ceaf9
AD
2138 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2139 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2140 }
4c7fbc39 2141 drm_modeset_unlock_all(dev);
d38ceaf9 2142
756e6880 2143 /* unpin the front buffers and cursors */
d38ceaf9 2144 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
756e6880 2145 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
d38ceaf9
AD
2146 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2147 struct amdgpu_bo *robj;
2148
756e6880
AD
2149 if (amdgpu_crtc->cursor_bo) {
2150 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2151 r = amdgpu_bo_reserve(aobj, false);
2152 if (r == 0) {
2153 amdgpu_bo_unpin(aobj);
2154 amdgpu_bo_unreserve(aobj);
2155 }
2156 }
2157
d38ceaf9
AD
2158 if (rfb == NULL || rfb->obj == NULL) {
2159 continue;
2160 }
2161 robj = gem_to_amdgpu_bo(rfb->obj);
2162 /* don't unpin kernel fb objects */
2163 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
2164 r = amdgpu_bo_reserve(robj, false);
2165 if (r == 0) {
2166 amdgpu_bo_unpin(robj);
2167 amdgpu_bo_unreserve(robj);
2168 }
2169 }
2170 }
2171 /* evict vram memory */
2172 amdgpu_bo_evict_vram(adev);
2173
5ceb54c6 2174 amdgpu_fence_driver_suspend(adev);
d38ceaf9
AD
2175
2176 r = amdgpu_suspend(adev);
2177
a0a71e49
AD
2178 /* evict remaining vram memory
2179 * This second call to evict vram is to evict the gart page table
2180 * using the CPU.
2181 */
d38ceaf9
AD
2182 amdgpu_bo_evict_vram(adev);
2183
be34d3bf
AD
2184 if (adev->is_atom_fw)
2185 amdgpu_atomfirmware_scratch_regs_save(adev);
2186 else
2187 amdgpu_atombios_scratch_regs_save(adev);
d38ceaf9
AD
2188 pci_save_state(dev->pdev);
2189 if (suspend) {
2190 /* Shut down the device */
2191 pci_disable_device(dev->pdev);
2192 pci_set_power_state(dev->pdev, PCI_D3hot);
74b0b157 2193 } else {
2194 r = amdgpu_asic_reset(adev);
2195 if (r)
2196 DRM_ERROR("amdgpu asic reset failed\n");
d38ceaf9
AD
2197 }
2198
2199 if (fbcon) {
2200 console_lock();
2201 amdgpu_fbdev_set_suspend(adev, 1);
2202 console_unlock();
2203 }
2204 return 0;
2205}
2206
2207/**
810ddc3a 2208 * amdgpu_device_resume - initiate device resume
d38ceaf9
AD
2209 *
2210 * @dev: drm dev pointer
2211 *
2212 * Bring the hw back to operating state (all asics).
2213 * Returns 0 for success or an error on failure.
2214 * Called at driver resume.
2215 */
810ddc3a 2216int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
d38ceaf9
AD
2217{
2218 struct drm_connector *connector;
2219 struct amdgpu_device *adev = dev->dev_private;
756e6880 2220 struct drm_crtc *crtc;
d38ceaf9
AD
2221 int r;
2222
2223 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2224 return 0;
2225
74b0b157 2226 if (fbcon)
d38ceaf9 2227 console_lock();
74b0b157 2228
d38ceaf9
AD
2229 if (resume) {
2230 pci_set_power_state(dev->pdev, PCI_D0);
2231 pci_restore_state(dev->pdev);
74b0b157 2232 r = pci_enable_device(dev->pdev);
2233 if (r) {
d38ceaf9
AD
2234 if (fbcon)
2235 console_unlock();
74b0b157 2236 return r;
d38ceaf9
AD
2237 }
2238 }
be34d3bf
AD
2239 if (adev->is_atom_fw)
2240 amdgpu_atomfirmware_scratch_regs_restore(adev);
2241 else
2242 amdgpu_atombios_scratch_regs_restore(adev);
d38ceaf9
AD
2243
2244 /* post card */
c836fec5 2245 if (amdgpu_need_post(adev)) {
74b0b157 2246 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2247 if (r)
2248 DRM_ERROR("amdgpu asic init failed\n");
2249 }
d38ceaf9
AD
2250
2251 r = amdgpu_resume(adev);
ca198528
FC
2252 if (r)
2253 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
d38ceaf9 2254
5ceb54c6
AD
2255 amdgpu_fence_driver_resume(adev);
2256
ca198528
FC
2257 if (resume) {
2258 r = amdgpu_ib_ring_tests(adev);
2259 if (r)
2260 DRM_ERROR("ib ring test failed (%d).\n", r);
2261 }
d38ceaf9
AD
2262
2263 r = amdgpu_late_init(adev);
c085bd51
JQ
2264 if (r) {
2265 if (fbcon)
2266 console_unlock();
d38ceaf9 2267 return r;
c085bd51 2268 }
d38ceaf9 2269
756e6880
AD
2270 /* pin cursors */
2271 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2272 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2273
2274 if (amdgpu_crtc->cursor_bo) {
2275 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2276 r = amdgpu_bo_reserve(aobj, false);
2277 if (r == 0) {
2278 r = amdgpu_bo_pin(aobj,
2279 AMDGPU_GEM_DOMAIN_VRAM,
2280 &amdgpu_crtc->cursor_addr);
2281 if (r != 0)
2282 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2283 amdgpu_bo_unreserve(aobj);
2284 }
2285 }
2286 }
2287
d38ceaf9
AD
2288 /* blat the mode back in */
2289 if (fbcon) {
2290 drm_helper_resume_force_mode(dev);
2291 /* turn on display hw */
4c7fbc39 2292 drm_modeset_lock_all(dev);
d38ceaf9
AD
2293 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2294 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2295 }
4c7fbc39 2296 drm_modeset_unlock_all(dev);
d38ceaf9
AD
2297 }
2298
2299 drm_kms_helper_poll_enable(dev);
23a1a9e5
L
2300
2301 /*
2302 * Most of the connector probing functions try to acquire runtime pm
2303 * refs to ensure that the GPU is powered on when connector polling is
2304 * performed. Since we're calling this from a runtime PM callback,
2305 * trying to acquire rpm refs will cause us to deadlock.
2306 *
2307 * Since we're guaranteed to be holding the rpm lock, it's safe to
2308 * temporarily disable the rpm helpers so this doesn't deadlock us.
2309 */
2310#ifdef CONFIG_PM
2311 dev->dev->power.disable_depth++;
2312#endif
54fb2a5c 2313 drm_helper_hpd_irq_event(dev);
23a1a9e5
L
2314#ifdef CONFIG_PM
2315 dev->dev->power.disable_depth--;
2316#endif
d38ceaf9
AD
2317
2318 if (fbcon) {
2319 amdgpu_fbdev_set_suspend(adev, 0);
2320 console_unlock();
2321 }
2322
2323 return 0;
2324}
2325
63fbf42f
CZ
2326static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2327{
2328 int i;
2329 bool asic_hang = false;
2330
2331 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2332 if (!adev->ip_blocks[i].status.valid)
63fbf42f 2333 continue;
a1255107
AD
2334 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2335 adev->ip_blocks[i].status.hang =
2336 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2337 if (adev->ip_blocks[i].status.hang) {
2338 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
63fbf42f
CZ
2339 asic_hang = true;
2340 }
2341 }
2342 return asic_hang;
2343}
2344
4d446656 2345static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
d31a501e
CZ
2346{
2347 int i, r = 0;
2348
2349 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2350 if (!adev->ip_blocks[i].status.valid)
d31a501e 2351 continue;
a1255107
AD
2352 if (adev->ip_blocks[i].status.hang &&
2353 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2354 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
d31a501e
CZ
2355 if (r)
2356 return r;
2357 }
2358 }
2359
2360 return 0;
2361}
2362
35d782fe
CZ
2363static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2364{
da146d3b
AD
2365 int i;
2366
2367 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2368 if (!adev->ip_blocks[i].status.valid)
da146d3b 2369 continue;
a1255107
AD
2370 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2371 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2372 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
2373 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
2374 if (adev->ip_blocks[i].status.hang) {
da146d3b
AD
2375 DRM_INFO("Some block need full reset!\n");
2376 return true;
2377 }
2378 }
35d782fe
CZ
2379 }
2380 return false;
2381}
2382
2383static int amdgpu_soft_reset(struct amdgpu_device *adev)
2384{
2385 int i, r = 0;
2386
2387 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2388 if (!adev->ip_blocks[i].status.valid)
35d782fe 2389 continue;
a1255107
AD
2390 if (adev->ip_blocks[i].status.hang &&
2391 adev->ip_blocks[i].version->funcs->soft_reset) {
2392 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
35d782fe
CZ
2393 if (r)
2394 return r;
2395 }
2396 }
2397
2398 return 0;
2399}
2400
2401static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2402{
2403 int i, r = 0;
2404
2405 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2406 if (!adev->ip_blocks[i].status.valid)
35d782fe 2407 continue;
a1255107
AD
2408 if (adev->ip_blocks[i].status.hang &&
2409 adev->ip_blocks[i].version->funcs->post_soft_reset)
2410 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
35d782fe
CZ
2411 if (r)
2412 return r;
2413 }
2414
2415 return 0;
2416}
2417
3ad81f16
CZ
2418bool amdgpu_need_backup(struct amdgpu_device *adev)
2419{
2420 if (adev->flags & AMD_IS_APU)
2421 return false;
2422
2423 return amdgpu_lockup_timeout > 0 ? true : false;
2424}
2425
53cdccd5
CZ
2426static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2427 struct amdgpu_ring *ring,
2428 struct amdgpu_bo *bo,
f54d1867 2429 struct dma_fence **fence)
53cdccd5
CZ
2430{
2431 uint32_t domain;
2432 int r;
2433
2434 if (!bo->shadow)
2435 return 0;
2436
2437 r = amdgpu_bo_reserve(bo, false);
2438 if (r)
2439 return r;
2440 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2441 /* if bo has been evicted, then no need to recover */
2442 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
2443 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
2444 NULL, fence, true);
2445 if (r) {
2446 DRM_ERROR("recover page table failed!\n");
2447 goto err;
2448 }
2449 }
2450err:
2451 amdgpu_bo_unreserve(bo);
2452 return r;
2453}
2454
a90ad3c2
ML
2455/**
2456 * amdgpu_sriov_gpu_reset - reset the asic
2457 *
2458 * @adev: amdgpu device pointer
2459 * @voluntary: true if this reset is requested by the guest,
2460 * false if it is requested by the hypervisor
2461 *
2462 * Attempt to reset the GPU if it has hung (all asics),
2463 * for the SRIOV case.
2464 * Returns 0 for success or an error on failure.
2465 */
2466int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
2467{
2468 int i, r = 0;
2469 int resched;
2470 struct amdgpu_bo *bo, *tmp;
2471 struct amdgpu_ring *ring;
2472 struct dma_fence *fence = NULL, *next = NULL;
2473
147b5983 2474 mutex_lock(&adev->virt.lock_reset);
a90ad3c2 2475 atomic_inc(&adev->gpu_reset_counter);
1fb37a3d 2476 adev->gfx.in_reset = true;
a90ad3c2
ML
2477
2478 /* block TTM */
2479 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2480
2481 /* block scheduler */
2482 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2483 ring = adev->rings[i];
2484
2485 if (!ring || !ring->sched.thread)
2486 continue;
2487
2488 kthread_park(ring->sched.thread);
2489 amd_sched_hw_job_reset(&ring->sched);
2490 }
2491
2492 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2493 amdgpu_fence_driver_force_completion(adev);
2494
2495 /* request to take full control of GPU before re-initialization */
2496 if (voluntary)
2497 amdgpu_virt_reset_gpu(adev);
2498 else
2499 amdgpu_virt_request_full_gpu(adev, true);
2500
2501
2502 /* Resume IP prior to SMC */
e4f0fdcc 2503 amdgpu_sriov_reinit_early(adev);
a90ad3c2
ML
2504
2505 /* we need to recover the gart prior to resuming SMC/CP/SDMA */
2506 amdgpu_ttm_recover_gart(adev);
2507
2508 /* now we are okay to resume SMC/CP/SDMA */
e4f0fdcc 2509 amdgpu_sriov_reinit_late(adev);
a90ad3c2
ML
2510
2511 amdgpu_irq_gpu_reset_resume_helper(adev);
2512
2513 if (amdgpu_ib_ring_tests(adev))
2514 dev_err(adev->dev, "[GPU_RESET] ib ring test failed\n");
2515
2516 /* release full control of GPU after ib test */
2517 amdgpu_virt_release_full_gpu(adev, true);
2518
2519 DRM_INFO("recover vram bo from shadow\n");
2520
2521 ring = adev->mman.buffer_funcs_ring;
2522 mutex_lock(&adev->shadow_list_lock);
2523 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2524 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2525 if (fence) {
2526 r = dma_fence_wait(fence, false);
2527 if (r) {
2528 WARN(r, "recovery from shadow isn't completed\n");
2529 break;
2530 }
2531 }
2532
2533 dma_fence_put(fence);
2534 fence = next;
2535 }
2536 mutex_unlock(&adev->shadow_list_lock);
2537
2538 if (fence) {
2539 r = dma_fence_wait(fence, false);
2540 if (r)
2541 WARN(r, "recovery from shadow isn't completed\n");
2542 }
2543 dma_fence_put(fence);
2544
2545 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2546 struct amdgpu_ring *ring = adev->rings[i];
2547 if (!ring || !ring->sched.thread)
2548 continue;
2549
2550 amd_sched_job_recovery(&ring->sched);
2551 kthread_unpark(ring->sched.thread);
2552 }
2553
2554 drm_helper_resume_force_mode(adev->ddev);
2555 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2556 if (r) {
2557 /* bad news, how to tell it to userspace ? */
2558 dev_info(adev->dev, "GPU reset failed\n");
2559 }
2560
1fb37a3d 2561 adev->gfx.in_reset = false;
147b5983 2562 mutex_unlock(&adev->virt.lock_reset);
a90ad3c2
ML
2563 return r;
2564}
2565
d38ceaf9
AD
2566/**
2567 * amdgpu_gpu_reset - reset the asic
2568 *
2569 * @adev: amdgpu device pointer
2570 *
2571 * Attempt to reset the GPU if it has hung (all asics).
2572 * Returns 0 for success or an error on failure.
2573 */
2574int amdgpu_gpu_reset(struct amdgpu_device *adev)
2575{
d38ceaf9
AD
2576 int i, r;
2577 int resched;
35d782fe 2578 bool need_full_reset;
d38ceaf9 2579
fb140b29 2580 if (amdgpu_sriov_vf(adev))
a90ad3c2 2581 return amdgpu_sriov_gpu_reset(adev, true);
fb140b29 2582
63fbf42f
CZ
2583 if (!amdgpu_check_soft_reset(adev)) {
2584 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2585 return 0;
2586 }
d38ceaf9 2587
d94aed5a 2588 atomic_inc(&adev->gpu_reset_counter);
d38ceaf9 2589
a3c47d6b
CZ
2590 /* block TTM */
2591 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2592
0875dc9e
CZ
2593 /* block scheduler */
2594 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2595 struct amdgpu_ring *ring = adev->rings[i];
2596
2597 if (!ring)
2598 continue;
2599 kthread_park(ring->sched.thread);
aa1c8900 2600 amd_sched_hw_job_reset(&ring->sched);
0875dc9e 2601 }
2200edac
CZ
2602 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2603 amdgpu_fence_driver_force_completion(adev);
d38ceaf9 2604
35d782fe 2605 need_full_reset = amdgpu_need_full_reset(adev);
d38ceaf9 2606
35d782fe
CZ
2607 if (!need_full_reset) {
2608 amdgpu_pre_soft_reset(adev);
2609 r = amdgpu_soft_reset(adev);
2610 amdgpu_post_soft_reset(adev);
2611 if (r || amdgpu_check_soft_reset(adev)) {
2612 DRM_INFO("soft reset failed, will fallback to full reset!\n");
2613 need_full_reset = true;
2614 }
f1aa7e08
CZ
2615 }
2616
35d782fe 2617 if (need_full_reset) {
35d782fe 2618 r = amdgpu_suspend(adev);
bfa99269 2619
35d782fe
CZ
2620retry:
2621 /* Disable fb access */
2622 if (adev->mode_info.num_crtc) {
2623 struct amdgpu_mode_mc_save save;
2624 amdgpu_display_stop_mc_access(adev, &save);
2625 amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
2626 }
be34d3bf
AD
2627 if (adev->is_atom_fw)
2628 amdgpu_atomfirmware_scratch_regs_save(adev);
2629 else
2630 amdgpu_atombios_scratch_regs_save(adev);
35d782fe 2631 r = amdgpu_asic_reset(adev);
be34d3bf
AD
2632 if (adev->is_atom_fw)
2633 amdgpu_atomfirmware_scratch_regs_restore(adev);
2634 else
2635 amdgpu_atombios_scratch_regs_restore(adev);
35d782fe
CZ
2636 /* post card */
2637 amdgpu_atom_asic_init(adev->mode_info.atom_context);
2638
2639 if (!r) {
2640 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
2641 r = amdgpu_resume(adev);
2642 }
d38ceaf9 2643 }
d38ceaf9 2644 if (!r) {
e72cfd58 2645 amdgpu_irq_gpu_reset_resume_helper(adev);
2c0d7318
CZ
2646 if (need_full_reset && amdgpu_need_backup(adev)) {
2647 r = amdgpu_ttm_recover_gart(adev);
2648 if (r)
2649 DRM_ERROR("gart recovery failed!!!\n");
2650 }
1f465087
CZ
2651 r = amdgpu_ib_ring_tests(adev);
2652 if (r) {
2653 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
40019dc4 2654 r = amdgpu_suspend(adev);
53cdccd5 2655 need_full_reset = true;
40019dc4 2656 goto retry;
1f465087 2657 }
53cdccd5
CZ
2658 /**
2659 * recover the VM page tables, since we cannot depend on VRAM being
2660 * consistent after a full GPU reset.
2661 */
2662 if (need_full_reset && amdgpu_need_backup(adev)) {
2663 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2664 struct amdgpu_bo *bo, *tmp;
f54d1867 2665 struct dma_fence *fence = NULL, *next = NULL;
53cdccd5
CZ
2666
2667 DRM_INFO("recover vram bo from shadow\n");
2668 mutex_lock(&adev->shadow_list_lock);
2669 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2670 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2671 if (fence) {
f54d1867 2672 r = dma_fence_wait(fence, false);
53cdccd5 2673 if (r) {
1d7b17b0 2674 WARN(r, "recovery from shadow isn't completed\n");
53cdccd5
CZ
2675 break;
2676 }
2677 }
1f465087 2678
f54d1867 2679 dma_fence_put(fence);
53cdccd5
CZ
2680 fence = next;
2681 }
2682 mutex_unlock(&adev->shadow_list_lock);
2683 if (fence) {
f54d1867 2684 r = dma_fence_wait(fence, false);
53cdccd5 2685 if (r)
1d7b17b0 2686 WARN(r, "recovery from shadow isn't completed\n");
53cdccd5 2687 }
f54d1867 2688 dma_fence_put(fence);
53cdccd5 2689 }
d38ceaf9
AD
2690 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2691 struct amdgpu_ring *ring = adev->rings[i];
2692 if (!ring)
2693 continue;
53cdccd5 2694
aa1c8900 2695 amd_sched_job_recovery(&ring->sched);
0875dc9e 2696 kthread_unpark(ring->sched.thread);
d38ceaf9 2697 }
d38ceaf9 2698 } else {
2200edac 2699 dev_err(adev->dev, "asic resume failed (%d).\n", r);
d38ceaf9 2700 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
0875dc9e
CZ
2701 if (adev->rings[i]) {
2702 kthread_unpark(adev->rings[i]->sched.thread);
0875dc9e 2703 }
d38ceaf9
AD
2704 }
2705 }
2706
2707 drm_helper_resume_force_mode(adev->ddev);
2708
2709 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2710 if (r) {
2711 /* bad news, how to tell it to userspace ? */
2712 dev_info(adev->dev, "GPU reset failed\n");
2713 }
2714
d38ceaf9
AD
2715 return r;
2716}
2717
d0dd7f0c
AD
2718void amdgpu_get_pcie_info(struct amdgpu_device *adev)
2719{
2720 u32 mask;
2721 int ret;
2722
cd474ba0
AD
2723 if (amdgpu_pcie_gen_cap)
2724 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 2725
cd474ba0
AD
2726 if (amdgpu_pcie_lane_cap)
2727 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 2728
cd474ba0
AD
2729 /* covers APUs as well */
2730 if (pci_is_root_bus(adev->pdev->bus)) {
2731 if (adev->pm.pcie_gen_mask == 0)
2732 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2733 if (adev->pm.pcie_mlw_mask == 0)
2734 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 2735 return;
cd474ba0 2736 }
d0dd7f0c 2737
cd474ba0
AD
2738 if (adev->pm.pcie_gen_mask == 0) {
2739 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
2740 if (!ret) {
2741 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
2742 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
2743 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
2744
2745 if (mask & DRM_PCIE_SPEED_25)
2746 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
2747 if (mask & DRM_PCIE_SPEED_50)
2748 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
2749 if (mask & DRM_PCIE_SPEED_80)
2750 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
2751 } else {
2752 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2753 }
2754 }
2755 if (adev->pm.pcie_mlw_mask == 0) {
2756 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
2757 if (!ret) {
2758 switch (mask) {
2759 case 32:
2760 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
2761 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2762 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2763 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2764 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2765 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2766 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2767 break;
2768 case 16:
2769 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2770 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2771 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2772 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2773 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2774 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2775 break;
2776 case 12:
2777 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2778 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2779 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2780 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2781 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2782 break;
2783 case 8:
2784 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2785 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2786 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2787 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2788 break;
2789 case 4:
2790 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2791 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2792 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2793 break;
2794 case 2:
2795 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2796 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2797 break;
2798 case 1:
2799 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
2800 break;
2801 default:
2802 break;
2803 }
2804 } else {
2805 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c
AD
2806 }
2807 }
2808}
d38ceaf9
AD
2809
2810/*
2811 * Debugfs
2812 */
2813int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
06ab6832 2814 const struct drm_info_list *files,
d38ceaf9
AD
2815 unsigned nfiles)
2816{
2817 unsigned i;
2818
2819 for (i = 0; i < adev->debugfs_count; i++) {
2820 if (adev->debugfs[i].files == files) {
2821 /* Already registered */
2822 return 0;
2823 }
2824 }
2825
2826 i = adev->debugfs_count + 1;
2827 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
2828 DRM_ERROR("Reached maximum number of debugfs components.\n");
2829 DRM_ERROR("Report so we increase "
2830 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
2831 return -EINVAL;
2832 }
2833 adev->debugfs[adev->debugfs_count].files = files;
2834 adev->debugfs[adev->debugfs_count].num_files = nfiles;
2835 adev->debugfs_count = i;
2836#if defined(CONFIG_DEBUG_FS)
d38ceaf9
AD
2837 drm_debugfs_create_files(files, nfiles,
2838 adev->ddev->primary->debugfs_root,
2839 adev->ddev->primary);
2840#endif
2841 return 0;
2842}
2843
d38ceaf9
AD
2844#if defined(CONFIG_DEBUG_FS)
2845
2846static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
2847 size_t size, loff_t *pos)
2848{
45063097 2849 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
2850 ssize_t result = 0;
2851 int r;
bd12267d 2852 bool pm_pg_lock, use_bank;
56628159 2853 unsigned instance_bank, sh_bank, se_bank;
d38ceaf9
AD
2854
2855 if (size & 0x3 || *pos & 0x3)
2856 return -EINVAL;
2857
bd12267d
TSD
2858 /* are we reading registers for which a PG lock is necessary? */
2859 pm_pg_lock = (*pos >> 23) & 1;
2860
56628159
TSD
2861 if (*pos & (1ULL << 62)) {
2862 se_bank = (*pos >> 24) & 0x3FF;
2863 sh_bank = (*pos >> 34) & 0x3FF;
2864 instance_bank = (*pos >> 44) & 0x3FF;
32977f93
TSD
2865
2866 if (se_bank == 0x3FF)
2867 se_bank = 0xFFFFFFFF;
2868 if (sh_bank == 0x3FF)
2869 sh_bank = 0xFFFFFFFF;
2870 if (instance_bank == 0x3FF)
2871 instance_bank = 0xFFFFFFFF;
56628159 2872 use_bank = 1;
56628159
TSD
2873 } else {
2874 use_bank = 0;
2875 }
2876
801a6aa9 2877 *pos &= (1UL << 22) - 1;
bd12267d 2878
56628159 2879 if (use_bank) {
32977f93
TSD
2880 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
2881 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
56628159
TSD
2882 return -EINVAL;
2883 mutex_lock(&adev->grbm_idx_mutex);
2884 amdgpu_gfx_select_se_sh(adev, se_bank,
2885 sh_bank, instance_bank);
2886 }
2887
bd12267d
TSD
2888 if (pm_pg_lock)
2889 mutex_lock(&adev->pm.mutex);
2890
d38ceaf9
AD
2891 while (size) {
2892 uint32_t value;
2893
2894 if (*pos > adev->rmmio_size)
56628159 2895 goto end;
d38ceaf9
AD
2896
2897 value = RREG32(*pos >> 2);
2898 r = put_user(value, (uint32_t *)buf);
56628159
TSD
2899 if (r) {
2900 result = r;
2901 goto end;
2902 }
d38ceaf9
AD
2903
2904 result += 4;
2905 buf += 4;
2906 *pos += 4;
2907 size -= 4;
2908 }
2909
56628159
TSD
2910end:
2911 if (use_bank) {
2912 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2913 mutex_unlock(&adev->grbm_idx_mutex);
2914 }
2915
bd12267d
TSD
2916 if (pm_pg_lock)
2917 mutex_unlock(&adev->pm.mutex);
2918
d38ceaf9
AD
2919 return result;
2920}
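
/*
 * Offset-encoding sketch (illustrative only, not part of the driver): a
 * hypothetical helper showing how a reader of the amdgpu_regs debugfs file
 * could compose the file offset decoded above.  Bit 62 selects banked
 * access, bits 24/34/44 carry the SE/SH/instance bank (0x3FF means
 * broadcast), bit 23 requests the PG lock, and the low bits are the
 * register byte offset.  The helper name is an assumption made for this
 * example.
 */
#if 0	/* example only, not built */
static loff_t amdgpu_regs_make_offset(u32 reg_dw, u32 se, u32 sh,
				      u32 instance, bool banked, bool pg_lock)
{
	loff_t pos = (loff_t)reg_dw << 2;	/* register index -> byte offset */

	if (pg_lock)
		pos |= 1ULL << 23;
	if (banked) {
		pos |= 1ULL << 62;
		pos |= (loff_t)(se & 0x3FF) << 24;
		pos |= (loff_t)(sh & 0x3FF) << 34;
		pos |= (loff_t)(instance & 0x3FF) << 44;
	}
	return pos;
}
#endif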
2921
2922static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
2923 size_t size, loff_t *pos)
2924{
45063097 2925 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
2926 ssize_t result = 0;
2927 int r;
394fdde2
TSD
2928 bool pm_pg_lock, use_bank;
2929 unsigned instance_bank, sh_bank, se_bank;
d38ceaf9
AD
2930
2931 if (size & 0x3 || *pos & 0x3)
2932 return -EINVAL;
2933
394fdde2
TSD
2934 /* are we writing registers for which a PG lock is necessary? */
2935 pm_pg_lock = (*pos >> 23) & 1;
2936
2937 if (*pos & (1ULL << 62)) {
2938 se_bank = (*pos >> 24) & 0x3FF;
2939 sh_bank = (*pos >> 34) & 0x3FF;
2940 instance_bank = (*pos >> 44) & 0x3FF;
2941
2942 if (se_bank == 0x3FF)
2943 se_bank = 0xFFFFFFFF;
2944 if (sh_bank == 0x3FF)
2945 sh_bank = 0xFFFFFFFF;
2946 if (instance_bank == 0x3FF)
2947 instance_bank = 0xFFFFFFFF;
2948 use_bank = 1;
2949 } else {
2950 use_bank = 0;
2951 }
2952
801a6aa9 2953 *pos &= (1UL << 22) - 1;
394fdde2
TSD
2954
2955 if (use_bank) {
2956 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
2957 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
2958 return -EINVAL;
2959 mutex_lock(&adev->grbm_idx_mutex);
2960 amdgpu_gfx_select_se_sh(adev, se_bank,
2961 sh_bank, instance_bank);
2962 }
2963
2964 if (pm_pg_lock)
2965 mutex_lock(&adev->pm.mutex);
2966
d38ceaf9
AD
2967 while (size) {
2968 uint32_t value;
2969
2970 if (*pos > adev->rmmio_size)
2971 return result;
2972
2973 r = get_user(value, (uint32_t *)buf);
2974 if (r)
2975 return r;
2976
2977 WREG32(*pos >> 2, value);
2978
2979 result += 4;
2980 buf += 4;
2981 *pos += 4;
2982 size -= 4;
2983 }
2984
394fdde2
TSD
2985 if (use_bank) {
2986 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2987 mutex_unlock(&adev->grbm_idx_mutex);
2988 }
2989
2990 if (pm_pg_lock)
2991 mutex_unlock(&adev->pm.mutex);
2992
d38ceaf9
AD
2993 return result;
2994}
2995
adcec288
TSD
2996static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
2997 size_t size, loff_t *pos)
2998{
45063097 2999 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3000 ssize_t result = 0;
3001 int r;
3002
3003 if (size & 0x3 || *pos & 0x3)
3004 return -EINVAL;
3005
3006 while (size) {
3007 uint32_t value;
3008
3009 value = RREG32_PCIE(*pos >> 2);
3010 r = put_user(value, (uint32_t *)buf);
3011 if (r)
3012 return r;
3013
3014 result += 4;
3015 buf += 4;
3016 *pos += 4;
3017 size -= 4;
3018 }
3019
3020 return result;
3021}
3022
3023static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
3024 size_t size, loff_t *pos)
3025{
45063097 3026 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3027 ssize_t result = 0;
3028 int r;
3029
3030 if (size & 0x3 || *pos & 0x3)
3031 return -EINVAL;
3032
3033 while (size) {
3034 uint32_t value;
3035
3036 r = get_user(value, (uint32_t *)buf);
3037 if (r)
3038 return r;
3039
3040 WREG32_PCIE(*pos >> 2, value);
3041
3042 result += 4;
3043 buf += 4;
3044 *pos += 4;
3045 size -= 4;
3046 }
3047
3048 return result;
3049}
3050
3051static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
3052 size_t size, loff_t *pos)
3053{
45063097 3054 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3055 ssize_t result = 0;
3056 int r;
3057
3058 if (size & 0x3 || *pos & 0x3)
3059 return -EINVAL;
3060
3061 while (size) {
3062 uint32_t value;
3063
3064 value = RREG32_DIDT(*pos >> 2);
3065 r = put_user(value, (uint32_t *)buf);
3066 if (r)
3067 return r;
3068
3069 result += 4;
3070 buf += 4;
3071 *pos += 4;
3072 size -= 4;
3073 }
3074
3075 return result;
3076}
3077
3078static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
3079 size_t size, loff_t *pos)
3080{
45063097 3081 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3082 ssize_t result = 0;
3083 int r;
3084
3085 if (size & 0x3 || *pos & 0x3)
3086 return -EINVAL;
3087
3088 while (size) {
3089 uint32_t value;
3090
3091 r = get_user(value, (uint32_t *)buf);
3092 if (r)
3093 return r;
3094
3095 WREG32_DIDT(*pos >> 2, value);
3096
3097 result += 4;
3098 buf += 4;
3099 *pos += 4;
3100 size -= 4;
3101 }
3102
3103 return result;
3104}
3105
3106static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
3107 size_t size, loff_t *pos)
3108{
45063097 3109 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3110 ssize_t result = 0;
3111 int r;
3112
3113 if (size & 0x3 || *pos & 0x3)
3114 return -EINVAL;
3115
3116 while (size) {
3117 uint32_t value;
3118
6fc0deaf 3119 value = RREG32_SMC(*pos);
adcec288
TSD
3120 r = put_user(value, (uint32_t *)buf);
3121 if (r)
3122 return r;
3123
3124 result += 4;
3125 buf += 4;
3126 *pos += 4;
3127 size -= 4;
3128 }
3129
3130 return result;
3131}
3132
3133static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
3134 size_t size, loff_t *pos)
3135{
45063097 3136 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3137 ssize_t result = 0;
3138 int r;
3139
3140 if (size & 0x3 || *pos & 0x3)
3141 return -EINVAL;
3142
3143 while (size) {
3144 uint32_t value;
3145
3146 r = get_user(value, (uint32_t *)buf);
3147 if (r)
3148 return r;
3149
6fc0deaf 3150 WREG32_SMC(*pos, value);
adcec288
TSD
3151
3152 result += 4;
3153 buf += 4;
3154 *pos += 4;
3155 size -= 4;
3156 }
3157
3158 return result;
3159}
3160
1e051413
TSD
3161static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
3162 size_t size, loff_t *pos)
3163{
45063097 3164 struct amdgpu_device *adev = file_inode(f)->i_private;
1e051413
TSD
3165 ssize_t result = 0;
3166 int r;
3167 uint32_t *config, no_regs = 0;
3168
3169 if (size & 0x3 || *pos & 0x3)
3170 return -EINVAL;
3171
ecab7668 3172 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
1e051413
TSD
3173 if (!config)
3174 return -ENOMEM;
3175
3176 /* version, increment each time something is added */
9a999359 3177 config[no_regs++] = 3;
1e051413
TSD
3178 config[no_regs++] = adev->gfx.config.max_shader_engines;
3179 config[no_regs++] = adev->gfx.config.max_tile_pipes;
3180 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
3181 config[no_regs++] = adev->gfx.config.max_sh_per_se;
3182 config[no_regs++] = adev->gfx.config.max_backends_per_se;
3183 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
3184 config[no_regs++] = adev->gfx.config.max_gprs;
3185 config[no_regs++] = adev->gfx.config.max_gs_threads;
3186 config[no_regs++] = adev->gfx.config.max_hw_contexts;
3187 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
3188 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
3189 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
3190 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
3191 config[no_regs++] = adev->gfx.config.num_tile_pipes;
3192 config[no_regs++] = adev->gfx.config.backend_enable_mask;
3193 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
3194 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
3195 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
3196 config[no_regs++] = adev->gfx.config.num_gpus;
3197 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
3198 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
3199 config[no_regs++] = adev->gfx.config.gb_addr_config;
3200 config[no_regs++] = adev->gfx.config.num_rbs;
3201
89a8f309
TSD
3202 /* rev==1 */
3203 config[no_regs++] = adev->rev_id;
3204 config[no_regs++] = adev->pg_flags;
3205 config[no_regs++] = adev->cg_flags;
3206
e9f11dc8
TSD
3207 /* rev==2 */
3208 config[no_regs++] = adev->family;
3209 config[no_regs++] = adev->external_rev_id;
3210
9a999359
TSD
3211 /* rev==3 */
3212 config[no_regs++] = adev->pdev->device;
3213 config[no_regs++] = adev->pdev->revision;
3214 config[no_regs++] = adev->pdev->subsystem_device;
3215 config[no_regs++] = adev->pdev->subsystem_vendor;
3216
1e051413
TSD
3217 while (size && (*pos < no_regs * 4)) {
3218 uint32_t value;
3219
3220 value = config[*pos >> 2];
3221 r = put_user(value, (uint32_t *)buf);
3222 if (r) {
3223 kfree(config);
3224 return r;
3225 }
3226
3227 result += 4;
3228 buf += 4;
3229 *pos += 4;
3230 size -= 4;
3231 }
3232
3233 kfree(config);
3234 return result;
3235}
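
/*
 * Layout note (assumption based on the code above): the dump read back
 * from amdgpu_gca_config is a flat array of u32s whose first word is a
 * format revision, so userspace should check config[0] before
 * interpreting the rev==1/2/3 fields appended at the end.
 */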
3236
f2cdaf20
TSD
3237static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
3238 size_t size, loff_t *pos)
3239{
45063097 3240 struct amdgpu_device *adev = file_inode(f)->i_private;
9f8df7d7
TSD
3241 int idx, x, outsize, r, valuesize;
3242 uint32_t values[16];
f2cdaf20 3243
9f8df7d7 3244 if (size & 3 || *pos & 0x3)
f2cdaf20
TSD
3245 return -EINVAL;
3246
3cbc614f
SP
3247 if (amdgpu_dpm == 0)
3248 return -EINVAL;
3249
f2cdaf20
TSD
3250 /* convert offset to sensor number */
3251 idx = *pos >> 2;
3252
9f8df7d7 3253 valuesize = sizeof(values);
f2cdaf20 3254 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
9f8df7d7 3255 r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
3cbc614f
SP
3256 else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
3257 r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
3258 &valuesize);
f2cdaf20
TSD
3259 else
3260 return -EINVAL;
3261
9f8df7d7
TSD
3262 if (size > valuesize)
3263 return -EINVAL;
3264
3265 outsize = 0;
3266 x = 0;
3267 if (!r) {
3268 while (size) {
3269 r = put_user(values[x++], (int32_t *)buf);
3270 buf += 4;
3271 size -= 4;
3272 outsize += 4;
3273 }
3274 }
f2cdaf20 3275
9f8df7d7 3276 return !r ? outsize : r;
f2cdaf20 3277}
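
/*
 * Usage note (assumption): the file offset selects the sensor, i.e.
 * sensor index == offset >> 2, and each read returns one or more 32-bit
 * values for that sensor depending on the powerplay backend in use.
 */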
1e051413 3278
273d7aa1
TSD
3279static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
3280 size_t size, loff_t *pos)
3281{
3282 struct amdgpu_device *adev = f->f_inode->i_private;
3283 int r, x;
3284 ssize_t result = 0;
472259f0 3285 uint32_t offset, se, sh, cu, wave, simd, data[32];
273d7aa1
TSD
3286
3287 if (size & 3 || *pos & 3)
3288 return -EINVAL;
3289
3290 /* decode offset */
3291 offset = (*pos & 0x7F);
3292 se = ((*pos >> 7) & 0xFF);
3293 sh = ((*pos >> 15) & 0xFF);
3294 cu = ((*pos >> 23) & 0xFF);
3295 wave = ((*pos >> 31) & 0xFF);
3296 simd = ((*pos >> 37) & 0xFF);
273d7aa1
TSD
3297
3298 /* switch to the specific se/sh/cu */
3299 mutex_lock(&adev->grbm_idx_mutex);
3300 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3301
3302 x = 0;
472259f0
TSD
3303 if (adev->gfx.funcs->read_wave_data)
3304 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
273d7aa1
TSD
3305
3306 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3307 mutex_unlock(&adev->grbm_idx_mutex);
3308
5ecfb3b8
TSD
3309 if (!x)
3310 return -EINVAL;
3311
472259f0 3312 while (size && (offset < x * 4)) {
273d7aa1
TSD
3313 uint32_t value;
3314
472259f0 3315 value = data[offset >> 2];
273d7aa1
TSD
3316 r = put_user(value, (uint32_t *)buf);
3317 if (r)
3318 return r;
3319
3320 result += 4;
3321 buf += 4;
472259f0 3322 offset += 4;
273d7aa1
TSD
3323 size -= 4;
3324 }
3325
3326 return result;
3327}
3328
c5a60ce8
TSD
3329static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
3330 size_t size, loff_t *pos)
3331{
3332 struct amdgpu_device *adev = f->f_inode->i_private;
3333 int r;
3334 ssize_t result = 0;
3335 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
3336
3337 if (size & 3 || *pos & 3)
3338 return -EINVAL;
3339
3340 /* decode offset */
3341 offset = (*pos & 0xFFF); /* in dwords */
3342 se = ((*pos >> 12) & 0xFF);
3343 sh = ((*pos >> 20) & 0xFF);
3344 cu = ((*pos >> 28) & 0xFF);
3345 wave = ((*pos >> 36) & 0xFF);
3346 simd = ((*pos >> 44) & 0xFF);
3347 thread = ((*pos >> 52) & 0xFF);
3348 bank = ((*pos >> 60) & 1);
3349
3350 data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
3351 if (!data)
3352 return -ENOMEM;
3353
3354 /* switch to the specific se/sh/cu */
3355 mutex_lock(&adev->grbm_idx_mutex);
3356 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3357
3358 if (bank == 0) {
3359 if (adev->gfx.funcs->read_wave_vgprs)
3360 adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
3361 } else {
3362 if (adev->gfx.funcs->read_wave_sgprs)
3363 adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
3364 }
3365
3366 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3367 mutex_unlock(&adev->grbm_idx_mutex);
3368
3369 while (size) {
3370 uint32_t value;
3371
3372 value = data[offset++];
3373 r = put_user(value, (uint32_t *)buf);
3374 if (r) {
3375 result = r;
3376 goto err;
3377 }
3378
3379 result += 4;
3380 buf += 4;
3381 size -= 4;
3382 }
3383
3384err:
3385 kfree(data);
3386 return result;
3387}
3388
d38ceaf9
AD
3389static const struct file_operations amdgpu_debugfs_regs_fops = {
3390 .owner = THIS_MODULE,
3391 .read = amdgpu_debugfs_regs_read,
3392 .write = amdgpu_debugfs_regs_write,
3393 .llseek = default_llseek
3394};
adcec288
TSD
3395static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3396 .owner = THIS_MODULE,
3397 .read = amdgpu_debugfs_regs_didt_read,
3398 .write = amdgpu_debugfs_regs_didt_write,
3399 .llseek = default_llseek
3400};
3401static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3402 .owner = THIS_MODULE,
3403 .read = amdgpu_debugfs_regs_pcie_read,
3404 .write = amdgpu_debugfs_regs_pcie_write,
3405 .llseek = default_llseek
3406};
3407static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3408 .owner = THIS_MODULE,
3409 .read = amdgpu_debugfs_regs_smc_read,
3410 .write = amdgpu_debugfs_regs_smc_write,
3411 .llseek = default_llseek
3412};
3413
1e051413
TSD
3414static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3415 .owner = THIS_MODULE,
3416 .read = amdgpu_debugfs_gca_config_read,
3417 .llseek = default_llseek
3418};
3419
f2cdaf20
TSD
3420static const struct file_operations amdgpu_debugfs_sensors_fops = {
3421 .owner = THIS_MODULE,
3422 .read = amdgpu_debugfs_sensor_read,
3423 .llseek = default_llseek
3424};
3425
273d7aa1
TSD
3426static const struct file_operations amdgpu_debugfs_wave_fops = {
3427 .owner = THIS_MODULE,
3428 .read = amdgpu_debugfs_wave_read,
3429 .llseek = default_llseek
3430};
c5a60ce8
TSD
3431static const struct file_operations amdgpu_debugfs_gpr_fops = {
3432 .owner = THIS_MODULE,
3433 .read = amdgpu_debugfs_gpr_read,
3434 .llseek = default_llseek
3435};
273d7aa1 3436
adcec288
TSD
3437static const struct file_operations *debugfs_regs[] = {
3438 &amdgpu_debugfs_regs_fops,
3439 &amdgpu_debugfs_regs_didt_fops,
3440 &amdgpu_debugfs_regs_pcie_fops,
3441 &amdgpu_debugfs_regs_smc_fops,
1e051413 3442 &amdgpu_debugfs_gca_config_fops,
f2cdaf20 3443 &amdgpu_debugfs_sensors_fops,
273d7aa1 3444 &amdgpu_debugfs_wave_fops,
c5a60ce8 3445 &amdgpu_debugfs_gpr_fops,
adcec288
TSD
3446};
3447
3448static const char *debugfs_regs_names[] = {
3449 "amdgpu_regs",
3450 "amdgpu_regs_didt",
3451 "amdgpu_regs_pcie",
3452 "amdgpu_regs_smc",
1e051413 3453 "amdgpu_gca_config",
f2cdaf20 3454 "amdgpu_sensors",
273d7aa1 3455 "amdgpu_wave",
c5a60ce8 3456 "amdgpu_gpr",
adcec288 3457};
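
/*
 * Note (assumption): these entries are created under the DRM primary
 * minor's debugfs directory, typically
 * /sys/kernel/debug/dri/<minor>/amdgpu_regs and friends, when debugfs is
 * mounted in the usual place.
 */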
d38ceaf9
AD
3458
3459static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3460{
3461 struct drm_minor *minor = adev->ddev->primary;
3462 struct dentry *ent, *root = minor->debugfs_root;
adcec288
TSD
3463 unsigned i, j;
3464
3465 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3466 ent = debugfs_create_file(debugfs_regs_names[i],
3467 S_IFREG | S_IRUGO, root,
3468 adev, debugfs_regs[i]);
3469 if (IS_ERR(ent)) {
3470 for (j = 0; j < i; j++) {
3471 debugfs_remove(adev->debugfs_regs[j]);
3472 adev->debugfs_regs[j] = NULL;
3473 }
3474 return PTR_ERR(ent);
3475 }
d38ceaf9 3476
adcec288
TSD
3477 if (!i)
3478 i_size_write(ent->d_inode, adev->rmmio_size);
3479 adev->debugfs_regs[i] = ent;
3480 }
d38ceaf9
AD
3481
3482 return 0;
3483}
3484
3485static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
3486{
adcec288
TSD
3487 unsigned i;
3488
3489 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3490 if (adev->debugfs_regs[i]) {
3491 debugfs_remove(adev->debugfs_regs[i]);
3492 adev->debugfs_regs[i] = NULL;
3493 }
3494 }
d38ceaf9
AD
3495}
3496
3497int amdgpu_debugfs_init(struct drm_minor *minor)
3498{
3499 return 0;
3500}
7cebc728
AK
3501#else
3502static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3503{
3504 return 0;
3505}
3506static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
d38ceaf9 3507#endif