drm/amdgpu: Avoid overflows/divide-by-zero in latency_watermark calculations.
[linux-2.6-block.git] drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
d38ceaf9
AD
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
0875dc9e 28#include <linux/kthread.h>
d38ceaf9
AD
29#include <linux/console.h>
30#include <linux/slab.h>
31#include <linux/debugfs.h>
32#include <drm/drmP.h>
33#include <drm/drm_crtc_helper.h>
34#include <drm/amdgpu_drm.h>
35#include <linux/vgaarb.h>
36#include <linux/vga_switcheroo.h>
37#include <linux/efi.h>
38#include "amdgpu.h"
f4b373f4 39#include "amdgpu_trace.h"
d38ceaf9
AD
40#include "amdgpu_i2c.h"
41#include "atom.h"
42#include "amdgpu_atombios.h"
a5bde2f9 43#include "amdgpu_atomfirmware.h"
d0dd7f0c 44#include "amd_pcie.h"
33f34802
KW
45#ifdef CONFIG_DRM_AMDGPU_SI
46#include "si.h"
47#endif
a2e73f56
AD
48#ifdef CONFIG_DRM_AMDGPU_CIK
49#include "cik.h"
50#endif
aaa36a97 51#include "vi.h"
460826e6 52#include "soc15.h"
d38ceaf9 53#include "bif/bif_4_1_d.h"
9accf2fd 54#include <linux/pci.h>
bec86378 55#include <linux/firmware.h>
d1aff8ec 56#include "amdgpu_pm.h"
d38ceaf9
AD
57
58static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
59static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
60
61static const char *amdgpu_asic_name[] = {
da69c161
KW
62 "TAHITI",
63 "PITCAIRN",
64 "VERDE",
65 "OLAND",
66 "HAINAN",
d38ceaf9
AD
67 "BONAIRE",
68 "KAVERI",
69 "KABINI",
70 "HAWAII",
71 "MULLINS",
72 "TOPAZ",
73 "TONGA",
48299f95 74 "FIJI",
d38ceaf9 75 "CARRIZO",
139f4917 76 "STONEY",
2cc0c0b5
FC
77 "POLARIS10",
78 "POLARIS11",
c4642a47 79 "POLARIS12",
d4196f01 80 "VEGA10",
d38ceaf9
AD
81 "LAST",
82};
83
84bool amdgpu_device_is_px(struct drm_device *dev)
85{
86 struct amdgpu_device *adev = dev->dev_private;
87
2f7d10b3 88 if (adev->flags & AMD_IS_PX)
d38ceaf9
AD
89 return true;
90 return false;
91}
92
93/*
94 * MMIO register access helper functions.
95 */
96uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
15d72fd7 97 uint32_t acc_flags)
d38ceaf9 98{
f4b373f4
TSD
99 uint32_t ret;
100
15d72fd7 101 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
bc992ba5
XY
102 BUG_ON(in_interrupt());
103 return amdgpu_virt_kiq_rreg(adev, reg);
104 }
105
15d72fd7 106 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
f4b373f4 107 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
d38ceaf9
AD
108 else {
109 unsigned long flags;
d38ceaf9
AD
110
111 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
112 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
113 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
114 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
d38ceaf9 115 }
f4b373f4
TSD
116 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
117 return ret;
d38ceaf9
AD
118}
119
120void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
15d72fd7 121 uint32_t acc_flags)
d38ceaf9 122{
f4b373f4 123 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
4e99a44e 124
15d72fd7 125 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
bc992ba5
XY
126 BUG_ON(in_interrupt());
127 return amdgpu_virt_kiq_wreg(adev, reg, v);
128 }
129
15d72fd7 130 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
d38ceaf9
AD
131 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
132 else {
133 unsigned long flags;
134
135 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
136 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
137 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
138 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
139 }
140}
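/*
 * Illustrative sketch (not part of the original file): registers inside the
 * mapped MMIO BAR are accessed directly, while anything past rmmio_size goes
 * through the mmMM_INDEX/mmMM_DATA indirect pair under mmio_idx_lock. A
 * typical read-modify-write through these helpers looks like:
 *
 *	uint32_t tmp = amdgpu_mm_rreg(adev, reg, 0);
 *	amdgpu_mm_wreg(adev, reg, tmp | 0x1, 0);
 */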
141
142u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
143{
144 if ((reg * 4) < adev->rio_mem_size)
145 return ioread32(adev->rio_mem + (reg * 4));
146 else {
147 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
148 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
149 }
150}
151
152void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
153{
154
155 if ((reg * 4) < adev->rio_mem_size)
156 iowrite32(v, adev->rio_mem + (reg * 4));
157 else {
158 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
159 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
160 }
161}
162
163/**
164 * amdgpu_mm_rdoorbell - read a doorbell dword
165 *
166 * @adev: amdgpu_device pointer
167 * @index: doorbell index
168 *
169 * Returns the value in the doorbell aperture at the
170 * requested doorbell index (CIK).
171 */
172u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
173{
174 if (index < adev->doorbell.num_doorbells) {
175 return readl(adev->doorbell.ptr + index);
176 } else {
177 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
178 return 0;
179 }
180}
181
182/**
183 * amdgpu_mm_wdoorbell - write a doorbell dword
184 *
185 * @adev: amdgpu_device pointer
186 * @index: doorbell index
187 * @v: value to write
188 *
189 * Writes @v to the doorbell aperture at the
190 * requested doorbell index (CIK).
191 */
192void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
193{
194 if (index < adev->doorbell.num_doorbells) {
195 writel(v, adev->doorbell.ptr + index);
196 } else {
197 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
198 }
199}
200
832be404
KW
201/**
202 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
203 *
204 * @adev: amdgpu_device pointer
205 * @index: doorbell index
206 *
207 * Returns the value in the doorbell aperture at the
208 * requested doorbell index (VEGA10+).
209 */
210u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
211{
212 if (index < adev->doorbell.num_doorbells) {
213 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
214 } else {
215 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
216 return 0;
217 }
218}
219
220/**
221 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
222 *
223 * @adev: amdgpu_device pointer
224 * @index: doorbell index
225 * @v: value to write
226 *
227 * Writes @v to the doorbell aperture at the
228 * requested doorbell index (VEGA10+).
229 */
230void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
231{
232 if (index < adev->doorbell.num_doorbells) {
233 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
234 } else {
235 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
236 }
237}
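/*
 * Usage sketch (illustrative only): @index is a dword offset into the doorbell
 * BAR, so a ring that owns a doorbell slot kicks the engine with something like
 *
 *	amdgpu_mm_wdoorbell(adev, ring_doorbell_index, ring_wptr);
 *	amdgpu_mm_wdoorbell64(adev, ring_doorbell_index, ring_wptr64); // VEGA10+
 *
 * where ring_doorbell_index, ring_wptr and ring_wptr64 are hypothetical names,
 * not taken from this file.
 */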
238
d38ceaf9
AD
239/**
240 * amdgpu_invalid_rreg - dummy reg read function
241 *
242 * @adev: amdgpu device pointer
243 * @reg: offset of register
244 *
245 * Dummy register read function. Used for register blocks
246 * that certain asics don't have (all asics).
247 * Returns the value in the register.
248 */
249static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
250{
251 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
252 BUG();
253 return 0;
254}
255
256/**
257 * amdgpu_invalid_wreg - dummy reg write function
258 *
259 * @adev: amdgpu device pointer
260 * @reg: offset of register
261 * @v: value to write to the register
262 *
263 * Dummy register write function. Used for register blocks
264 * that certain asics don't have (all asics).
265 */
266static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
267{
268 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
269 reg, v);
270 BUG();
271}
272
273/**
274 * amdgpu_block_invalid_rreg - dummy reg read function
275 *
276 * @adev: amdgpu device pointer
277 * @block: offset of instance
278 * @reg: offset of register
279 *
280 * Dummy register read function. Used for register blocks
281 * that certain asics don't have (all asics).
282 * Returns the value in the register.
283 */
284static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
285 uint32_t block, uint32_t reg)
286{
287 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
288 reg, block);
289 BUG();
290 return 0;
291}
292
293/**
294 * amdgpu_block_invalid_wreg - dummy reg write function
295 *
296 * @adev: amdgpu device pointer
297 * @block: offset of instance
298 * @reg: offset of register
299 * @v: value to write to the register
300 *
301 * Dummy register write function. Used for register blocks
302 * that certain asics don't have (all asics).
303 */
304static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
305 uint32_t block,
306 uint32_t reg, uint32_t v)
307{
308 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
309 reg, block, v);
310 BUG();
311}
312
313static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
314{
315 int r;
316
317 if (adev->vram_scratch.robj == NULL) {
318 r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
857d913d 319 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
03f48dd5
CK
320 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
321 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
72d7668b 322 NULL, NULL, &adev->vram_scratch.robj);
d38ceaf9
AD
323 if (r) {
324 return r;
325 }
326 }
327
328 r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
329 if (unlikely(r != 0))
330 return r;
331 r = amdgpu_bo_pin(adev->vram_scratch.robj,
332 AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
333 if (r) {
334 amdgpu_bo_unreserve(adev->vram_scratch.robj);
335 return r;
336 }
337 r = amdgpu_bo_kmap(adev->vram_scratch.robj,
338 (void **)&adev->vram_scratch.ptr);
339 if (r)
340 amdgpu_bo_unpin(adev->vram_scratch.robj);
341 amdgpu_bo_unreserve(adev->vram_scratch.robj);
342
343 return r;
344}
345
346static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
347{
348 int r;
349
350 if (adev->vram_scratch.robj == NULL) {
351 return;
352 }
353 r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
354 if (likely(r == 0)) {
355 amdgpu_bo_kunmap(adev->vram_scratch.robj);
356 amdgpu_bo_unpin(adev->vram_scratch.robj);
357 amdgpu_bo_unreserve(adev->vram_scratch.robj);
358 }
359 amdgpu_bo_unref(&adev->vram_scratch.robj);
360}
361
362/**
363 * amdgpu_program_register_sequence - program an array of registers.
364 *
365 * @adev: amdgpu_device pointer
366 * @registers: pointer to the register array
367 * @array_size: size of the register array
368 *
369 * Programs an array of registers with AND and OR masks.
370 * This is a helper for setting golden registers.
371 */
372void amdgpu_program_register_sequence(struct amdgpu_device *adev,
373 const u32 *registers,
374 const u32 array_size)
375{
376 u32 tmp, reg, and_mask, or_mask;
377 int i;
378
379 if (array_size % 3)
380 return;
381
382 for (i = 0; i < array_size; i +=3) {
383 reg = registers[i + 0];
384 and_mask = registers[i + 1];
385 or_mask = registers[i + 2];
386
387 if (and_mask == 0xffffffff) {
388 tmp = or_mask;
389 } else {
390 tmp = RREG32(reg);
391 tmp &= ~and_mask;
392 tmp |= or_mask;
393 }
394 WREG32(reg, tmp);
395 }
396}
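/*
 * Illustrative sketch (not from this file): a golden-register table is a flat
 * array of {reg, and_mask, or_mask} triples. An and_mask of 0xffffffff means
 * "write or_mask verbatim"; anything else is a read-modify-write that clears
 * the and_mask bits and ORs in or_mask. The offsets/masks below are made up:
 *
 *	static const u32 example_golden_settings[] = {
 *		0x340c, 0x00000ff0, 0x00000040,	// read-modify-write
 *		0x88d4, 0xffffffff, 0x00000001,	// straight write
 *	};
 *	amdgpu_program_register_sequence(adev, example_golden_settings,
 *					 ARRAY_SIZE(example_golden_settings));
 */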
397
398void amdgpu_pci_config_reset(struct amdgpu_device *adev)
399{
400 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
401}
402
403/*
404 * GPU doorbell aperture helpers function.
405 */
406/**
407 * amdgpu_doorbell_init - Init doorbell driver information.
408 *
409 * @adev: amdgpu_device pointer
410 *
411 * Init doorbell driver information (CIK)
412 * Returns 0 on success, error on failure.
413 */
414static int amdgpu_doorbell_init(struct amdgpu_device *adev)
415{
416 /* doorbell bar mapping */
417 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
418 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
419
edf600da 420 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
d38ceaf9
AD
421 AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
422 if (adev->doorbell.num_doorbells == 0)
423 return -EINVAL;
424
425 adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32));
426 if (adev->doorbell.ptr == NULL) {
427 return -ENOMEM;
428 }
429 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base);
430 DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size);
431
432 return 0;
433}
434
435/**
436 * amdgpu_doorbell_fini - Tear down doorbell driver information.
437 *
438 * @adev: amdgpu_device pointer
439 *
440 * Tear down doorbell driver information (CIK)
441 */
442static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
443{
444 iounmap(adev->doorbell.ptr);
445 adev->doorbell.ptr = NULL;
446}
447
448/**
449 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
450 * setup amdkfd
451 *
452 * @adev: amdgpu_device pointer
453 * @aperture_base: output returning doorbell aperture base physical address
454 * @aperture_size: output returning doorbell aperture size in bytes
455 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
456 *
457 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
458 * takes doorbells required for its own rings and reports the setup to amdkfd.
459 * amdgpu reserved doorbells are at the start of the doorbell aperture.
460 */
461void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
462 phys_addr_t *aperture_base,
463 size_t *aperture_size,
464 size_t *start_offset)
465{
466 /*
467 * The first num_doorbells are used by amdgpu.
468 * amdkfd takes whatever's left in the aperture.
469 */
470 if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
471 *aperture_base = adev->doorbell.base;
472 *aperture_size = adev->doorbell.size;
473 *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
474 } else {
475 *aperture_base = 0;
476 *aperture_size = 0;
477 *start_offset = 0;
478 }
479}
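/*
 * Worked example (illustrative): with a 2MB doorbell BAR and 1024 doorbells
 * claimed by amdgpu, amdkfd would be reported aperture_base = doorbell.base,
 * aperture_size = 2MB and start_offset = 1024 * sizeof(u32) = 4096 bytes,
 * i.e. everything past the amdgpu-owned slots.
 */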
480
481/*
482 * amdgpu_wb_*()
483 * Writeback is the method by which the GPU updates special pages
484 * in memory with the status of certain GPU events (fences, ring pointers,
485 * etc.).
486 */
487
488/**
489 * amdgpu_wb_fini - Disable Writeback and free memory
490 *
491 * @adev: amdgpu_device pointer
492 *
493 * Disables Writeback and frees the Writeback memory (all asics).
494 * Used at driver shutdown.
495 */
496static void amdgpu_wb_fini(struct amdgpu_device *adev)
497{
498 if (adev->wb.wb_obj) {
a76ed485
AD
499 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
500 &adev->wb.gpu_addr,
501 (void **)&adev->wb.wb);
d38ceaf9
AD
502 adev->wb.wb_obj = NULL;
503 }
504}
505
506/**
507 * amdgpu_wb_init - Init Writeback driver info and allocate memory
508 *
509 * @adev: amdgpu_device pointer
510 *
511 * Initializes writeback and allocates the writeback memory (all asics).
512 * Used at driver startup.
513 * Returns 0 on success or a negative error code on failure.
514 */
515static int amdgpu_wb_init(struct amdgpu_device *adev)
516{
517 int r;
518
519 if (adev->wb.wb_obj == NULL) {
60a970a6 520 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
a76ed485
AD
521 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
522 &adev->wb.wb_obj, &adev->wb.gpu_addr,
523 (void **)&adev->wb.wb);
d38ceaf9
AD
524 if (r) {
525 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
526 return r;
527 }
d38ceaf9
AD
528
529 adev->wb.num_wb = AMDGPU_MAX_WB;
530 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
531
532 /* clear wb memory */
60a970a6 533 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
d38ceaf9
AD
534 }
535
536 return 0;
537}
538
539/**
540 * amdgpu_wb_get - Allocate a wb entry
541 *
542 * @adev: amdgpu_device pointer
543 * @wb: wb index
544 *
545 * Allocate a wb slot for use by the driver (all asics).
546 * Returns 0 on success or -EINVAL on failure.
547 */
548int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
549{
550 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
551 if (offset < adev->wb.num_wb) {
552 __set_bit(offset, adev->wb.used);
553 *wb = offset;
554 return 0;
555 } else {
556 return -EINVAL;
557 }
558}
559
7014285a
KW
560/**
561 * amdgpu_wb_get_64bit - Allocate a 64bit wb entry
562 *
563 * @adev: amdgpu_device pointer
564 * @wb: wb index
565 *
566 * Allocate a 64bit wb entry (two consecutive slots) for use by the driver (all asics).
567 * Returns 0 on success or -EINVAL on failure.
568 */
569int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
570{
571 unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
572 adev->wb.num_wb, 0, 2, 7, 0);
573 if ((offset + 1) < adev->wb.num_wb) {
574 __set_bit(offset, adev->wb.used);
575 __set_bit(offset + 1, adev->wb.used);
576 *wb = offset;
577 return 0;
578 } else {
579 return -EINVAL;
580 }
581}
582
d38ceaf9
AD
583/**
584 * amdgpu_wb_free - Free a wb entry
585 *
586 * @adev: amdgpu_device pointer
587 * @wb: wb index
588 *
589 * Free a wb slot allocated for use by the driver (all asics)
590 */
591void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
592{
593 if (wb < adev->wb.num_wb)
594 __clear_bit(wb, adev->wb.used);
595}
596
7014285a
KW
597/**
598 * amdgpu_wb_free_64bit - Free a wb entry
599 *
600 * @adev: amdgpu_device pointer
601 * @wb: wb index
602 *
603 * Free a 64bit wb entry (two consecutive slots) allocated for use by the driver (all asics)
604 */
605void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
606{
607 if ((wb + 1) < adev->wb.num_wb) {
608 __clear_bit(wb, adev->wb.used);
609 __clear_bit(wb + 1, adev->wb.used);
610 }
611}
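/*
 * Usage sketch (illustrative, not from this file): a ring typically grabs a
 * writeback slot at init, derives the GPU and CPU addresses from the slot
 * index, and releases it on teardown:
 *
 *	u32 wb;
 *	if (!amdgpu_wb_get(adev, &wb)) {
 *		u64 gpu_addr = adev->wb.gpu_addr + wb * 4;
 *		volatile u32 *cpu_ptr = &adev->wb.wb[wb];
 *		...
 *		amdgpu_wb_free(adev, wb);
 *	}
 */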
612
d38ceaf9
AD
613/**
614 * amdgpu_vram_location - try to find VRAM location
615 * @adev: amdgpu device structure holding all necessary information
616 * @mc: memory controller structure holding memory information
617 * @base: base address at which to put VRAM
618 *
619 * Tries to place VRAM at the base address provided as a parameter
620 * (which is so far either the PCI aperture address or,
621 * for IGP, the TOM base address).
622 *
623 * If there is not enough space to fit the invisible VRAM in the 32-bit
624 * address space, then we limit the VRAM size to the aperture.
625 *
626 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
627 * this shouldn't be a problem as we are using the PCI aperture as a reference.
628 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
629 * not IGP.
630 *
631 * Note: we use mc_vram_size because on some boards we need to program the mc
632 * to cover the whole aperture even if the VRAM size is smaller than the
633 * aperture size (Novell bug 204882, along with lots of Ubuntu ones).
634 *
635 * Note: when limiting vram it's safe to overwrite real_vram_size because
636 * we are not in the case where real_vram_size is smaller than mc_vram_size
637 * (i.e. not affected by the bogus hw of Novell bug 204882, along with lots
638 * of Ubuntu ones).
639 *
640 * Note: IGP TOM addr should be the same as the aperture addr, we don't
641 * explicitly check for that though.
642 *
643 * FIXME: when reducing VRAM size align new size on power of 2.
644 */
645void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
646{
647 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
648
649 mc->vram_start = base;
650 if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
651 dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
652 mc->real_vram_size = mc->aper_size;
653 mc->mc_vram_size = mc->aper_size;
654 }
655 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
656 if (limit && limit < mc->real_vram_size)
657 mc->real_vram_size = limit;
658 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
659 mc->mc_vram_size >> 20, mc->vram_start,
660 mc->vram_end, mc->real_vram_size >> 20);
661}
662
663/**
664 * amdgpu_gtt_location - try to find GTT location
665 * @adev: amdgpu device structure holding all necessary information
666 * @mc: memory controller structure holding memory information
667 *
668 * Tries to place GTT before or after VRAM.
669 *
670 * If the GTT size is bigger than the space left, then we adjust the GTT size.
671 * Thus this function will never fail.
672 *
673 * FIXME: when reducing GTT size align new size on power of 2.
674 */
675void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
676{
677 u64 size_af, size_bf;
678
679 size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
680 size_bf = mc->vram_start & ~mc->gtt_base_align;
681 if (size_bf > size_af) {
682 if (mc->gtt_size > size_bf) {
683 dev_warn(adev->dev, "limiting GTT\n");
684 mc->gtt_size = size_bf;
685 }
9dc5a91e 686 mc->gtt_start = 0;
d38ceaf9
AD
687 } else {
688 if (mc->gtt_size > size_af) {
689 dev_warn(adev->dev, "limiting GTT\n");
690 mc->gtt_size = size_af;
691 }
692 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
693 }
694 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
695 dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
696 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
697}
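/*
 * Worked example (illustrative, assuming gtt_base_align == 0): with 4GB of
 * VRAM at base 0, vram_end is 0xffffffff and size_bf is 0, so the GTT lands
 * after VRAM: a 512MB GTT gives gtt_start = 0x100000000 and
 * gtt_end = 0x11fffffff.
 */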
698
699/*
700 * GPU helpers function.
701 */
702/**
c836fec5 703 * amdgpu_need_post - check if the hw needs post or not
d38ceaf9
AD
704 *
705 * @adev: amdgpu_device pointer
706 *
c836fec5
JQ
707 * Check if the asic has been initialized (all asics) at driver startup,
708 * or if a post is needed after a hw reset is performed.
709 * Returns true if post is needed, false if not.
d38ceaf9 710 */
c836fec5 711bool amdgpu_need_post(struct amdgpu_device *adev)
d38ceaf9
AD
712{
713 uint32_t reg;
714
c836fec5
JQ
715 if (adev->has_hw_reset) {
716 adev->has_hw_reset = false;
717 return true;
718 }
d38ceaf9 719 /* then check MEM_SIZE, in case the crtcs are off */
bbf282d8 720 reg = amdgpu_asic_get_config_memsize(adev);
d38ceaf9 721
f2713e8c 722 if ((reg != 0) && (reg != 0xffffffff))
c836fec5 723 return false;
d38ceaf9 724
c836fec5 725 return true;
d38ceaf9
AD
726
727}
728
bec86378
ML
729static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
730{
731 if (amdgpu_sriov_vf(adev))
732 return false;
733
734 if (amdgpu_passthrough(adev)) {
1da2c326
ML
735 /* for FIJI: in the whole-GPU pass-through virtualization case, after a VM
736 * reboot some old SMC firmware still needs the driver to do a vPost or the
737 * GPU hangs; SMC firmware versions above 22.15 don't have this flaw, so we
738 * force a vPost for SMC versions below 22.15
bec86378
ML
739 */
740 if (adev->asic_type == CHIP_FIJI) {
741 int err;
742 uint32_t fw_ver;
743 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
744 /* force vPost if an error occurred */
745 if (err)
746 return true;
747
748 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1da2c326
ML
749 if (fw_ver < 0x00160e00)
750 return true;
bec86378 751 }
bec86378 752 }
c836fec5 753 return amdgpu_need_post(adev);
bec86378
ML
754}
755
d38ceaf9
AD
756/**
757 * amdgpu_dummy_page_init - init dummy page used by the driver
758 *
759 * @adev: amdgpu_device pointer
760 *
761 * Allocate the dummy page used by the driver (all asics).
762 * This dummy page is used by the driver as a filler for gart entries
763 * when pages are taken out of the GART
764 * Returns 0 on success, -ENOMEM on failure.
765 */
766int amdgpu_dummy_page_init(struct amdgpu_device *adev)
767{
768 if (adev->dummy_page.page)
769 return 0;
770 adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
771 if (adev->dummy_page.page == NULL)
772 return -ENOMEM;
773 adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
774 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
775 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
776 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
777 __free_page(adev->dummy_page.page);
778 adev->dummy_page.page = NULL;
779 return -ENOMEM;
780 }
781 return 0;
782}
783
784/**
785 * amdgpu_dummy_page_fini - free dummy page used by the driver
786 *
787 * @adev: amdgpu_device pointer
788 *
789 * Frees the dummy page used by the driver (all asics).
790 */
791void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
792{
793 if (adev->dummy_page.page == NULL)
794 return;
795 pci_unmap_page(adev->pdev, adev->dummy_page.addr,
796 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
797 __free_page(adev->dummy_page.page);
798 adev->dummy_page.page = NULL;
799}
800
801
802/* ATOM accessor methods */
803/*
804 * ATOM is an interpreted byte code stored in tables in the vbios. The
805 * driver registers callbacks to access registers and the interpreter
806 * in the driver parses the tables and executes them to program specific
807 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
808 * atombios.h, and atom.c
809 */
810
811/**
812 * cail_pll_read - read PLL register
813 *
814 * @info: atom card_info pointer
815 * @reg: PLL register offset
816 *
817 * Provides a PLL register accessor for the atom interpreter (r4xx+).
818 * Returns the value of the PLL register.
819 */
820static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
821{
822 return 0;
823}
824
825/**
826 * cail_pll_write - write PLL register
827 *
828 * @info: atom card_info pointer
829 * @reg: PLL register offset
830 * @val: value to write to the pll register
831 *
832 * Provides a PLL register accessor for the atom interpreter (r4xx+).
833 */
834static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
835{
836
837}
838
839/**
840 * cail_mc_read - read MC (Memory Controller) register
841 *
842 * @info: atom card_info pointer
843 * @reg: MC register offset
844 *
845 * Provides an MC register accessor for the atom interpreter (r4xx+).
846 * Returns the value of the MC register.
847 */
848static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
849{
850 return 0;
851}
852
853/**
854 * cail_mc_write - write MC (Memory Controller) register
855 *
856 * @info: atom card_info pointer
857 * @reg: MC register offset
858 * @val: value to write to the MC register
859 *
860 * Provides an MC register accessor for the atom interpreter (r4xx+).
861 */
862static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
863{
864
865}
866
867/**
868 * cail_reg_write - write MMIO register
869 *
870 * @info: atom card_info pointer
871 * @reg: MMIO register offset
872 * @val: value to write to the MMIO register
873 *
874 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
875 */
876static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
877{
878 struct amdgpu_device *adev = info->dev->dev_private;
879
880 WREG32(reg, val);
881}
882
883/**
884 * cail_reg_read - read MMIO register
885 *
886 * @info: atom card_info pointer
887 * @reg: MMIO register offset
888 *
889 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
890 * Returns the value of the MMIO register.
891 */
892static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
893{
894 struct amdgpu_device *adev = info->dev->dev_private;
895 uint32_t r;
896
897 r = RREG32(reg);
898 return r;
899}
900
901/**
902 * cail_ioreg_write - write IO register
903 *
904 * @info: atom card_info pointer
905 * @reg: IO register offset
906 * @val: value to write to the IO register
907 *
908 * Provides an IO register accessor for the atom interpreter (r4xx+).
909 */
910static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
911{
912 struct amdgpu_device *adev = info->dev->dev_private;
913
914 WREG32_IO(reg, val);
915}
916
917/**
918 * cail_ioreg_read - read IO register
919 *
920 * @info: atom card_info pointer
921 * @reg: IO register offset
922 *
923 * Provides an IO register accessor for the atom interpreter (r4xx+).
924 * Returns the value of the IO register.
925 */
926static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
927{
928 struct amdgpu_device *adev = info->dev->dev_private;
929 uint32_t r;
930
931 r = RREG32_IO(reg);
932 return r;
933}
934
935/**
936 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
937 *
938 * @adev: amdgpu_device pointer
939 *
940 * Frees the driver info and register access callbacks for the ATOM
941 * interpreter (r4xx+).
942 * Called at driver shutdown.
943 */
944static void amdgpu_atombios_fini(struct amdgpu_device *adev)
945{
89e0ec9f 946 if (adev->mode_info.atom_context) {
d38ceaf9 947 kfree(adev->mode_info.atom_context->scratch);
89e0ec9f
ML
948 kfree(adev->mode_info.atom_context->iio);
949 }
d38ceaf9
AD
950 kfree(adev->mode_info.atom_context);
951 adev->mode_info.atom_context = NULL;
952 kfree(adev->mode_info.atom_card_info);
953 adev->mode_info.atom_card_info = NULL;
954}
955
956/**
957 * amdgpu_atombios_init - init the driver info and callbacks for atombios
958 *
959 * @adev: amdgpu_device pointer
960 *
961 * Initializes the driver info and register access callbacks for the
962 * ATOM interpreter (r4xx+).
963 * Returns 0 on success, -ENOMEM on failure.
964 * Called at driver startup.
965 */
966static int amdgpu_atombios_init(struct amdgpu_device *adev)
967{
968 struct card_info *atom_card_info =
969 kzalloc(sizeof(struct card_info), GFP_KERNEL);
970
971 if (!atom_card_info)
972 return -ENOMEM;
973
974 adev->mode_info.atom_card_info = atom_card_info;
975 atom_card_info->dev = adev->ddev;
976 atom_card_info->reg_read = cail_reg_read;
977 atom_card_info->reg_write = cail_reg_write;
978 /* needed for iio ops */
979 if (adev->rio_mem) {
980 atom_card_info->ioreg_read = cail_ioreg_read;
981 atom_card_info->ioreg_write = cail_ioreg_write;
982 } else {
b64a18c5 983 DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
d38ceaf9
AD
984 atom_card_info->ioreg_read = cail_reg_read;
985 atom_card_info->ioreg_write = cail_reg_write;
986 }
987 atom_card_info->mc_read = cail_mc_read;
988 atom_card_info->mc_write = cail_mc_write;
989 atom_card_info->pll_read = cail_pll_read;
990 atom_card_info->pll_write = cail_pll_write;
991
992 adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
993 if (!adev->mode_info.atom_context) {
994 amdgpu_atombios_fini(adev);
995 return -ENOMEM;
996 }
997
998 mutex_init(&adev->mode_info.atom_context->mutex);
a5bde2f9
AD
999 if (adev->is_atom_fw) {
1000 amdgpu_atomfirmware_scratch_regs_init(adev);
1001 amdgpu_atomfirmware_allocate_fb_scratch(adev);
1002 } else {
1003 amdgpu_atombios_scratch_regs_init(adev);
1004 amdgpu_atombios_allocate_fb_scratch(adev);
1005 }
d38ceaf9
AD
1006 return 0;
1007}
1008
1009/* if we get transitioned to only one device, take VGA back */
1010/**
1011 * amdgpu_vga_set_decode - enable/disable vga decode
1012 *
1013 * @cookie: amdgpu_device pointer
1014 * @state: enable/disable vga decode
1015 *
1016 * Enable/disable vga decode (all asics).
1017 * Returns VGA resource flags.
1018 */
1019static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
1020{
1021 struct amdgpu_device *adev = cookie;
1022 amdgpu_asic_set_vga_state(adev, state);
1023 if (state)
1024 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1025 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1026 else
1027 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1028}
1029
1030/**
1031 * amdgpu_check_pot_argument - check that argument is a power of two
1032 *
1033 * @arg: value to check
1034 *
1035 * Validates that a certain argument is a power of two (all asics).
1036 * Returns true if argument is valid.
1037 */
1038static bool amdgpu_check_pot_argument(int arg)
1039{
1040 return (arg & (arg - 1)) == 0;
1041}
1042
a1adf8be
CZ
1043static void amdgpu_get_block_size(struct amdgpu_device *adev)
1044{
1045 /* defines the number of bits in the page table versus the page directory:
1046 * a page is 4KB so we have 12 bits of offset, a minimum of 9 bits in the
1047 * page table and the remaining bits in the page directory */
1048 if (amdgpu_vm_block_size == -1) {
1049
1050 /* Total bits covered by PD + PTs */
1051 unsigned bits = ilog2(amdgpu_vm_size) + 18;
1052
1053 /* Make sure the PD is 4K in size up to 8GB address space.
1054 Above that, split equally between PD and PTs */
1055 if (amdgpu_vm_size <= 8)
1056 amdgpu_vm_block_size = bits - 9;
1057 else
1058 amdgpu_vm_block_size = (bits + 3) / 2;
1059
1060 } else if (amdgpu_vm_block_size < 9) {
1061 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1062 amdgpu_vm_block_size);
1063 amdgpu_vm_block_size = 9;
1064 }
1065
1066 if (amdgpu_vm_block_size > 24 ||
1067 (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
1068 dev_warn(adev->dev, "VM page table size (%d) too large\n",
1069 amdgpu_vm_block_size);
1070 amdgpu_vm_block_size = 9;
1071 }
1072}
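/*
 * Worked example (illustrative): with amdgpu_vm_size = 8 (GB),
 * bits = ilog2(8) + 18 = 21 and amdgpu_vm_block_size = 21 - 9 = 12, i.e. a
 * 9-bit (4K) page directory with 12-bit page tables. With amdgpu_vm_size = 64,
 * bits = 24 and the block size becomes (24 + 3) / 2 = 13, splitting the bits
 * roughly equally between the PD and the PTs.
 */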
1073
83ca145d
ZJ
1074static void amdgpu_check_vm_size(struct amdgpu_device *adev)
1075{
1076 if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
1077 dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
1078 amdgpu_vm_size);
1079 goto def_value;
1080 }
1081
1082 if (amdgpu_vm_size < 1) {
1083 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1084 amdgpu_vm_size);
1085 goto def_value;
1086 }
1087
1088 /*
1089 * Max GPUVM size for Cayman, SI, CI and VI is 40 bits.
1090 */
1091 if (amdgpu_vm_size > 1024) {
1092 dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
1093 amdgpu_vm_size);
1094 goto def_value;
1095 }
1096
1097 return;
1098
1099def_value:
1100 amdgpu_vm_size = 8;
1101 dev_info(adev->dev, "set default VM size %dGB\n", amdgpu_vm_size);
1102}
1103
d38ceaf9
AD
1104/**
1105 * amdgpu_check_arguments - validate module params
1106 *
1107 * @adev: amdgpu_device pointer
1108 *
1109 * Validates certain module parameters and updates
1110 * the associated values used by the driver (all asics).
1111 */
1112static void amdgpu_check_arguments(struct amdgpu_device *adev)
1113{
5b011235
CZ
1114 if (amdgpu_sched_jobs < 4) {
1115 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1116 amdgpu_sched_jobs);
1117 amdgpu_sched_jobs = 4;
1118 } else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)){
1119 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1120 amdgpu_sched_jobs);
1121 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1122 }
d38ceaf9
AD
1123
1124 if (amdgpu_gart_size != -1) {
c4e1a13a 1125 /* gtt size must be greater than or equal to 32M */
d38ceaf9
AD
1126 if (amdgpu_gart_size < 32) {
1127 dev_warn(adev->dev, "gart size (%d) too small\n",
1128 amdgpu_gart_size);
1129 amdgpu_gart_size = -1;
d38ceaf9
AD
1130 }
1131 }
1132
83ca145d 1133 amdgpu_check_vm_size(adev);
d38ceaf9 1134
a1adf8be 1135 amdgpu_get_block_size(adev);
6a7f76e7 1136
526bae37 1137 if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
1138 !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
6a7f76e7
CK
1139 dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
1140 amdgpu_vram_page_split);
1141 amdgpu_vram_page_split = 1024;
1142 }
d38ceaf9
AD
1143}
1144
1145/**
1146 * amdgpu_switcheroo_set_state - set switcheroo state
1147 *
1148 * @pdev: pci dev pointer
1694467b 1149 * @state: vga_switcheroo state
d38ceaf9
AD
1150 *
1151 * Callback for the switcheroo driver. Suspends or resumes the
1152 * asic before or after it is powered up using ACPI methods.
1153 */
1154static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1155{
1156 struct drm_device *dev = pci_get_drvdata(pdev);
1157
1158 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1159 return;
1160
1161 if (state == VGA_SWITCHEROO_ON) {
1162 unsigned d3_delay = dev->pdev->d3_delay;
1163
7ca85295 1164 pr_info("amdgpu: switched on\n");
d38ceaf9
AD
1165 /* don't suspend or resume card normally */
1166 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1167
810ddc3a 1168 amdgpu_device_resume(dev, true, true);
d38ceaf9
AD
1169
1170 dev->pdev->d3_delay = d3_delay;
1171
1172 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1173 drm_kms_helper_poll_enable(dev);
1174 } else {
7ca85295 1175 pr_info("amdgpu: switched off\n");
d38ceaf9
AD
1176 drm_kms_helper_poll_disable(dev);
1177 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
810ddc3a 1178 amdgpu_device_suspend(dev, true, true);
d38ceaf9
AD
1179 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1180 }
1181}
1182
1183/**
1184 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1185 *
1186 * @pdev: pci dev pointer
1187 *
1188 * Callback for the switcheroo driver. Check if the switcheroo
1189 * state can be changed.
1190 * Returns true if the state can be changed, false if not.
1191 */
1192static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1193{
1194 struct drm_device *dev = pci_get_drvdata(pdev);
1195
1196 /*
1197 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1198 * locking inversion with the driver load path. And the access here is
1199 * completely racy anyway. So don't bother with locking for now.
1200 */
1201 return dev->open_count == 0;
1202}
1203
1204static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1205 .set_gpu_state = amdgpu_switcheroo_set_state,
1206 .reprobe = NULL,
1207 .can_switch = amdgpu_switcheroo_can_switch,
1208};
1209
1210int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
5fc3aeeb 1211 enum amd_ip_block_type block_type,
1212 enum amd_clockgating_state state)
d38ceaf9
AD
1213{
1214 int i, r = 0;
1215
1216 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1217 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1218 continue;
c722865a
RZ
1219 if (adev->ip_blocks[i].version->type != block_type)
1220 continue;
1221 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1222 continue;
1223 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1224 (void *)adev, state);
1225 if (r)
1226 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1227 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
1228 }
1229 return r;
1230}
1231
1232int amdgpu_set_powergating_state(struct amdgpu_device *adev,
5fc3aeeb 1233 enum amd_ip_block_type block_type,
1234 enum amd_powergating_state state)
d38ceaf9
AD
1235{
1236 int i, r = 0;
1237
1238 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1239 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1240 continue;
c722865a
RZ
1241 if (adev->ip_blocks[i].version->type != block_type)
1242 continue;
1243 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1244 continue;
1245 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1246 (void *)adev, state);
1247 if (r)
1248 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1249 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
1250 }
1251 return r;
1252}
1253
6cb2d4e4
HR
1254void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
1255{
1256 int i;
1257
1258 for (i = 0; i < adev->num_ip_blocks; i++) {
1259 if (!adev->ip_blocks[i].status.valid)
1260 continue;
1261 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1262 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1263 }
1264}
1265
5dbbb60b
AD
1266int amdgpu_wait_for_idle(struct amdgpu_device *adev,
1267 enum amd_ip_block_type block_type)
1268{
1269 int i, r;
1270
1271 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1272 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1273 continue;
a1255107
AD
1274 if (adev->ip_blocks[i].version->type == block_type) {
1275 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
5dbbb60b
AD
1276 if (r)
1277 return r;
1278 break;
1279 }
1280 }
1281 return 0;
1282
1283}
1284
1285bool amdgpu_is_idle(struct amdgpu_device *adev,
1286 enum amd_ip_block_type block_type)
1287{
1288 int i;
1289
1290 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1291 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1292 continue;
a1255107
AD
1293 if (adev->ip_blocks[i].version->type == block_type)
1294 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
5dbbb60b
AD
1295 }
1296 return true;
1297
1298}
1299
a1255107
AD
1300struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
1301 enum amd_ip_block_type type)
d38ceaf9
AD
1302{
1303 int i;
1304
1305 for (i = 0; i < adev->num_ip_blocks; i++)
a1255107 1306 if (adev->ip_blocks[i].version->type == type)
d38ceaf9
AD
1307 return &adev->ip_blocks[i];
1308
1309 return NULL;
1310}
1311
1312/**
1313 * amdgpu_ip_block_version_cmp
1314 *
1315 * @adev: amdgpu_device pointer
5fc3aeeb 1316 * @type: enum amd_ip_block_type
d38ceaf9
AD
1317 * @major: major version
1318 * @minor: minor version
1319 *
1320 * Returns 0 if the IP block version is equal or greater,
1321 * 1 if smaller or if the ip_block doesn't exist.
1322 */
1323int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
5fc3aeeb 1324 enum amd_ip_block_type type,
d38ceaf9
AD
1325 u32 major, u32 minor)
1326{
a1255107 1327 struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
d38ceaf9 1328
a1255107
AD
1329 if (ip_block && ((ip_block->version->major > major) ||
1330 ((ip_block->version->major == major) &&
1331 (ip_block->version->minor >= minor))))
d38ceaf9
AD
1332 return 0;
1333
1334 return 1;
1335}
1336
a1255107
AD
1337/**
1338 * amdgpu_ip_block_add
1339 *
1340 * @adev: amdgpu_device pointer
1341 * @ip_block_version: pointer to the IP to add
1342 *
1343 * Adds the IP block driver information to the collection of IPs
1344 * on the asic.
1345 */
1346int amdgpu_ip_block_add(struct amdgpu_device *adev,
1347 const struct amdgpu_ip_block_version *ip_block_version)
1348{
1349 if (!ip_block_version)
1350 return -EINVAL;
1351
1352 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1353
1354 return 0;
1355}
1356
483ef985 1357static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
9accf2fd
ED
1358{
1359 adev->enable_virtual_display = false;
1360
1361 if (amdgpu_virtual_display) {
1362 struct drm_device *ddev = adev->ddev;
1363 const char *pci_address_name = pci_name(ddev->pdev);
0f66356d 1364 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
9accf2fd
ED
1365
1366 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1367 pciaddstr_tmp = pciaddstr;
0f66356d
ED
1368 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1369 pciaddname = strsep(&pciaddname_tmp, ",");
967de2a9
YT
1370 if (!strcmp("all", pciaddname)
1371 || !strcmp(pci_address_name, pciaddname)) {
0f66356d
ED
1372 long num_crtc;
1373 int res = -1;
1374
9accf2fd 1375 adev->enable_virtual_display = true;
0f66356d
ED
1376
1377 if (pciaddname_tmp)
1378 res = kstrtol(pciaddname_tmp, 10,
1379 &num_crtc);
1380
1381 if (!res) {
1382 if (num_crtc < 1)
1383 num_crtc = 1;
1384 if (num_crtc > 6)
1385 num_crtc = 6;
1386 adev->mode_info.num_crtc = num_crtc;
1387 } else {
1388 adev->mode_info.num_crtc = 1;
1389 }
9accf2fd
ED
1390 break;
1391 }
1392 }
1393
0f66356d
ED
1394 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1395 amdgpu_virtual_display, pci_address_name,
1396 adev->enable_virtual_display, adev->mode_info.num_crtc);
9accf2fd
ED
1397
1398 kfree(pciaddstr);
1399 }
1400}
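/*
 * Parameter format sketch (illustrative): amdgpu_virtual_display is parsed as
 * a ';' separated list of "pci_address,num_crtc" entries, e.g. on the kernel
 * command line:
 *
 *	amdgpu.virtual_display=0000:01:00.0,2
 *	amdgpu.virtual_display=all,1
 *
 * "all" matches every device; a missing or unparsable crtc count falls back
 * to 1, and the value is clamped to the 1..6 range.
 */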
1401
d38ceaf9
AD
1402static int amdgpu_early_init(struct amdgpu_device *adev)
1403{
aaa36a97 1404 int i, r;
d38ceaf9 1405
483ef985 1406 amdgpu_device_enable_virtual_display(adev);
a6be7570 1407
d38ceaf9 1408 switch (adev->asic_type) {
aaa36a97
AD
1409 case CHIP_TOPAZ:
1410 case CHIP_TONGA:
48299f95 1411 case CHIP_FIJI:
2cc0c0b5
FC
1412 case CHIP_POLARIS11:
1413 case CHIP_POLARIS10:
c4642a47 1414 case CHIP_POLARIS12:
aaa36a97 1415 case CHIP_CARRIZO:
39bb0c92
SL
1416 case CHIP_STONEY:
1417 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
aaa36a97
AD
1418 adev->family = AMDGPU_FAMILY_CZ;
1419 else
1420 adev->family = AMDGPU_FAMILY_VI;
1421
1422 r = vi_set_ip_blocks(adev);
1423 if (r)
1424 return r;
1425 break;
33f34802
KW
1426#ifdef CONFIG_DRM_AMDGPU_SI
1427 case CHIP_VERDE:
1428 case CHIP_TAHITI:
1429 case CHIP_PITCAIRN:
1430 case CHIP_OLAND:
1431 case CHIP_HAINAN:
295d0daf 1432 adev->family = AMDGPU_FAMILY_SI;
33f34802
KW
1433 r = si_set_ip_blocks(adev);
1434 if (r)
1435 return r;
1436 break;
1437#endif
a2e73f56
AD
1438#ifdef CONFIG_DRM_AMDGPU_CIK
1439 case CHIP_BONAIRE:
1440 case CHIP_HAWAII:
1441 case CHIP_KAVERI:
1442 case CHIP_KABINI:
1443 case CHIP_MULLINS:
1444 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1445 adev->family = AMDGPU_FAMILY_CI;
1446 else
1447 adev->family = AMDGPU_FAMILY_KV;
1448
1449 r = cik_set_ip_blocks(adev);
1450 if (r)
1451 return r;
1452 break;
1453#endif
460826e6
KW
1454 case CHIP_VEGA10:
1455 adev->family = AMDGPU_FAMILY_AI;
1456
1457 r = soc15_set_ip_blocks(adev);
1458 if (r)
1459 return r;
1460 break;
d38ceaf9
AD
1461 default:
1462 /* FIXME: not supported yet */
1463 return -EINVAL;
1464 }
1465
3149d9da
XY
1466 if (amdgpu_sriov_vf(adev)) {
1467 r = amdgpu_virt_request_full_gpu(adev, true);
1468 if (r)
1469 return r;
1470 }
1471
d38ceaf9
AD
1472 for (i = 0; i < adev->num_ip_blocks; i++) {
1473 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1474 DRM_ERROR("disabled ip block: %d\n", i);
a1255107 1475 adev->ip_blocks[i].status.valid = false;
d38ceaf9 1476 } else {
a1255107
AD
1477 if (adev->ip_blocks[i].version->funcs->early_init) {
1478 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2c1a2784 1479 if (r == -ENOENT) {
a1255107 1480 adev->ip_blocks[i].status.valid = false;
2c1a2784 1481 } else if (r) {
a1255107
AD
1482 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1483 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1484 return r;
2c1a2784 1485 } else {
a1255107 1486 adev->ip_blocks[i].status.valid = true;
2c1a2784 1487 }
974e6b64 1488 } else {
a1255107 1489 adev->ip_blocks[i].status.valid = true;
d38ceaf9 1490 }
d38ceaf9
AD
1491 }
1492 }
1493
395d1fb9
NH
1494 adev->cg_flags &= amdgpu_cg_mask;
1495 adev->pg_flags &= amdgpu_pg_mask;
1496
d38ceaf9
AD
1497 return 0;
1498}
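/*
 * Note (illustrative): amdgpu_ip_block_mask is a bitmask indexed by IP block
 * position, so clearing bit i (e.g. via the amdgpu.ip_block_mask module
 * parameter) skips early_init for adev->ip_blocks[i] and marks that block
 * invalid for the rest of the init sequence.
 */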
1499
1500static int amdgpu_init(struct amdgpu_device *adev)
1501{
1502 int i, r;
1503
1504 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1505 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1506 continue;
a1255107 1507 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2c1a2784 1508 if (r) {
a1255107
AD
1509 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1510 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1511 return r;
2c1a2784 1512 }
a1255107 1513 adev->ip_blocks[i].status.sw = true;
d38ceaf9 1514 /* need to do gmc hw init early so we can allocate gpu mem */
a1255107 1515 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
d38ceaf9 1516 r = amdgpu_vram_scratch_init(adev);
2c1a2784
AD
1517 if (r) {
1518 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
d38ceaf9 1519 return r;
2c1a2784 1520 }
a1255107 1521 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784
AD
1522 if (r) {
1523 DRM_ERROR("hw_init %d failed %d\n", i, r);
d38ceaf9 1524 return r;
2c1a2784 1525 }
d38ceaf9 1526 r = amdgpu_wb_init(adev);
2c1a2784
AD
1527 if (r) {
1528 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
d38ceaf9 1529 return r;
2c1a2784 1530 }
a1255107 1531 adev->ip_blocks[i].status.hw = true;
2493664f
ML
1532
1533 /* right after GMC hw init, we create CSA */
1534 if (amdgpu_sriov_vf(adev)) {
1535 r = amdgpu_allocate_static_csa(adev);
1536 if (r) {
1537 DRM_ERROR("allocate CSA failed %d\n", r);
1538 return r;
1539 }
1540 }
d38ceaf9
AD
1541 }
1542 }
1543
1544 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1545 if (!adev->ip_blocks[i].status.sw)
d38ceaf9
AD
1546 continue;
1547 /* gmc hw init is done early */
a1255107 1548 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
d38ceaf9 1549 continue;
a1255107 1550 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784 1551 if (r) {
a1255107
AD
1552 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1553 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1554 return r;
2c1a2784 1555 }
a1255107 1556 adev->ip_blocks[i].status.hw = true;
d38ceaf9
AD
1557 }
1558
1559 return 0;
1560}
1561
1562static int amdgpu_late_init(struct amdgpu_device *adev)
1563{
1564 int i = 0, r;
1565
1566 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1567 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1568 continue;
a1255107
AD
1569 if (adev->ip_blocks[i].version->funcs->late_init) {
1570 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2c1a2784 1571 if (r) {
a1255107
AD
1572 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1573 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1574 return r;
2c1a2784 1575 }
a1255107 1576 adev->ip_blocks[i].status.late_initialized = true;
d38ceaf9 1577 }
4a446d55 1578 /* skip CG for VCE/UVD, it's handled specially */
a1255107
AD
1579 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1580 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
4a446d55 1581 /* enable clockgating to save power */
a1255107
AD
1582 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1583 AMD_CG_STATE_GATE);
4a446d55
AD
1584 if (r) {
1585 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
a1255107 1586 adev->ip_blocks[i].version->funcs->name, r);
4a446d55
AD
1587 return r;
1588 }
b0b00ff1 1589 }
d38ceaf9
AD
1590 }
1591
d1aff8ec
TSD
1592 amdgpu_dpm_enable_uvd(adev, false);
1593 amdgpu_dpm_enable_vce(adev, false);
1594
d38ceaf9
AD
1595 return 0;
1596}
1597
1598static int amdgpu_fini(struct amdgpu_device *adev)
1599{
1600 int i, r;
1601
3e96dbfd
AD
1602 /* need to disable SMC first */
1603 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1604 if (!adev->ip_blocks[i].status.hw)
3e96dbfd 1605 continue;
a1255107 1606 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3e96dbfd 1607 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
a1255107
AD
1608 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1609 AMD_CG_STATE_UNGATE);
3e96dbfd
AD
1610 if (r) {
1611 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
a1255107 1612 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd
AD
1613 return r;
1614 }
a1255107 1615 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3e96dbfd
AD
1616 /* XXX handle errors */
1617 if (r) {
1618 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
a1255107 1619 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd 1620 }
a1255107 1621 adev->ip_blocks[i].status.hw = false;
3e96dbfd
AD
1622 break;
1623 }
1624 }
1625
d38ceaf9 1626 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1627 if (!adev->ip_blocks[i].status.hw)
d38ceaf9 1628 continue;
a1255107 1629 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
d38ceaf9
AD
1630 amdgpu_wb_fini(adev);
1631 amdgpu_vram_scratch_fini(adev);
1632 }
8201a67a
RZ
1633
1634 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1635 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1636 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1637 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1638 AMD_CG_STATE_UNGATE);
1639 if (r) {
1640 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1641 adev->ip_blocks[i].version->funcs->name, r);
1642 return r;
1643 }
2c1a2784 1644 }
8201a67a 1645
a1255107 1646 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
d38ceaf9 1647 /* XXX handle errors */
2c1a2784 1648 if (r) {
a1255107
AD
1649 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1650 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1651 }
8201a67a 1652
a1255107 1653 adev->ip_blocks[i].status.hw = false;
d38ceaf9
AD
1654 }
1655
1656 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1657 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 1658 continue;
a1255107 1659 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
d38ceaf9 1660 /* XXX handle errors */
2c1a2784 1661 if (r) {
a1255107
AD
1662 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1663 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1664 }
a1255107
AD
1665 adev->ip_blocks[i].status.sw = false;
1666 adev->ip_blocks[i].status.valid = false;
d38ceaf9
AD
1667 }
1668
a6dcfd9c 1669 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1670 if (!adev->ip_blocks[i].status.late_initialized)
8a2eef1d 1671 continue;
a1255107
AD
1672 if (adev->ip_blocks[i].version->funcs->late_fini)
1673 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1674 adev->ip_blocks[i].status.late_initialized = false;
a6dcfd9c
ML
1675 }
1676
3149d9da 1677 if (amdgpu_sriov_vf(adev)) {
2493664f 1678 amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
3149d9da
XY
1679 amdgpu_virt_release_full_gpu(adev, false);
1680 }
2493664f 1681
d38ceaf9
AD
1682 return 0;
1683}
1684
faefba95 1685int amdgpu_suspend(struct amdgpu_device *adev)
d38ceaf9
AD
1686{
1687 int i, r;
1688
e941ea99
XY
1689 if (amdgpu_sriov_vf(adev))
1690 amdgpu_virt_request_full_gpu(adev, false);
1691
c5a93a28
FC
1692 /* ungate SMC block first */
1693 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1694 AMD_CG_STATE_UNGATE);
1695 if (r) {
1696 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r);
1697 }
1698
d38ceaf9 1699 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1700 if (!adev->ip_blocks[i].status.valid)
d38ceaf9
AD
1701 continue;
1702 /* ungate blocks so that suspend can properly shut them down */
c5a93a28 1703 if (i != AMD_IP_BLOCK_TYPE_SMC) {
a1255107
AD
1704 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1705 AMD_CG_STATE_UNGATE);
c5a93a28 1706 if (r) {
a1255107
AD
1707 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1708 adev->ip_blocks[i].version->funcs->name, r);
c5a93a28 1709 }
2c1a2784 1710 }
d38ceaf9 1711 /* XXX handle errors */
a1255107 1712 r = adev->ip_blocks[i].version->funcs->suspend(adev);
d38ceaf9 1713 /* XXX handle errors */
2c1a2784 1714 if (r) {
a1255107
AD
1715 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1716 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1717 }
d38ceaf9
AD
1718 }
1719
e941ea99
XY
1720 if (amdgpu_sriov_vf(adev))
1721 amdgpu_virt_release_full_gpu(adev, false);
1722
d38ceaf9
AD
1723 return 0;
1724}
1725
e4f0fdcc 1726static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
a90ad3c2
ML
1727{
1728 int i, r;
1729
1730 for (i = 0; i < adev->num_ip_blocks; i++) {
1731 if (!adev->ip_blocks[i].status.valid)
1732 continue;
1733
1734 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1735 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1736 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
e4f0fdcc 1737 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
a90ad3c2
ML
1738
1739 if (r) {
1740 DRM_ERROR("resume of IP block <%s> failed %d\n",
1741 adev->ip_blocks[i].version->funcs->name, r);
1742 return r;
1743 }
1744 }
1745
1746 return 0;
1747}
1748
e4f0fdcc 1749static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
a90ad3c2
ML
1750{
1751 int i, r;
1752
1753 for (i = 0; i < adev->num_ip_blocks; i++) {
1754 if (!adev->ip_blocks[i].status.valid)
1755 continue;
1756
1757 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1758 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1759 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH )
1760 continue;
1761
e4f0fdcc 1762 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
a90ad3c2
ML
1763 if (r) {
1764 DRM_ERROR("resume of IP block <%s> failed %d\n",
1765 adev->ip_blocks[i].version->funcs->name, r);
1766 return r;
1767 }
1768 }
1769
1770 return 0;
1771}
1772
d38ceaf9
AD
1773static int amdgpu_resume(struct amdgpu_device *adev)
1774{
1775 int i, r;
1776
1777 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1778 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1779 continue;
a1255107 1780 r = adev->ip_blocks[i].version->funcs->resume(adev);
2c1a2784 1781 if (r) {
a1255107
AD
1782 DRM_ERROR("resume of IP block <%s> failed %d\n",
1783 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1784 return r;
2c1a2784 1785 }
d38ceaf9
AD
1786 }
1787
1788 return 0;
1789}
1790
4e99a44e 1791static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 1792{
a5bde2f9
AD
1793 if (adev->is_atom_fw) {
1794 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
1795 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1796 } else {
1797 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
1798 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1799 }
048765ad
AR
1800}
1801
d38ceaf9
AD
1802/**
1803 * amdgpu_device_init - initialize the driver
1804 *
1805 * @adev: amdgpu_device pointer
1806 * @ddev: drm dev pointer
1807 * @pdev: pci dev pointer
1808 * @flags: driver flags
1809 *
1810 * Initializes the driver info and hw (all asics).
1811 * Returns 0 for success or an error on failure.
1812 * Called at driver startup.
1813 */
1814int amdgpu_device_init(struct amdgpu_device *adev,
1815 struct drm_device *ddev,
1816 struct pci_dev *pdev,
1817 uint32_t flags)
1818{
1819 int r, i;
1820 bool runtime = false;
95844d20 1821 u32 max_MBps;
d38ceaf9
AD
1822
1823 adev->shutdown = false;
1824 adev->dev = &pdev->dev;
1825 adev->ddev = ddev;
1826 adev->pdev = pdev;
1827 adev->flags = flags;
2f7d10b3 1828 adev->asic_type = flags & AMD_ASIC_MASK;
d38ceaf9
AD
1829 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
1830 adev->mc.gtt_size = 512 * 1024 * 1024;
1831 adev->accel_working = false;
1832 adev->num_rings = 0;
1833 adev->mman.buffer_funcs = NULL;
1834 adev->mman.buffer_funcs_ring = NULL;
1835 adev->vm_manager.vm_pte_funcs = NULL;
2d55e45a 1836 adev->vm_manager.vm_pte_num_rings = 0;
d38ceaf9 1837 adev->gart.gart_funcs = NULL;
f54d1867 1838 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
d38ceaf9
AD
1839
1840 adev->smc_rreg = &amdgpu_invalid_rreg;
1841 adev->smc_wreg = &amdgpu_invalid_wreg;
1842 adev->pcie_rreg = &amdgpu_invalid_rreg;
1843 adev->pcie_wreg = &amdgpu_invalid_wreg;
36b9a952
HR
1844 adev->pciep_rreg = &amdgpu_invalid_rreg;
1845 adev->pciep_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
1846 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
1847 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
1848 adev->didt_rreg = &amdgpu_invalid_rreg;
1849 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
1850 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
1851 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
1852 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
1853 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1854
ccdbb20a 1855
3e39ab90
AD
1856 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1857 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
1858 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
1859
1860 /* mutex initializations are all done here so we
1861 * can recall the function without having locking issues */
8d0a7cea 1862 mutex_init(&adev->vm_manager.lock);
d38ceaf9 1863 atomic_set(&adev->irq.ih.lock, 0);
0e5ca0d1 1864 mutex_init(&adev->firmware.mutex);
d38ceaf9
AD
1865 mutex_init(&adev->pm.mutex);
1866 mutex_init(&adev->gfx.gpu_clock_mutex);
1867 mutex_init(&adev->srbm_mutex);
1868 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9
AD
1869 mutex_init(&adev->mn_lock);
1870 hash_init(adev->mn_hash);
1871
1872 amdgpu_check_arguments(adev);
1873
1874 /* Registers mapping */
1875 /* TODO: block userspace mapping of io register */
1876 spin_lock_init(&adev->mmio_idx_lock);
1877 spin_lock_init(&adev->smc_idx_lock);
1878 spin_lock_init(&adev->pcie_idx_lock);
1879 spin_lock_init(&adev->uvd_ctx_idx_lock);
1880 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 1881 spin_lock_init(&adev->gc_cac_idx_lock);
d38ceaf9 1882 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 1883 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 1884
0c4e7fa5
CZ
1885 INIT_LIST_HEAD(&adev->shadow_list);
1886 mutex_init(&adev->shadow_list_lock);
1887
5c1354bd
CZ
1888 INIT_LIST_HEAD(&adev->gtt_list);
1889 spin_lock_init(&adev->gtt_list_lock);
1890
da69c161
KW
1891 if (adev->asic_type >= CHIP_BONAIRE) {
1892 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
1893 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
1894 } else {
1895 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
1896 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
1897 }
d38ceaf9 1898
d38ceaf9
AD
1899 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
1900 if (adev->rmmio == NULL) {
1901 return -ENOMEM;
1902 }
1903 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
1904 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
1905
da69c161
KW
1906 if (adev->asic_type >= CHIP_BONAIRE)
1907 /* doorbell bar mapping */
1908 amdgpu_doorbell_init(adev);
d38ceaf9
AD
1909
1910 /* io port mapping */
1911 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1912 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
1913 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
1914 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
1915 break;
1916 }
1917 }
1918 if (adev->rio_mem == NULL)
b64a18c5 1919 DRM_INFO("PCI I/O BAR is not found.\n");
d38ceaf9
AD
1920
1921 /* early init functions */
1922 r = amdgpu_early_init(adev);
1923 if (r)
1924 return r;
1925
1926 /* if we have more than one VGA card, disable the amdgpu VGA resources */
1927 /* this will fail for cards that aren't VGA class devices, just
1928 * ignore it */
1929 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
1930
1931 if (amdgpu_runtime_pm == 1)
1932 runtime = true;
e9bef455 1933 if (amdgpu_device_is_px(ddev))
d38ceaf9 1934 runtime = true;
84c8b22e
LW
1935 if (!pci_is_thunderbolt_attached(adev->pdev))
1936 vga_switcheroo_register_client(adev->pdev,
1937 &amdgpu_switcheroo_ops, runtime);
d38ceaf9
AD
1938 if (runtime)
1939 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
1940
1941 /* Read BIOS */
83ba126a
AD
1942 if (!amdgpu_get_bios(adev)) {
1943 r = -EINVAL;
1944 goto failed;
1945 }
f7e9e9fe 1946
d38ceaf9 1947 r = amdgpu_atombios_init(adev);
2c1a2784
AD
1948 if (r) {
1949 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
83ba126a 1950 goto failed;
2c1a2784 1951 }
d38ceaf9 1952
4e99a44e
ML
1953 /* detect whether we are running with an SR-IOV vBIOS */
1954 amdgpu_device_detect_sriov_bios(adev);
048765ad 1955
d38ceaf9 1956 /* Post card if necessary */
bec86378 1957 if (amdgpu_vpost_needed(adev)) {
d38ceaf9 1958 if (!adev->bios) {
bec86378 1959 dev_err(adev->dev, "no vBIOS found\n");
83ba126a
AD
1960 r = -EINVAL;
1961 goto failed;
d38ceaf9 1962 }
bec86378 1963 DRM_INFO("GPU posting now...\n");
4e99a44e
ML
1964 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
1965 if (r) {
1966 dev_err(adev->dev, "gpu post error!\n");
1967 goto failed;
1968 }
1969 } else {
1970 DRM_INFO("GPU post is not needed\n");
d38ceaf9
AD
1971 }
1972
a5bde2f9
AD
1973 if (!adev->is_atom_fw) {
1974 /* Initialize clocks */
1975 r = amdgpu_atombios_get_clock_info(adev);
1976 if (r) {
1977 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
1978 return r;
1979 }
1980 /* init i2c buses */
1981 amdgpu_atombios_i2c_init(adev);
2c1a2784 1982 }
d38ceaf9
AD
1983
1984 /* Fence driver */
1985 r = amdgpu_fence_driver_init(adev);
2c1a2784
AD
1986 if (r) {
1987 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
83ba126a 1988 goto failed;
2c1a2784 1989 }
d38ceaf9
AD
1990
1991 /* init the mode config */
1992 drm_mode_config_init(adev->ddev);
1993
1994 r = amdgpu_init(adev);
1995 if (r) {
2c1a2784 1996 dev_err(adev->dev, "amdgpu_init failed\n");
d38ceaf9 1997 amdgpu_fini(adev);
83ba126a 1998 goto failed;
d38ceaf9
AD
1999 }
2000
2001 adev->accel_working = true;
2002
95844d20
MO
2003 /* Initialize the buffer migration limit. */
2004 if (amdgpu_moverate >= 0)
2005 max_MBps = amdgpu_moverate;
2006 else
2007 max_MBps = 8; /* Allow 8 MB/s. */
2008 /* Get a log2 for easy divisions. */
2009 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2010
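 /*
  * Illustrative note (not part of the original source): storing the limit
  * as a log2 lets the throttling code that later consumes
  * mm_stats.log2_max_MBps replace a divide with a shift.  A sketch with
  * the default of 8 MB/s:
  *
  *     log2_max_MBps = ilog2(max(1u, 8)) = 3
  *     bytes allowed over `us` microseconds ~= us << log2_max_MBps
  *
  * (1 MB/s is roughly 1 byte per microsecond, so max_MBps MB/s is roughly
  * max_MBps bytes per microsecond.)
  */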
d38ceaf9
AD
2011 r = amdgpu_ib_pool_init(adev);
2012 if (r) {
2013 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
83ba126a 2014 goto failed;
d38ceaf9
AD
2015 }
2016
2017 r = amdgpu_ib_ring_tests(adev);
2018 if (r)
2019 DRM_ERROR("ib ring test failed (%d).\n", r);
2020
9bc92b9c
ML
2021 amdgpu_fbdev_init(adev);
2022
d38ceaf9 2023 r = amdgpu_gem_debugfs_init(adev);
3f14e623 2024 if (r)
d38ceaf9 2025 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
d38ceaf9
AD
2026
2027 r = amdgpu_debugfs_regs_init(adev);
3f14e623 2028 if (r)
d38ceaf9 2029 DRM_ERROR("registering register debugfs failed (%d).\n", r);
d38ceaf9 2030
50ab2533 2031 r = amdgpu_debugfs_firmware_init(adev);
3f14e623 2032 if (r)
50ab2533 2033 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
50ab2533 2034
d38ceaf9
AD
2035 if ((amdgpu_testing & 1)) {
2036 if (adev->accel_working)
2037 amdgpu_test_moves(adev);
2038 else
2039 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2040 }
d38ceaf9
AD
2041 if (amdgpu_benchmarking) {
2042 if (adev->accel_working)
2043 amdgpu_benchmark(adev, amdgpu_benchmarking);
2044 else
2045 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2046 }
2047
2048 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2049 * explicit gating rather than handling it automatically.
2050 */
2051 r = amdgpu_late_init(adev);
2c1a2784
AD
2052 if (r) {
2053 dev_err(adev->dev, "amdgpu_late_init failed\n");
83ba126a 2054 goto failed;
2c1a2784 2055 }
d38ceaf9
AD
2056
2057 return 0;
83ba126a
AD
2058
2059failed:
2060 if (runtime)
2061 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2062 return r;
d38ceaf9
AD
2063}
2064
d38ceaf9
AD
2065/**
2066 * amdgpu_device_fini - tear down the driver
2067 *
2068 * @adev: amdgpu_device pointer
2069 *
2070 * Tear down the driver info (all asics).
2071 * Called at driver shutdown.
2072 */
2073void amdgpu_device_fini(struct amdgpu_device *adev)
2074{
2075 int r;
2076
2077 DRM_INFO("amdgpu: finishing device.\n");
2078 adev->shutdown = true;
a951ed85 2079 drm_crtc_force_disable_all(adev->ddev);
d38ceaf9
AD
2080 /* evict vram memory */
2081 amdgpu_bo_evict_vram(adev);
2082 amdgpu_ib_pool_fini(adev);
2083 amdgpu_fence_driver_fini(adev);
2084 amdgpu_fbdev_fini(adev);
2085 r = amdgpu_fini(adev);
d38ceaf9
AD
2086 adev->accel_working = false;
2087 /* free i2c buses */
2088 amdgpu_i2c_fini(adev);
2089 amdgpu_atombios_fini(adev);
2090 kfree(adev->bios);
2091 adev->bios = NULL;
84c8b22e
LW
2092 if (!pci_is_thunderbolt_attached(adev->pdev))
2093 vga_switcheroo_unregister_client(adev->pdev);
83ba126a
AD
2094 if (adev->flags & AMD_IS_PX)
2095 vga_switcheroo_fini_domain_pm_ops(adev->dev);
d38ceaf9
AD
2096 vga_client_register(adev->pdev, NULL, NULL, NULL);
2097 if (adev->rio_mem)
2098 pci_iounmap(adev->pdev, adev->rio_mem);
2099 adev->rio_mem = NULL;
2100 iounmap(adev->rmmio);
2101 adev->rmmio = NULL;
da69c161
KW
2102 if (adev->asic_type >= CHIP_BONAIRE)
2103 amdgpu_doorbell_fini(adev);
d38ceaf9 2104 amdgpu_debugfs_regs_cleanup(adev);
d38ceaf9
AD
2105}
2106
2107
2108/*
2109 * Suspend & resume.
2110 */
2111/**
810ddc3a 2112 * amdgpu_device_suspend - initiate device suspend
d38ceaf9
AD
2113 *
2114 * @dev: drm dev pointer
2115 * @suspend: true to also put the device into the D3hot PCI power state
2116 *
2117 * Puts the hw in the suspend state (all asics).
2118 * Returns 0 for success or an error on failure.
2119 * Called at driver suspend.
2120 */
810ddc3a 2121int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
d38ceaf9
AD
2122{
2123 struct amdgpu_device *adev;
2124 struct drm_crtc *crtc;
2125 struct drm_connector *connector;
5ceb54c6 2126 int r;
d38ceaf9
AD
2127
2128 if (dev == NULL || dev->dev_private == NULL) {
2129 return -ENODEV;
2130 }
2131
2132 adev = dev->dev_private;
2133
2134 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2135 return 0;
2136
2137 drm_kms_helper_poll_disable(dev);
2138
2139 /* turn off display hw */
4c7fbc39 2140 drm_modeset_lock_all(dev);
d38ceaf9
AD
2141 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2142 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2143 }
4c7fbc39 2144 drm_modeset_unlock_all(dev);
d38ceaf9 2145
756e6880 2146 /* unpin the front buffers and cursors */
d38ceaf9 2147 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
756e6880 2148 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
d38ceaf9
AD
2149 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2150 struct amdgpu_bo *robj;
2151
756e6880
AD
2152 if (amdgpu_crtc->cursor_bo) {
2153 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2154 r = amdgpu_bo_reserve(aobj, false);
2155 if (r == 0) {
2156 amdgpu_bo_unpin(aobj);
2157 amdgpu_bo_unreserve(aobj);
2158 }
2159 }
2160
d38ceaf9
AD
2161 if (rfb == NULL || rfb->obj == NULL) {
2162 continue;
2163 }
2164 robj = gem_to_amdgpu_bo(rfb->obj);
2165 /* don't unpin kernel fb objects */
2166 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
2167 r = amdgpu_bo_reserve(robj, false);
2168 if (r == 0) {
2169 amdgpu_bo_unpin(robj);
2170 amdgpu_bo_unreserve(robj);
2171 }
2172 }
2173 }
2174 /* evict vram memory */
2175 amdgpu_bo_evict_vram(adev);
2176
5ceb54c6 2177 amdgpu_fence_driver_suspend(adev);
d38ceaf9
AD
2178
2179 r = amdgpu_suspend(adev);
2180
a0a71e49
AD
2181 /* evict remaining vram memory
2182 * This second call to evict vram is to evict the gart page table
2183 * using the CPU.
2184 */
d38ceaf9
AD
2185 amdgpu_bo_evict_vram(adev);
2186
be34d3bf
AD
2187 if (adev->is_atom_fw)
2188 amdgpu_atomfirmware_scratch_regs_save(adev);
2189 else
2190 amdgpu_atombios_scratch_regs_save(adev);
d38ceaf9
AD
2191 pci_save_state(dev->pdev);
2192 if (suspend) {
2193 /* Shut down the device */
2194 pci_disable_device(dev->pdev);
2195 pci_set_power_state(dev->pdev, PCI_D3hot);
74b0b157 2196 } else {
2197 r = amdgpu_asic_reset(adev);
2198 if (r)
2199 DRM_ERROR("amdgpu asic reset failed\n");
d38ceaf9
AD
2200 }
2201
2202 if (fbcon) {
2203 console_lock();
2204 amdgpu_fbdev_set_suspend(adev, 1);
2205 console_unlock();
2206 }
2207 return 0;
2208}
2209
2210/**
810ddc3a 2211 * amdgpu_device_resume - initiate device resume
d38ceaf9
AD
2212 *
2213 * @dev: drm dev pointer
2214 *
2215 * Bring the hw back to operating state (all asics).
2216 * Returns 0 for success or an error on failure.
2217 * Called at driver resume.
2218 */
810ddc3a 2219int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
d38ceaf9
AD
2220{
2221 struct drm_connector *connector;
2222 struct amdgpu_device *adev = dev->dev_private;
756e6880 2223 struct drm_crtc *crtc;
d38ceaf9
AD
2224 int r;
2225
2226 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2227 return 0;
2228
74b0b157 2229 if (fbcon)
d38ceaf9 2230 console_lock();
74b0b157 2231
d38ceaf9
AD
2232 if (resume) {
2233 pci_set_power_state(dev->pdev, PCI_D0);
2234 pci_restore_state(dev->pdev);
74b0b157 2235 r = pci_enable_device(dev->pdev);
2236 if (r) {
d38ceaf9
AD
2237 if (fbcon)
2238 console_unlock();
74b0b157 2239 return r;
d38ceaf9
AD
2240 }
2241 }
be34d3bf
AD
2242 if (adev->is_atom_fw)
2243 amdgpu_atomfirmware_scratch_regs_restore(adev);
2244 else
2245 amdgpu_atombios_scratch_regs_restore(adev);
d38ceaf9
AD
2246
2247 /* post card */
c836fec5 2248 if (amdgpu_need_post(adev)) {
74b0b157 2249 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2250 if (r)
2251 DRM_ERROR("amdgpu asic init failed\n");
2252 }
d38ceaf9
AD
2253
2254 r = amdgpu_resume(adev);
e6707218 2255 if (r) {
ca198528 2256 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
e6707218
RZ
2257 return r;
2258 }
5ceb54c6
AD
2259 amdgpu_fence_driver_resume(adev);
2260
ca198528
FC
2261 if (resume) {
2262 r = amdgpu_ib_ring_tests(adev);
2263 if (r)
2264 DRM_ERROR("ib ring test failed (%d).\n", r);
2265 }
d38ceaf9
AD
2266
2267 r = amdgpu_late_init(adev);
c085bd51
JQ
2268 if (r) {
2269 if (fbcon)
2270 console_unlock();
d38ceaf9 2271 return r;
c085bd51 2272 }
d38ceaf9 2273
756e6880
AD
2274 /* pin cursors */
2275 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2276 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2277
2278 if (amdgpu_crtc->cursor_bo) {
2279 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2280 r = amdgpu_bo_reserve(aobj, false);
2281 if (r == 0) {
2282 r = amdgpu_bo_pin(aobj,
2283 AMDGPU_GEM_DOMAIN_VRAM,
2284 &amdgpu_crtc->cursor_addr);
2285 if (r != 0)
2286 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2287 amdgpu_bo_unreserve(aobj);
2288 }
2289 }
2290 }
2291
d38ceaf9
AD
2292 /* blat the mode back in */
2293 if (fbcon) {
2294 drm_helper_resume_force_mode(dev);
2295 /* turn on display hw */
4c7fbc39 2296 drm_modeset_lock_all(dev);
d38ceaf9
AD
2297 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2298 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2299 }
4c7fbc39 2300 drm_modeset_unlock_all(dev);
d38ceaf9
AD
2301 }
2302
2303 drm_kms_helper_poll_enable(dev);
23a1a9e5
L
2304
2305 /*
2306 * Most of the connector probing functions try to acquire runtime pm
2307 * refs to ensure that the GPU is powered on when connector polling is
2308 * performed. Since we're calling this from a runtime PM callback,
2309 * trying to acquire rpm refs will cause us to deadlock.
2310 *
2311 * Since we're guaranteed to be holding the rpm lock, it's safe to
2312 * temporarily disable the rpm helpers so this doesn't deadlock us.
2313 */
2314#ifdef CONFIG_PM
2315 dev->dev->power.disable_depth++;
2316#endif
54fb2a5c 2317 drm_helper_hpd_irq_event(dev);
23a1a9e5
L
2318#ifdef CONFIG_PM
2319 dev->dev->power.disable_depth--;
2320#endif
d38ceaf9
AD
2321
2322 if (fbcon) {
2323 amdgpu_fbdev_set_suspend(adev, 0);
2324 console_unlock();
2325 }
2326
2327 return 0;
2328}
2329
63fbf42f
CZ
2330static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2331{
2332 int i;
2333 bool asic_hang = false;
2334
2335 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2336 if (!adev->ip_blocks[i].status.valid)
63fbf42f 2337 continue;
a1255107
AD
2338 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2339 adev->ip_blocks[i].status.hang =
2340 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2341 if (adev->ip_blocks[i].status.hang) {
2342 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
63fbf42f
CZ
2343 asic_hang = true;
2344 }
2345 }
2346 return asic_hang;
2347}
2348
4d446656 2349static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
d31a501e
CZ
2350{
2351 int i, r = 0;
2352
2353 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2354 if (!adev->ip_blocks[i].status.valid)
d31a501e 2355 continue;
a1255107
AD
2356 if (adev->ip_blocks[i].status.hang &&
2357 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2358 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
d31a501e
CZ
2359 if (r)
2360 return r;
2361 }
2362 }
2363
2364 return 0;
2365}
2366
35d782fe
CZ
2367static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2368{
da146d3b
AD
2369 int i;
2370
2371 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2372 if (!adev->ip_blocks[i].status.valid)
da146d3b 2373 continue;
a1255107
AD
2374 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2375 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2376 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
2377 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
2378 if (adev->ip_blocks[i].status.hang) {
da146d3b
AD
2379 DRM_INFO("Some block need full reset!\n");
2380 return true;
2381 }
2382 }
35d782fe
CZ
2383 }
2384 return false;
2385}
2386
2387static int amdgpu_soft_reset(struct amdgpu_device *adev)
2388{
2389 int i, r = 0;
2390
2391 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2392 if (!adev->ip_blocks[i].status.valid)
35d782fe 2393 continue;
a1255107
AD
2394 if (adev->ip_blocks[i].status.hang &&
2395 adev->ip_blocks[i].version->funcs->soft_reset) {
2396 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
35d782fe
CZ
2397 if (r)
2398 return r;
2399 }
2400 }
2401
2402 return 0;
2403}
2404
2405static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2406{
2407 int i, r = 0;
2408
2409 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2410 if (!adev->ip_blocks[i].status.valid)
35d782fe 2411 continue;
a1255107
AD
2412 if (adev->ip_blocks[i].status.hang &&
2413 adev->ip_blocks[i].version->funcs->post_soft_reset)
2414 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
35d782fe
CZ
2415 if (r)
2416 return r;
2417 }
2418
2419 return 0;
2420}
2421
3ad81f16
CZ
2422bool amdgpu_need_backup(struct amdgpu_device *adev)
2423{
2424 if (adev->flags & AMD_IS_APU)
2425 return false;
2426
2427 return amdgpu_lockup_timeout > 0 ? true : false;
2428}
2429
53cdccd5
CZ
2430static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2431 struct amdgpu_ring *ring,
2432 struct amdgpu_bo *bo,
f54d1867 2433 struct dma_fence **fence)
53cdccd5
CZ
2434{
2435 uint32_t domain;
2436 int r;
2437
2438 if (!bo->shadow)
2439 return 0;
2440
2441 r = amdgpu_bo_reserve(bo, false);
2442 if (r)
2443 return r;
2444 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2445 /* if bo has been evicted, then no need to recover */
2446 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
2447 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
2448 NULL, fence, true);
2449 if (r) {
2450 DRM_ERROR("recover page table failed!\n");
2451 goto err;
2452 }
2453 }
2454err:
2455 amdgpu_bo_unreserve(bo);
2456 return r;
2457}
2458
a90ad3c2
ML
2459/**
2460 * amdgpu_sriov_gpu_reset - reset the asic
2461 *
2462 * @adev: amdgpu device pointer
2463 * @voluntary: true if this reset is requested by the guest,
2464 * false if it is requested by the hypervisor
2465 *
2466 * Attempt to reset the GPU if it has hung (all asics),
2467 * for the SR-IOV case.
2468 * Returns 0 for success or an error on failure.
2469 */
2470int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
2471{
2472 int i, r = 0;
2473 int resched;
2474 struct amdgpu_bo *bo, *tmp;
2475 struct amdgpu_ring *ring;
2476 struct dma_fence *fence = NULL, *next = NULL;
2477
147b5983 2478 mutex_lock(&adev->virt.lock_reset);
a90ad3c2 2479 atomic_inc(&adev->gpu_reset_counter);
1fb37a3d 2480 adev->gfx.in_reset = true;
a90ad3c2
ML
2481
2482 /* block TTM */
2483 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2484
2485 /* block scheduler */
2486 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2487 ring = adev->rings[i];
2488
2489 if (!ring || !ring->sched.thread)
2490 continue;
2491
2492 kthread_park(ring->sched.thread);
2493 amd_sched_hw_job_reset(&ring->sched);
2494 }
2495
2496 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2497 amdgpu_fence_driver_force_completion(adev);
2498
2499 /* request to take full control of GPU before re-initialization */
2500 if (voluntary)
2501 amdgpu_virt_reset_gpu(adev);
2502 else
2503 amdgpu_virt_request_full_gpu(adev, true);
2504
2505
2506 /* Resume IP prior to SMC */
e4f0fdcc 2507 amdgpu_sriov_reinit_early(adev);
a90ad3c2
ML
2508
2509 /* we need to recover the gart prior to running SMC/CP/SDMA resume */
2510 amdgpu_ttm_recover_gart(adev);
2511
2512 /* now we are okay to resume SMC/CP/SDMA */
e4f0fdcc 2513 amdgpu_sriov_reinit_late(adev);
a90ad3c2
ML
2514
2515 amdgpu_irq_gpu_reset_resume_helper(adev);
2516
2517 if (amdgpu_ib_ring_tests(adev))
2518 dev_err(adev->dev, "[GPU_RESET] ib ring test failed\n");
2519
2520 /* release full control of GPU after ib test */
2521 amdgpu_virt_release_full_gpu(adev, true);
2522
2523 DRM_INFO("recover vram bo from shadow\n");
2524
2525 ring = adev->mman.buffer_funcs_ring;
2526 mutex_lock(&adev->shadow_list_lock);
2527 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2528 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2529 if (fence) {
2530 r = dma_fence_wait(fence, false);
2531 if (r) {
2532 WARN(r, "recovery from shadow isn't completed\n");
2533 break;
2534 }
2535 }
2536
2537 dma_fence_put(fence);
2538 fence = next;
2539 }
2540 mutex_unlock(&adev->shadow_list_lock);
2541
2542 if (fence) {
2543 r = dma_fence_wait(fence, false);
2544 if (r)
2545 WARN(r, "recovery from shadow isn't completed\n");
2546 }
2547 dma_fence_put(fence);
2548
2549 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2550 struct amdgpu_ring *ring = adev->rings[i];
2551 if (!ring || !ring->sched.thread)
2552 continue;
2553
2554 amd_sched_job_recovery(&ring->sched);
2555 kthread_unpark(ring->sched.thread);
2556 }
2557
2558 drm_helper_resume_force_mode(adev->ddev);
2559 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2560 if (r) {
2561 /* bad news, how to tell it to userspace ? */
2562 dev_info(adev->dev, "GPU reset failed\n");
2563 }
2564
1fb37a3d 2565 adev->gfx.in_reset = false;
147b5983 2566 mutex_unlock(&adev->virt.lock_reset);
a90ad3c2
ML
2567 return r;
2568}
2569
d38ceaf9
AD
2570/**
2571 * amdgpu_gpu_reset - reset the asic
2572 *
2573 * @adev: amdgpu device pointer
2574 *
2575 * Attempt to reset the GPU if it has hung (all asics).
2576 * Returns 0 for success or an error on failure.
2577 */
2578int amdgpu_gpu_reset(struct amdgpu_device *adev)
2579{
d38ceaf9
AD
2580 int i, r;
2581 int resched;
35d782fe 2582 bool need_full_reset;
d38ceaf9 2583
fb140b29 2584 if (amdgpu_sriov_vf(adev))
a90ad3c2 2585 return amdgpu_sriov_gpu_reset(adev, true);
fb140b29 2586
63fbf42f
CZ
2587 if (!amdgpu_check_soft_reset(adev)) {
2588 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2589 return 0;
2590 }
d38ceaf9 2591
d94aed5a 2592 atomic_inc(&adev->gpu_reset_counter);
d38ceaf9 2593
a3c47d6b
CZ
2594 /* block TTM */
2595 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2596
0875dc9e
CZ
2597 /* block scheduler */
2598 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2599 struct amdgpu_ring *ring = adev->rings[i];
2600
2601 if (!ring)
2602 continue;
2603 kthread_park(ring->sched.thread);
aa1c8900 2604 amd_sched_hw_job_reset(&ring->sched);
0875dc9e 2605 }
2200edac
CZ
2606 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2607 amdgpu_fence_driver_force_completion(adev);
d38ceaf9 2608
35d782fe 2609 need_full_reset = amdgpu_need_full_reset(adev);
d38ceaf9 2610
35d782fe
CZ
2611 if (!need_full_reset) {
2612 amdgpu_pre_soft_reset(adev);
2613 r = amdgpu_soft_reset(adev);
2614 amdgpu_post_soft_reset(adev);
2615 if (r || amdgpu_check_soft_reset(adev)) {
2616 DRM_INFO("soft reset failed, will fallback to full reset!\n");
2617 need_full_reset = true;
2618 }
f1aa7e08
CZ
2619 }
2620
35d782fe 2621 if (need_full_reset) {
35d782fe 2622 r = amdgpu_suspend(adev);
bfa99269 2623
35d782fe
CZ
2624retry:
2625 /* Disable fb access */
2626 if (adev->mode_info.num_crtc) {
2627 struct amdgpu_mode_mc_save save;
2628 amdgpu_display_stop_mc_access(adev, &save);
2629 amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
2630 }
be34d3bf
AD
2631 if (adev->is_atom_fw)
2632 amdgpu_atomfirmware_scratch_regs_save(adev);
2633 else
2634 amdgpu_atombios_scratch_regs_save(adev);
35d782fe 2635 r = amdgpu_asic_reset(adev);
be34d3bf
AD
2636 if (adev->is_atom_fw)
2637 amdgpu_atomfirmware_scratch_regs_restore(adev);
2638 else
2639 amdgpu_atombios_scratch_regs_restore(adev);
35d782fe
CZ
2640 /* post card */
2641 amdgpu_atom_asic_init(adev->mode_info.atom_context);
2642
2643 if (!r) {
2644 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
2645 r = amdgpu_resume(adev);
2646 }
d38ceaf9 2647 }
d38ceaf9 2648 if (!r) {
e72cfd58 2649 amdgpu_irq_gpu_reset_resume_helper(adev);
2c0d7318
CZ
2650 if (need_full_reset && amdgpu_need_backup(adev)) {
2651 r = amdgpu_ttm_recover_gart(adev);
2652 if (r)
2653 DRM_ERROR("gart recovery failed!!!\n");
2654 }
1f465087
CZ
2655 r = amdgpu_ib_ring_tests(adev);
2656 if (r) {
2657 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
40019dc4 2658 r = amdgpu_suspend(adev);
53cdccd5 2659 need_full_reset = true;
40019dc4 2660 goto retry;
1f465087 2661 }
53cdccd5
CZ
2662 /**
2663 * recover vm page tables, since we cannot depend on VRAM being
2664 * consistent after a full gpu reset.
2665 */
2666 if (need_full_reset && amdgpu_need_backup(adev)) {
2667 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2668 struct amdgpu_bo *bo, *tmp;
f54d1867 2669 struct dma_fence *fence = NULL, *next = NULL;
53cdccd5
CZ
2670
2671 DRM_INFO("recover vram bo from shadow\n");
2672 mutex_lock(&adev->shadow_list_lock);
2673 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2674 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2675 if (fence) {
f54d1867 2676 r = dma_fence_wait(fence, false);
53cdccd5 2677 if (r) {
1d7b17b0 2678 WARN(r, "recovery from shadow isn't completed\n");
53cdccd5
CZ
2679 break;
2680 }
2681 }
1f465087 2682
f54d1867 2683 dma_fence_put(fence);
53cdccd5
CZ
2684 fence = next;
2685 }
2686 mutex_unlock(&adev->shadow_list_lock);
2687 if (fence) {
f54d1867 2688 r = dma_fence_wait(fence, false);
53cdccd5 2689 if (r)
1d7b17b0 2690 WARN(r, "recovery from shadow isn't completed\n");
53cdccd5 2691 }
f54d1867 2692 dma_fence_put(fence);
53cdccd5 2693 }
d38ceaf9
AD
2694 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2695 struct amdgpu_ring *ring = adev->rings[i];
2696 if (!ring)
2697 continue;
53cdccd5 2698
aa1c8900 2699 amd_sched_job_recovery(&ring->sched);
0875dc9e 2700 kthread_unpark(ring->sched.thread);
d38ceaf9 2701 }
d38ceaf9 2702 } else {
2200edac 2703 dev_err(adev->dev, "asic resume failed (%d).\n", r);
d38ceaf9 2704 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
0875dc9e
CZ
2705 if (adev->rings[i]) {
2706 kthread_unpark(adev->rings[i]->sched.thread);
0875dc9e 2707 }
d38ceaf9
AD
2708 }
2709 }
2710
2711 drm_helper_resume_force_mode(adev->ddev);
2712
2713 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2714 if (r) {
2715 /* bad news, how to tell it to userspace ? */
2716 dev_info(adev->dev, "GPU reset failed\n");
2717 }
2718
d38ceaf9
AD
2719 return r;
2720}
2721
d0dd7f0c
AD
2722void amdgpu_get_pcie_info(struct amdgpu_device *adev)
2723{
2724 u32 mask;
2725 int ret;
2726
cd474ba0
AD
2727 if (amdgpu_pcie_gen_cap)
2728 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 2729
cd474ba0
AD
2730 if (amdgpu_pcie_lane_cap)
2731 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 2732
cd474ba0
AD
2733 /* covers APUs as well */
2734 if (pci_is_root_bus(adev->pdev->bus)) {
2735 if (adev->pm.pcie_gen_mask == 0)
2736 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2737 if (adev->pm.pcie_mlw_mask == 0)
2738 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 2739 return;
cd474ba0 2740 }
d0dd7f0c 2741
cd474ba0
AD
2742 if (adev->pm.pcie_gen_mask == 0) {
2743 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
2744 if (!ret) {
2745 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
2746 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
2747 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
2748
2749 if (mask & DRM_PCIE_SPEED_25)
2750 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
2751 if (mask & DRM_PCIE_SPEED_50)
2752 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
2753 if (mask & DRM_PCIE_SPEED_80)
2754 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
2755 } else {
2756 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2757 }
2758 }
2759 if (adev->pm.pcie_mlw_mask == 0) {
2760 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
2761 if (!ret) {
2762 switch (mask) {
2763 case 32:
2764 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
2765 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2766 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2767 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2768 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2769 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2770 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2771 break;
2772 case 16:
2773 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2774 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2775 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2776 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2777 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2778 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2779 break;
2780 case 12:
2781 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2782 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2783 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2784 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2785 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2786 break;
2787 case 8:
2788 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2789 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2790 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2791 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2792 break;
2793 case 4:
2794 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2795 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2796 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2797 break;
2798 case 2:
2799 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2800 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2801 break;
2802 case 1:
2803 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
2804 break;
2805 default:
2806 break;
2807 }
2808 } else {
2809 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c
AD
2810 }
2811 }
2812}
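/*
 * Illustrative note (not part of the original source): as a worked example
 * of the mapping above, a board that reports a Gen3-capable link at x16
 * width ends up with
 *
 *     pcie_gen_mask: the GEN1, GEN2 and GEN3 ASIC and link speed bits
 *     pcie_mlw_mask: CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 down through ..._X1
 *
 * i.e. every speed and width up to the reported capability is advertised.
 */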
d38ceaf9
AD
2813
2814/*
2815 * Debugfs
2816 */
2817int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
06ab6832 2818 const struct drm_info_list *files,
d38ceaf9
AD
2819 unsigned nfiles)
2820{
2821 unsigned i;
2822
2823 for (i = 0; i < adev->debugfs_count; i++) {
2824 if (adev->debugfs[i].files == files) {
2825 /* Already registered */
2826 return 0;
2827 }
2828 }
2829
2830 i = adev->debugfs_count + 1;
2831 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
2832 DRM_ERROR("Reached maximum number of debugfs components.\n");
2833 DRM_ERROR("Report so we increase "
2834 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
2835 return -EINVAL;
2836 }
2837 adev->debugfs[adev->debugfs_count].files = files;
2838 adev->debugfs[adev->debugfs_count].num_files = nfiles;
2839 adev->debugfs_count = i;
2840#if defined(CONFIG_DEBUG_FS)
d38ceaf9
AD
2841 drm_debugfs_create_files(files, nfiles,
2842 adev->ddev->primary->debugfs_root,
2843 adev->ddev->primary);
2844#endif
2845 return 0;
2846}
2847
d38ceaf9
AD
2848#if defined(CONFIG_DEBUG_FS)
2849
2850static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
2851 size_t size, loff_t *pos)
2852{
45063097 2853 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
2854 ssize_t result = 0;
2855 int r;
bd12267d 2856 bool pm_pg_lock, use_bank;
56628159 2857 unsigned instance_bank, sh_bank, se_bank;
d38ceaf9
AD
2858
2859 if (size & 0x3 || *pos & 0x3)
2860 return -EINVAL;
2861
bd12267d
TSD
2862 /* are we reading registers for which a PG lock is necessary? */
2863 pm_pg_lock = (*pos >> 23) & 1;
2864
56628159
TSD
2865 if (*pos & (1ULL << 62)) {
2866 se_bank = (*pos >> 24) & 0x3FF;
2867 sh_bank = (*pos >> 34) & 0x3FF;
2868 instance_bank = (*pos >> 44) & 0x3FF;
32977f93
TSD
2869
2870 if (se_bank == 0x3FF)
2871 se_bank = 0xFFFFFFFF;
2872 if (sh_bank == 0x3FF)
2873 sh_bank = 0xFFFFFFFF;
2874 if (instance_bank == 0x3FF)
2875 instance_bank = 0xFFFFFFFF;
56628159 2876 use_bank = 1;
56628159
TSD
2877 } else {
2878 use_bank = 0;
2879 }
2880
801a6aa9 2881 *pos &= (1UL << 22) - 1;
bd12267d 2882
56628159 2883 if (use_bank) {
32977f93
TSD
2884 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
2885 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
56628159
TSD
2886 return -EINVAL;
2887 mutex_lock(&adev->grbm_idx_mutex);
2888 amdgpu_gfx_select_se_sh(adev, se_bank,
2889 sh_bank, instance_bank);
2890 }
2891
bd12267d
TSD
2892 if (pm_pg_lock)
2893 mutex_lock(&adev->pm.mutex);
2894
d38ceaf9
AD
2895 while (size) {
2896 uint32_t value;
2897
2898 if (*pos > adev->rmmio_size)
56628159 2899 goto end;
d38ceaf9
AD
2900
2901 value = RREG32(*pos >> 2);
2902 r = put_user(value, (uint32_t *)buf);
56628159
TSD
2903 if (r) {
2904 result = r;
2905 goto end;
2906 }
d38ceaf9
AD
2907
2908 result += 4;
2909 buf += 4;
2910 *pos += 4;
2911 size -= 4;
2912 }
2913
56628159
TSD
2914end:
2915 if (use_bank) {
2916 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2917 mutex_unlock(&adev->grbm_idx_mutex);
2918 }
2919
bd12267d
TSD
2920 if (pm_pg_lock)
2921 mutex_unlock(&adev->pm.mutex);
2922
d38ceaf9
AD
2923 return result;
2924}
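/*
 * Illustrative note (not from the original source): the file offset used on
 * the amdgpu_regs debugfs entry is an encoded value, decoded above as:
 *
 *   bits  0..21  register offset in bytes (offset >> 2 is the dword index)
 *   bit     23   take the PM mutex around the access
 *   bits 24..33  shader engine (SE) bank, 0x3FF = broadcast
 *   bits 34..43  shader array (SH) bank,  0x3FF = broadcast
 *   bits 44..53  instance bank,           0x3FF = broadcast
 *   bit     62   enable SE/SH/instance banking at all
 *
 * A hypothetical userspace sketch reading register dword 0x40 on SE 1 with
 * SH and instance broadcast:
 *
 *   loff_t pos = (1ULL << 62) | (0x3FFULL << 44) | (0x3FFULL << 34) |
 *                (1ULL << 24) | (0x40 << 2);
 *   pread(fd, &val, 4, pos);
 */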
2925
2926static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
2927 size_t size, loff_t *pos)
2928{
45063097 2929 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
2930 ssize_t result = 0;
2931 int r;
394fdde2
TSD
2932 bool pm_pg_lock, use_bank;
2933 unsigned instance_bank, sh_bank, se_bank;
d38ceaf9
AD
2934
2935 if (size & 0x3 || *pos & 0x3)
2936 return -EINVAL;
2937
394fdde2
TSD
2938 /* are we writing registers for which a PG lock is necessary? */
2939 pm_pg_lock = (*pos >> 23) & 1;
2940
2941 if (*pos & (1ULL << 62)) {
2942 se_bank = (*pos >> 24) & 0x3FF;
2943 sh_bank = (*pos >> 34) & 0x3FF;
2944 instance_bank = (*pos >> 44) & 0x3FF;
2945
2946 if (se_bank == 0x3FF)
2947 se_bank = 0xFFFFFFFF;
2948 if (sh_bank == 0x3FF)
2949 sh_bank = 0xFFFFFFFF;
2950 if (instance_bank == 0x3FF)
2951 instance_bank = 0xFFFFFFFF;
2952 use_bank = 1;
2953 } else {
2954 use_bank = 0;
2955 }
2956
801a6aa9 2957 *pos &= (1UL << 22) - 1;
394fdde2
TSD
2958
2959 if (use_bank) {
2960 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
2961 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
2962 return -EINVAL;
2963 mutex_lock(&adev->grbm_idx_mutex);
2964 amdgpu_gfx_select_se_sh(adev, se_bank,
2965 sh_bank, instance_bank);
2966 }
2967
2968 if (pm_pg_lock)
2969 mutex_lock(&adev->pm.mutex);
2970
d38ceaf9
AD
2971 while (size) {
2972 uint32_t value;
2973
2974 if (*pos > adev->rmmio_size)
2975 return result;
2976
2977 r = get_user(value, (uint32_t *)buf);
2978 if (r)
2979 return r;
2980
2981 WREG32(*pos >> 2, value);
2982
2983 result += 4;
2984 buf += 4;
2985 *pos += 4;
2986 size -= 4;
2987 }
2988
394fdde2
TSD
2989 if (use_bank) {
2990 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2991 mutex_unlock(&adev->grbm_idx_mutex);
2992 }
2993
2994 if (pm_pg_lock)
2995 mutex_unlock(&adev->pm.mutex);
2996
d38ceaf9
AD
2997 return result;
2998}
2999
adcec288
TSD
3000static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
3001 size_t size, loff_t *pos)
3002{
45063097 3003 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3004 ssize_t result = 0;
3005 int r;
3006
3007 if (size & 0x3 || *pos & 0x3)
3008 return -EINVAL;
3009
3010 while (size) {
3011 uint32_t value;
3012
3013 value = RREG32_PCIE(*pos >> 2);
3014 r = put_user(value, (uint32_t *)buf);
3015 if (r)
3016 return r;
3017
3018 result += 4;
3019 buf += 4;
3020 *pos += 4;
3021 size -= 4;
3022 }
3023
3024 return result;
3025}
3026
3027static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
3028 size_t size, loff_t *pos)
3029{
45063097 3030 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3031 ssize_t result = 0;
3032 int r;
3033
3034 if (size & 0x3 || *pos & 0x3)
3035 return -EINVAL;
3036
3037 while (size) {
3038 uint32_t value;
3039
3040 r = get_user(value, (uint32_t *)buf);
3041 if (r)
3042 return r;
3043
3044 WREG32_PCIE(*pos >> 2, value);
3045
3046 result += 4;
3047 buf += 4;
3048 *pos += 4;
3049 size -= 4;
3050 }
3051
3052 return result;
3053}
3054
3055static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
3056 size_t size, loff_t *pos)
3057{
45063097 3058 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3059 ssize_t result = 0;
3060 int r;
3061
3062 if (size & 0x3 || *pos & 0x3)
3063 return -EINVAL;
3064
3065 while (size) {
3066 uint32_t value;
3067
3068 value = RREG32_DIDT(*pos >> 2);
3069 r = put_user(value, (uint32_t *)buf);
3070 if (r)
3071 return r;
3072
3073 result += 4;
3074 buf += 4;
3075 *pos += 4;
3076 size -= 4;
3077 }
3078
3079 return result;
3080}
3081
3082static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
3083 size_t size, loff_t *pos)
3084{
45063097 3085 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3086 ssize_t result = 0;
3087 int r;
3088
3089 if (size & 0x3 || *pos & 0x3)
3090 return -EINVAL;
3091
3092 while (size) {
3093 uint32_t value;
3094
3095 r = get_user(value, (uint32_t *)buf);
3096 if (r)
3097 return r;
3098
3099 WREG32_DIDT(*pos >> 2, value);
3100
3101 result += 4;
3102 buf += 4;
3103 *pos += 4;
3104 size -= 4;
3105 }
3106
3107 return result;
3108}
3109
3110static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
3111 size_t size, loff_t *pos)
3112{
45063097 3113 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3114 ssize_t result = 0;
3115 int r;
3116
3117 if (size & 0x3 || *pos & 0x3)
3118 return -EINVAL;
3119
3120 while (size) {
3121 uint32_t value;
3122
6fc0deaf 3123 value = RREG32_SMC(*pos);
adcec288
TSD
3124 r = put_user(value, (uint32_t *)buf);
3125 if (r)
3126 return r;
3127
3128 result += 4;
3129 buf += 4;
3130 *pos += 4;
3131 size -= 4;
3132 }
3133
3134 return result;
3135}
3136
3137static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
3138 size_t size, loff_t *pos)
3139{
45063097 3140 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3141 ssize_t result = 0;
3142 int r;
3143
3144 if (size & 0x3 || *pos & 0x3)
3145 return -EINVAL;
3146
3147 while (size) {
3148 uint32_t value;
3149
3150 r = get_user(value, (uint32_t *)buf);
3151 if (r)
3152 return r;
3153
6fc0deaf 3154 WREG32_SMC(*pos, value);
adcec288
TSD
3155
3156 result += 4;
3157 buf += 4;
3158 *pos += 4;
3159 size -= 4;
3160 }
3161
3162 return result;
3163}
3164
1e051413
TSD
3165static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
3166 size_t size, loff_t *pos)
3167{
45063097 3168 struct amdgpu_device *adev = file_inode(f)->i_private;
1e051413
TSD
3169 ssize_t result = 0;
3170 int r;
3171 uint32_t *config, no_regs = 0;
3172
3173 if (size & 0x3 || *pos & 0x3)
3174 return -EINVAL;
3175
ecab7668 3176 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
1e051413
TSD
3177 if (!config)
3178 return -ENOMEM;
3179
3180 /* version, increment each time something is added */
9a999359 3181 config[no_regs++] = 3;
1e051413
TSD
3182 config[no_regs++] = adev->gfx.config.max_shader_engines;
3183 config[no_regs++] = adev->gfx.config.max_tile_pipes;
3184 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
3185 config[no_regs++] = adev->gfx.config.max_sh_per_se;
3186 config[no_regs++] = adev->gfx.config.max_backends_per_se;
3187 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
3188 config[no_regs++] = adev->gfx.config.max_gprs;
3189 config[no_regs++] = adev->gfx.config.max_gs_threads;
3190 config[no_regs++] = adev->gfx.config.max_hw_contexts;
3191 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
3192 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
3193 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
3194 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
3195 config[no_regs++] = adev->gfx.config.num_tile_pipes;
3196 config[no_regs++] = adev->gfx.config.backend_enable_mask;
3197 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
3198 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
3199 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
3200 config[no_regs++] = adev->gfx.config.num_gpus;
3201 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
3202 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
3203 config[no_regs++] = adev->gfx.config.gb_addr_config;
3204 config[no_regs++] = adev->gfx.config.num_rbs;
3205
89a8f309
TSD
3206 /* rev==1 */
3207 config[no_regs++] = adev->rev_id;
3208 config[no_regs++] = adev->pg_flags;
3209 config[no_regs++] = adev->cg_flags;
3210
e9f11dc8
TSD
3211 /* rev==2 */
3212 config[no_regs++] = adev->family;
3213 config[no_regs++] = adev->external_rev_id;
3214
9a999359
TSD
3215 /* rev==3 */
3216 config[no_regs++] = adev->pdev->device;
3217 config[no_regs++] = adev->pdev->revision;
3218 config[no_regs++] = adev->pdev->subsystem_device;
3219 config[no_regs++] = adev->pdev->subsystem_vendor;
3220
1e051413
TSD
3221 while (size && (*pos < no_regs * 4)) {
3222 uint32_t value;
3223
3224 value = config[*pos >> 2];
3225 r = put_user(value, (uint32_t *)buf);
3226 if (r) {
3227 kfree(config);
3228 return r;
3229 }
3230
3231 result += 4;
3232 buf += 4;
3233 *pos += 4;
3234 size -= 4;
3235 }
3236
3237 kfree(config);
3238 return result;
3239}
3240
f2cdaf20
TSD
3241static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
3242 size_t size, loff_t *pos)
3243{
45063097 3244 struct amdgpu_device *adev = file_inode(f)->i_private;
9f8df7d7
TSD
3245 int idx, x, outsize, r, valuesize;
3246 uint32_t values[16];
f2cdaf20 3247
9f8df7d7 3248 if (size & 3 || *pos & 0x3)
f2cdaf20
TSD
3249 return -EINVAL;
3250
3cbc614f
SP
3251 if (amdgpu_dpm == 0)
3252 return -EINVAL;
3253
f2cdaf20
TSD
3254 /* convert offset to sensor number */
3255 idx = *pos >> 2;
3256
9f8df7d7 3257 valuesize = sizeof(values);
f2cdaf20 3258 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
9f8df7d7 3259 r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
3cbc614f
SP
3260 else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
3261 r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
3262 &valuesize);
f2cdaf20
TSD
3263 else
3264 return -EINVAL;
3265
9f8df7d7
TSD
3266 if (size > valuesize)
3267 return -EINVAL;
3268
3269 outsize = 0;
3270 x = 0;
3271 if (!r) {
3272 while (size) {
3273 r = put_user(values[x++], (int32_t *)buf);
3274 buf += 4;
3275 size -= 4;
3276 outsize += 4;
3277 }
3278 }
f2cdaf20 3279
9f8df7d7 3280 return !r ? outsize : r;
f2cdaf20 3281}
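/*
 * Illustrative note (not from the original source): the amdgpu_sensors file
 * treats the offset as a sensor index, four bytes per sensor
 * (idx = *pos >> 2).  A hypothetical userspace sketch reading the sensor at
 * index 2:
 *
 *   uint32_t val;
 *   pread(fd, &val, 4, 2 * 4);   /- *pos = 8, so idx = 2 -/
 *
 * Which index maps to which sensor is defined by the powerplay read_sensor()
 * implementation and is not repeated here.
 */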
1e051413 3282
273d7aa1
TSD
3283static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
3284 size_t size, loff_t *pos)
3285{
3286 struct amdgpu_device *adev = f->f_inode->i_private;
3287 int r, x;
3288 ssize_t result = 0;
472259f0 3289 uint32_t offset, se, sh, cu, wave, simd, data[32];
273d7aa1
TSD
3290
3291 if (size & 3 || *pos & 3)
3292 return -EINVAL;
3293
3294 /* decode offset */
3295 offset = (*pos & 0x7F);
3296 se = ((*pos >> 7) & 0xFF);
3297 sh = ((*pos >> 15) & 0xFF);
3298 cu = ((*pos >> 23) & 0xFF);
3299 wave = ((*pos >> 31) & 0xFF);
3300 simd = ((*pos >> 37) & 0xFF);
273d7aa1
TSD
3301
3302 /* switch to the specific se/sh/cu */
3303 mutex_lock(&adev->grbm_idx_mutex);
3304 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3305
3306 x = 0;
472259f0
TSD
3307 if (adev->gfx.funcs->read_wave_data)
3308 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
273d7aa1
TSD
3309
3310 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3311 mutex_unlock(&adev->grbm_idx_mutex);
3312
5ecfb3b8
TSD
3313 if (!x)
3314 return -EINVAL;
3315
472259f0 3316 while (size && (offset < x * 4)) {
273d7aa1
TSD
3317 uint32_t value;
3318
472259f0 3319 value = data[offset >> 2];
273d7aa1
TSD
3320 r = put_user(value, (uint32_t *)buf);
3321 if (r)
3322 return r;
3323
3324 result += 4;
3325 buf += 4;
472259f0 3326 offset += 4;
273d7aa1
TSD
3327 size -= 4;
3328 }
3329
3330 return result;
3331}
3332
c5a60ce8
TSD
3333static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
3334 size_t size, loff_t *pos)
3335{
3336 struct amdgpu_device *adev = f->f_inode->i_private;
3337 int r;
3338 ssize_t result = 0;
3339 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
3340
3341 if (size & 3 || *pos & 3)
3342 return -EINVAL;
3343
3344 /* decode offset */
3345 offset = (*pos & 0xFFF); /* in dwords */
3346 se = ((*pos >> 12) & 0xFF);
3347 sh = ((*pos >> 20) & 0xFF);
3348 cu = ((*pos >> 28) & 0xFF);
3349 wave = ((*pos >> 36) & 0xFF);
3350 simd = ((*pos >> 44) & 0xFF);
3351 thread = ((*pos >> 52) & 0xFF);
3352 bank = ((*pos >> 60) & 1);
3353
3354 data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
3355 if (!data)
3356 return -ENOMEM;
3357
3358 /* switch to the specific se/sh/cu */
3359 mutex_lock(&adev->grbm_idx_mutex);
3360 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3361
3362 if (bank == 0) {
3363 if (adev->gfx.funcs->read_wave_vgprs)
3364 adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
3365 } else {
3366 if (adev->gfx.funcs->read_wave_sgprs)
3367 adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
3368 }
3369
3370 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3371 mutex_unlock(&adev->grbm_idx_mutex);
3372
3373 while (size) {
3374 uint32_t value;
3375
3376 value = data[offset++];
3377 r = put_user(value, (uint32_t *)buf);
3378 if (r) {
3379 result = r;
3380 goto err;
3381 }
3382
3383 result += 4;
3384 buf += 4;
3385 size -= 4;
3386 }
3387
3388err:
3389 kfree(data);
3390 return result;
3391}
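/*
 * Illustrative note (not from the original source): the amdgpu_gpr file
 * packs the wave selection into the file offset, decoded above as:
 *
 *   bits  0..11  register offset in dwords
 *   bits 12..19  SE,   bits 20..27  SH,    bits 28..35  CU
 *   bits 36..43  wave, bits 44..51  simd,  bits 52..59  thread
 *   bit     60   bank: 0 = VGPRs, 1 = SGPRs
 *
 * A hypothetical sketch reading the first 16 SGPR dwords of wave 0, simd 0
 * on SE0/SH0/CU0:
 *
 *   loff_t pos = 1ULL << 60;
 *   pread(fd, data, 16 * 4, pos);
 */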
3392
d38ceaf9
AD
3393static const struct file_operations amdgpu_debugfs_regs_fops = {
3394 .owner = THIS_MODULE,
3395 .read = amdgpu_debugfs_regs_read,
3396 .write = amdgpu_debugfs_regs_write,
3397 .llseek = default_llseek
3398};
adcec288
TSD
3399static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3400 .owner = THIS_MODULE,
3401 .read = amdgpu_debugfs_regs_didt_read,
3402 .write = amdgpu_debugfs_regs_didt_write,
3403 .llseek = default_llseek
3404};
3405static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3406 .owner = THIS_MODULE,
3407 .read = amdgpu_debugfs_regs_pcie_read,
3408 .write = amdgpu_debugfs_regs_pcie_write,
3409 .llseek = default_llseek
3410};
3411static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3412 .owner = THIS_MODULE,
3413 .read = amdgpu_debugfs_regs_smc_read,
3414 .write = amdgpu_debugfs_regs_smc_write,
3415 .llseek = default_llseek
3416};
3417
1e051413
TSD
3418static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3419 .owner = THIS_MODULE,
3420 .read = amdgpu_debugfs_gca_config_read,
3421 .llseek = default_llseek
3422};
3423
f2cdaf20
TSD
3424static const struct file_operations amdgpu_debugfs_sensors_fops = {
3425 .owner = THIS_MODULE,
3426 .read = amdgpu_debugfs_sensor_read,
3427 .llseek = default_llseek
3428};
3429
273d7aa1
TSD
3430static const struct file_operations amdgpu_debugfs_wave_fops = {
3431 .owner = THIS_MODULE,
3432 .read = amdgpu_debugfs_wave_read,
3433 .llseek = default_llseek
3434};
c5a60ce8
TSD
3435static const struct file_operations amdgpu_debugfs_gpr_fops = {
3436 .owner = THIS_MODULE,
3437 .read = amdgpu_debugfs_gpr_read,
3438 .llseek = default_llseek
3439};
273d7aa1 3440
adcec288
TSD
3441static const struct file_operations *debugfs_regs[] = {
3442 &amdgpu_debugfs_regs_fops,
3443 &amdgpu_debugfs_regs_didt_fops,
3444 &amdgpu_debugfs_regs_pcie_fops,
3445 &amdgpu_debugfs_regs_smc_fops,
1e051413 3446 &amdgpu_debugfs_gca_config_fops,
f2cdaf20 3447 &amdgpu_debugfs_sensors_fops,
273d7aa1 3448 &amdgpu_debugfs_wave_fops,
c5a60ce8 3449 &amdgpu_debugfs_gpr_fops,
adcec288
TSD
3450};
3451
3452static const char *debugfs_regs_names[] = {
3453 "amdgpu_regs",
3454 "amdgpu_regs_didt",
3455 "amdgpu_regs_pcie",
3456 "amdgpu_regs_smc",
1e051413 3457 "amdgpu_gca_config",
f2cdaf20 3458 "amdgpu_sensors",
273d7aa1 3459 "amdgpu_wave",
c5a60ce8 3460 "amdgpu_gpr",
adcec288 3461};
d38ceaf9
AD
3462
3463static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3464{
3465 struct drm_minor *minor = adev->ddev->primary;
3466 struct dentry *ent, *root = minor->debugfs_root;
adcec288
TSD
3467 unsigned i, j;
3468
3469 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3470 ent = debugfs_create_file(debugfs_regs_names[i],
3471 S_IFREG | S_IRUGO, root,
3472 adev, debugfs_regs[i]);
3473 if (IS_ERR(ent)) {
3474 for (j = 0; j < i; j++) {
3475 debugfs_remove(adev->debugfs_regs[j]);
3476 adev->debugfs_regs[j] = NULL;
3477 }
3478 return PTR_ERR(ent);
3479 }
d38ceaf9 3480
adcec288
TSD
3481 if (!i)
3482 i_size_write(ent->d_inode, adev->rmmio_size);
3483 adev->debugfs_regs[i] = ent;
3484 }
d38ceaf9
AD
3485
3486 return 0;
3487}
3488
3489static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
3490{
adcec288
TSD
3491 unsigned i;
3492
3493 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3494 if (adev->debugfs_regs[i]) {
3495 debugfs_remove(adev->debugfs_regs[i]);
3496 adev->debugfs_regs[i] = NULL;
3497 }
3498 }
d38ceaf9
AD
3499}
3500
3501int amdgpu_debugfs_init(struct drm_minor *minor)
3502{
3503 return 0;
3504}
7cebc728
AK
3505#else
3506static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3507{
3508 return 0;
3509}
3510static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
d38ceaf9 3511#endif