drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/kthread.h>
29#include <linux/console.h>
30#include <linux/slab.h>
31#include <linux/debugfs.h>
32#include <drm/drmP.h>
33#include <drm/drm_crtc_helper.h>
34#include <drm/amdgpu_drm.h>
35#include <linux/vgaarb.h>
36#include <linux/vga_switcheroo.h>
37#include <linux/efi.h>
38#include "amdgpu.h"
39#include "amdgpu_trace.h"
40#include "amdgpu_i2c.h"
41#include "atom.h"
42#include "amdgpu_atombios.h"
43#include "amdgpu_atomfirmware.h"
44#include "amd_pcie.h"
45#ifdef CONFIG_DRM_AMDGPU_SI
46#include "si.h"
47#endif
48#ifdef CONFIG_DRM_AMDGPU_CIK
49#include "cik.h"
50#endif
51#include "vi.h"
52#include "soc15.h"
53#include "bif/bif_4_1_d.h"
54#include <linux/pci.h>
55#include <linux/firmware.h>
56#include "amdgpu_pm.h"
57
58static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
59static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
60
61static const char *amdgpu_asic_name[] = {
62 "TAHITI",
63 "PITCAIRN",
64 "VERDE",
65 "OLAND",
66 "HAINAN",
67 "BONAIRE",
68 "KAVERI",
69 "KABINI",
70 "HAWAII",
71 "MULLINS",
72 "TOPAZ",
73 "TONGA",
74 "FIJI",
75 "CARRIZO",
76 "STONEY",
77 "POLARIS10",
78 "POLARIS11",
79 "POLARIS12",
80 "VEGA10",
81 "LAST",
82};
83
84bool amdgpu_device_is_px(struct drm_device *dev)
85{
86 struct amdgpu_device *adev = dev->dev_private;
87
88 if (adev->flags & AMD_IS_PX)
89 return true;
90 return false;
91}
92
93/*
94 * MMIO register access helper functions.
95 */
96uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
97 uint32_t acc_flags)
98{
99 uint32_t ret;
100
101 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
102 BUG_ON(in_interrupt());
103 return amdgpu_virt_kiq_rreg(adev, reg);
104 }
105
106 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
107 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
108 else {
109 unsigned long flags;
110
111 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
112 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
113 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
114 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
115 }
116 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
117 return ret;
118}
119
120void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
121 uint32_t acc_flags)
122{
123 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
124
125 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
126 BUG_ON(in_interrupt());
127 return amdgpu_virt_kiq_wreg(adev, reg, v);
128 }
129
130 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
131 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
132 else {
133 unsigned long flags;
134
135 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
136 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
137 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
138 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
139 }
140}
141
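/*
 * Illustrative usage sketch (not from the original source; assumes a fully
 * initialized struct amdgpu_device *adev). In the rest of the driver these
 * accessors are normally reached through the RREG32()/WREG32()-style macros
 * in amdgpu.h rather than called directly:
 *
 *	uint32_t tmp = amdgpu_mm_rreg(adev, mmMM_INDEX, 0);
 *	amdgpu_mm_wreg(adev, mmMM_INDEX, tmp, 0);
 *
 * Offsets beyond rmmio_size transparently take the indirect
 * MM_INDEX/MM_DATA path under mmio_idx_lock, as implemented above.
 */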
142u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
143{
144 if ((reg * 4) < adev->rio_mem_size)
145 return ioread32(adev->rio_mem + (reg * 4));
146 else {
147 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
148 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
149 }
150}
151
152void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
153{
154
155 if ((reg * 4) < adev->rio_mem_size)
156 iowrite32(v, adev->rio_mem + (reg * 4));
157 else {
158 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
159 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
160 }
161}
162
163/**
164 * amdgpu_mm_rdoorbell - read a doorbell dword
165 *
166 * @adev: amdgpu_device pointer
167 * @index: doorbell index
168 *
169 * Returns the value in the doorbell aperture at the
170 * requested doorbell index (CIK).
171 */
172u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
173{
174 if (index < adev->doorbell.num_doorbells) {
175 return readl(adev->doorbell.ptr + index);
176 } else {
177 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
178 return 0;
179 }
180}
181
182/**
183 * amdgpu_mm_wdoorbell - write a doorbell dword
184 *
185 * @adev: amdgpu_device pointer
186 * @index: doorbell index
187 * @v: value to write
188 *
189 * Writes @v to the doorbell aperture at the
190 * requested doorbell index (CIK).
191 */
192void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
193{
194 if (index < adev->doorbell.num_doorbells) {
195 writel(v, adev->doorbell.ptr + index);
196 } else {
197 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
198 }
199}
200
201/**
202 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
203 *
204 * @adev: amdgpu_device pointer
205 * @index: doorbell index
206 *
207 * Returns the value in the doorbell aperture at the
208 * requested doorbell index (VEGA10+).
209 */
210u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
211{
212 if (index < adev->doorbell.num_doorbells) {
213 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
214 } else {
215 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
216 return 0;
217 }
218}
219
220/**
221 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
222 *
223 * @adev: amdgpu_device pointer
224 * @index: doorbell index
225 * @v: value to write
226 *
227 * Writes @v to the doorbell aperture at the
228 * requested doorbell index (VEGA10+).
229 */
230void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
231{
232 if (index < adev->doorbell.num_doorbells) {
233 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
234 } else {
235 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
236 }
237}
238
239/**
240 * amdgpu_invalid_rreg - dummy reg read function
241 *
242 * @adev: amdgpu device pointer
243 * @reg: offset of register
244 *
245 * Dummy register read function. Used for register blocks
246 * that certain asics don't have (all asics).
247 * Returns the value in the register.
248 */
249static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
250{
251 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
252 BUG();
253 return 0;
254}
255
256/**
257 * amdgpu_invalid_wreg - dummy reg write function
258 *
259 * @adev: amdgpu device pointer
260 * @reg: offset of register
261 * @v: value to write to the register
262 *
263 * Dummy register read function. Used for register blocks
264 * that certain asics don't have (all asics).
265 */
266static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
267{
268 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
269 reg, v);
270 BUG();
271}
272
273/**
274 * amdgpu_block_invalid_rreg - dummy reg read function
275 *
276 * @adev: amdgpu device pointer
277 * @block: offset of instance
278 * @reg: offset of register
279 *
280 * Dummy register read function. Used for register blocks
281 * that certain asics don't have (all asics).
282 * Returns the value in the register.
283 */
284static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
285 uint32_t block, uint32_t reg)
286{
287 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
288 reg, block);
289 BUG();
290 return 0;
291}
292
293/**
294 * amdgpu_block_invalid_wreg - dummy reg write function
295 *
296 * @adev: amdgpu device pointer
297 * @block: offset of instance
298 * @reg: offset of register
299 * @v: value to write to the register
300 *
301 * Dummy register read function. Used for register blocks
302 * that certain asics don't have (all asics).
303 */
304static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
305 uint32_t block,
306 uint32_t reg, uint32_t v)
307{
308 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
309 reg, block, v);
310 BUG();
311}
312
313static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
314{
315 int r;
316
317 if (adev->vram_scratch.robj == NULL) {
318 r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
319 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
320 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
321 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
322 NULL, NULL, &adev->vram_scratch.robj);
323 if (r) {
324 return r;
325 }
326 }
327
328 r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
329 if (unlikely(r != 0))
330 return r;
331 r = amdgpu_bo_pin(adev->vram_scratch.robj,
332 AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
333 if (r) {
334 amdgpu_bo_unreserve(adev->vram_scratch.robj);
335 return r;
336 }
337 r = amdgpu_bo_kmap(adev->vram_scratch.robj,
338 (void **)&adev->vram_scratch.ptr);
339 if (r)
340 amdgpu_bo_unpin(adev->vram_scratch.robj);
341 amdgpu_bo_unreserve(adev->vram_scratch.robj);
342
343 return r;
344}
345
346static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
347{
348 int r;
349
350 if (adev->vram_scratch.robj == NULL) {
351 return;
352 }
353 r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
354 if (likely(r == 0)) {
355 amdgpu_bo_kunmap(adev->vram_scratch.robj);
356 amdgpu_bo_unpin(adev->vram_scratch.robj);
357 amdgpu_bo_unreserve(adev->vram_scratch.robj);
358 }
359 amdgpu_bo_unref(&adev->vram_scratch.robj);
360}
361
362/**
363 * amdgpu_program_register_sequence - program an array of registers.
364 *
365 * @adev: amdgpu_device pointer
366 * @registers: pointer to the register array
367 * @array_size: size of the register array
368 *
369 * Programs an array of registers with AND and OR masks.
370 * This is a helper for setting golden registers.
371 */
372void amdgpu_program_register_sequence(struct amdgpu_device *adev,
373 const u32 *registers,
374 const u32 array_size)
375{
376 u32 tmp, reg, and_mask, or_mask;
377 int i;
378
379 if (array_size % 3)
380 return;
381
382 for (i = 0; i < array_size; i +=3) {
383 reg = registers[i + 0];
384 and_mask = registers[i + 1];
385 or_mask = registers[i + 2];
386
387 if (and_mask == 0xffffffff) {
388 tmp = or_mask;
389 } else {
390 tmp = RREG32(reg);
391 tmp &= ~and_mask;
392 tmp |= or_mask;
393 }
394 WREG32(reg, tmp);
395 }
396}
397
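/*
 * Hedged example of the array consumed above (the register names are
 * hypothetical placeholders, not real ASIC registers). The array is a flat
 * list of {offset, and_mask, or_mask} triples, and array_size must be a
 * multiple of 3 or the function returns without programming anything:
 *
 *	static const u32 example_golden_settings[] = {
 *		mmEXAMPLE_REG_A, 0xffffffff, 0x00000001,  // and_mask all ones: or_mask is written directly
 *		mmEXAMPLE_REG_B, 0x0000ff00, 0x00003400,  // otherwise: clear and_mask bits, then OR in or_mask
 *	};
 *	amdgpu_program_register_sequence(adev, example_golden_settings,
 *					 ARRAY_SIZE(example_golden_settings));
 */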
398void amdgpu_pci_config_reset(struct amdgpu_device *adev)
399{
400 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
401}
402
403/*
404 * GPU doorbell aperture helpers function.
405 */
406/**
407 * amdgpu_doorbell_init - Init doorbell driver information.
408 *
409 * @adev: amdgpu_device pointer
410 *
411 * Init doorbell driver information (CIK)
412 * Returns 0 on success, error on failure.
413 */
414static int amdgpu_doorbell_init(struct amdgpu_device *adev)
415{
416 /* doorbell bar mapping */
417 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
418 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
419
420 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
421 AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
422 if (adev->doorbell.num_doorbells == 0)
423 return -EINVAL;
424
425 adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32));
426 if (adev->doorbell.ptr == NULL) {
427 return -ENOMEM;
428 }
429 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base);
430 DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size);
431
432 return 0;
433}
434
435/**
436 * amdgpu_doorbell_fini - Tear down doorbell driver information.
437 *
438 * @adev: amdgpu_device pointer
439 *
440 * Tear down doorbell driver information (CIK)
441 */
442static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
443{
444 iounmap(adev->doorbell.ptr);
445 adev->doorbell.ptr = NULL;
446}
447
448/**
449 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
450 * setup amdkfd
451 *
452 * @adev: amdgpu_device pointer
453 * @aperture_base: output returning doorbell aperture base physical address
454 * @aperture_size: output returning doorbell aperture size in bytes
455 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
456 *
457 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
458 * takes doorbells required for its own rings and reports the setup to amdkfd.
459 * amdgpu reserved doorbells are at the start of the doorbell aperture.
460 */
461void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
462 phys_addr_t *aperture_base,
463 size_t *aperture_size,
464 size_t *start_offset)
465{
466 /*
467 * The first num_doorbells are used by amdgpu.
468 * amdkfd takes whatever's left in the aperture.
469 */
470 if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
471 *aperture_base = adev->doorbell.base;
472 *aperture_size = adev->doorbell.size;
473 *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
474 } else {
475 *aperture_base = 0;
476 *aperture_size = 0;
477 *start_offset = 0;
478 }
479}
480
481/*
482 * amdgpu_wb_*()
483 * Writeback is the method by which the GPU updates special pages
484 * in memory with the status of certain GPU events (fences, ring pointers,
485 * etc.).
486 */
487
488/**
489 * amdgpu_wb_fini - Disable Writeback and free memory
490 *
491 * @adev: amdgpu_device pointer
492 *
493 * Disables Writeback and frees the Writeback memory (all asics).
494 * Used at driver shutdown.
495 */
496static void amdgpu_wb_fini(struct amdgpu_device *adev)
497{
498 if (adev->wb.wb_obj) {
499 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
500 &adev->wb.gpu_addr,
501 (void **)&adev->wb.wb);
502 adev->wb.wb_obj = NULL;
503 }
504}
505
506/**
507 * amdgpu_wb_init- Init Writeback driver info and allocate memory
508 *
509 * @adev: amdgpu_device pointer
510 *
511 * Initializes writeback and allocates the writeback memory (all asics).
512 * Used at driver startup.
513 * Returns 0 on success or a negative error code on failure.
514 */
515static int amdgpu_wb_init(struct amdgpu_device *adev)
516{
517 int r;
518
519 if (adev->wb.wb_obj == NULL) {
520 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
521 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
522 &adev->wb.wb_obj, &adev->wb.gpu_addr,
523 (void **)&adev->wb.wb);
524 if (r) {
525 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
526 return r;
527 }
528
529 adev->wb.num_wb = AMDGPU_MAX_WB;
530 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
531
532 /* clear wb memory */
533 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
534 }
535
536 return 0;
537}
538
539/**
540 * amdgpu_wb_get - Allocate a wb entry
541 *
542 * @adev: amdgpu_device pointer
543 * @wb: wb index
544 *
545 * Allocate a wb slot for use by the driver (all asics).
546 * Returns 0 on success or -EINVAL on failure.
547 */
548int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
549{
550 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
551 if (offset < adev->wb.num_wb) {
552 __set_bit(offset, adev->wb.used);
553 *wb = offset;
554 return 0;
555 } else {
556 return -EINVAL;
557 }
558}
559
560/**
561 * amdgpu_wb_get_64bit - Allocate a wb entry
562 *
563 * @adev: amdgpu_device pointer
564 * @wb: wb index
565 *
566 * Allocate a wb slot for use by the driver (all asics).
567 * Returns 0 on success or -EINVAL on failure.
568 */
569int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
570{
571 unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
572 adev->wb.num_wb, 0, 2, 7, 0);
573 if ((offset + 1) < adev->wb.num_wb) {
574 __set_bit(offset, adev->wb.used);
575 __set_bit(offset + 1, adev->wb.used);
576 *wb = offset;
577 return 0;
578 } else {
579 return -EINVAL;
580 }
581}
582
583/**
584 * amdgpu_wb_free - Free a wb entry
585 *
586 * @adev: amdgpu_device pointer
587 * @wb: wb index
588 *
589 * Free a wb slot allocated for use by the driver (all asics)
590 */
591void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
592{
593 if (wb < adev->wb.num_wb)
594 __clear_bit(wb, adev->wb.used);
595}
596
597/**
598 * amdgpu_wb_free_64bit - Free a wb entry
599 *
600 * @adev: amdgpu_device pointer
601 * @wb: wb index
602 *
603 * Free a wb slot allocated for use by the driver (all asics)
604 */
605void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
606{
607 if ((wb + 1) < adev->wb.num_wb) {
608 __clear_bit(wb, adev->wb.used);
609 __clear_bit(wb + 1, adev->wb.used);
610 }
611}
612
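/*
 * Rough usage sketch for the writeback helpers above (assumed arithmetic,
 * for illustration only; the ring and fence code is the authoritative user).
 * A caller grabs a slot index and derives the CPU and GPU views of it from
 * the shared writeback buffer:
 *
 *	u32 wb;
 *	if (amdgpu_wb_get(adev, &wb) == 0) {
 *		volatile uint32_t *cpu_ptr = &adev->wb.wb[wb];	// CPU-visible dword
 *		uint64_t gpu_addr = adev->wb.gpu_addr + wb * 4;	// address the GPU writes to
 *		// ... hand gpu_addr to a ring/fence, read *cpu_ptr later ...
 *		amdgpu_wb_free(adev, wb);
 *	}
 */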
613/**
614 * amdgpu_vram_location - try to find VRAM location
615 * @adev: amdgpu device structure holding all necessary informations
616 * @mc: memory controller structure holding memory informations
617 * @base: base address at which to put VRAM
618 *
619 * Function will try to place VRAM at the base address provided
620 * as parameter (which is so far either PCI aperture address or
621 * for IGP TOM base address).
622 *
623 * If there is not enough space to fit the invisible VRAM in the 32bits
624 * address space then we limit the VRAM size to the aperture.
625 *
626 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
627 * this shouldn't be a problem as we are using the PCI aperture as a reference.
628 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
629 * not IGP.
630 *
631 * Note: we use mc_vram_size as on some board we need to program the mc to
632 * cover the whole aperture even if VRAM size is inferior to aperture size
633 * Novell bug 204882 + along with lots of ubuntu ones
634 *
635 * Note: when limiting vram it's safe to overwrite real_vram_size because
636 * we are not in the case where real_vram_size is inferior to mc_vram_size (ie
637 * not affected by the bogus hw of Novell bug 204882 along with lots of ubuntu
638 * ones)
639 *
640 * Note: IGP TOM addr should be the same as the aperture addr, we don't
641 * explicitly check for that though.
642 *
643 * FIXME: when reducing VRAM size align new size on power of 2.
644 */
645void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
646{
647 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
648
649 mc->vram_start = base;
650 if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
651 dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
652 mc->real_vram_size = mc->aper_size;
653 mc->mc_vram_size = mc->aper_size;
654 }
655 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
656 if (limit && limit < mc->real_vram_size)
657 mc->real_vram_size = limit;
658 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
659 mc->mc_vram_size >> 20, mc->vram_start,
660 mc->vram_end, mc->real_vram_size >> 20);
661}
662
663/**
664 * amdgpu_gtt_location - try to find GTT location
665 * @adev: amdgpu device structure holding all necessary informations
666 * @mc: memory controller structure holding memory informations
667 *
668 * Function will try to place GTT before or after VRAM.
669 *
670 * If GTT size is bigger than the space left then we adjust the GTT size.
671 * Thus this function will never fail.
672 *
673 * FIXME: when reducing GTT size align new size on power of 2.
674 */
675void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
676{
677 u64 size_af, size_bf;
678
679 size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
680 size_bf = mc->vram_start & ~mc->gtt_base_align;
681 if (size_bf > size_af) {
682 if (mc->gtt_size > size_bf) {
683 dev_warn(adev->dev, "limiting GTT\n");
684 mc->gtt_size = size_bf;
685 }
686 mc->gtt_start = 0;
687 } else {
688 if (mc->gtt_size > size_af) {
689 dev_warn(adev->dev, "limiting GTT\n");
690 mc->gtt_size = size_af;
691 }
692 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
693 }
694 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
695 dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
696 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
697}
698
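/*
 * Worked example with assumed numbers (not from the original source): with a
 * 40-bit mc_mask and 8 GB of VRAM placed at base 0, vram_start is 0x0 and
 * vram_end is 0x1ffffffff. There is then no room below VRAM (size_bf == 0),
 * so the GTT is placed right after it, aligned by gtt_base_align, e.g.
 * gtt_start == 0x200000000, and gtt_end follows from gtt_size.
 */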
699/*
700 * GPU helpers function.
701 */
702/**
703 * amdgpu_need_post - check if the hw needs post or not
704 *
705 * @adev: amdgpu_device pointer
706 *
707 * Check if the asic has been initialized (all asics) at driver startup
708 * or post is needed if hw reset is performed.
709 * Returns true if post is needed, false if not.
710 */
711bool amdgpu_need_post(struct amdgpu_device *adev)
712{
713 uint32_t reg;
714
715 if (adev->has_hw_reset) {
716 adev->has_hw_reset = false;
717 return true;
718 }
719 /* then check MEM_SIZE, in case the crtcs are off */
720 reg = amdgpu_asic_get_config_memsize(adev);
721
722 if ((reg != 0) && (reg != 0xffffffff))
723 return false;
724
725 return true;
726
727}
728
729static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
730{
731 if (amdgpu_sriov_vf(adev))
732 return false;
733
734 if (amdgpu_passthrough(adev)) {
735 /* for FIJI: in the whole-GPU pass-through virtualization case, after a VM reboot
736 * some old SMC firmware still needs the driver to do a vPost, otherwise the GPU hangs;
737 * SMC firmware versions above 22.15 don't have this flaw, so we force
738 * vPost to be executed for SMC versions below 22.15
739 */
740 if (adev->asic_type == CHIP_FIJI) {
741 int err;
742 uint32_t fw_ver;
743 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
744 /* force vPost if an error occurred */
745 if (err)
746 return true;
747
748 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
749 if (fw_ver < 0x00160e00)
750 return true;
751 }
752 }
753 return amdgpu_need_post(adev);
754}
755
756/**
757 * amdgpu_dummy_page_init - init dummy page used by the driver
758 *
759 * @adev: amdgpu_device pointer
760 *
761 * Allocate the dummy page used by the driver (all asics).
762 * This dummy page is used by the driver as a filler for gart entries
763 * when pages are taken out of the GART
764 * Returns 0 on success, -ENOMEM on failure.
765 */
766int amdgpu_dummy_page_init(struct amdgpu_device *adev)
767{
768 if (adev->dummy_page.page)
769 return 0;
770 adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
771 if (adev->dummy_page.page == NULL)
772 return -ENOMEM;
773 adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
774 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
775 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
776 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
777 __free_page(adev->dummy_page.page);
778 adev->dummy_page.page = NULL;
779 return -ENOMEM;
780 }
781 return 0;
782}
783
784/**
785 * amdgpu_dummy_page_fini - free dummy page used by the driver
786 *
787 * @adev: amdgpu_device pointer
788 *
789 * Frees the dummy page used by the driver (all asics).
790 */
791void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
792{
793 if (adev->dummy_page.page == NULL)
794 return;
795 pci_unmap_page(adev->pdev, adev->dummy_page.addr,
796 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
797 __free_page(adev->dummy_page.page);
798 adev->dummy_page.page = NULL;
799}
800
801
802/* ATOM accessor methods */
803/*
804 * ATOM is an interpreted byte code stored in tables in the vbios. The
805 * driver registers callbacks to access registers and the interpreter
806 * in the driver parses the tables and executes them to program specific
807 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
808 * atombios.h, and atom.c
809 */
810
811/**
812 * cail_pll_read - read PLL register
813 *
814 * @info: atom card_info pointer
815 * @reg: PLL register offset
816 *
817 * Provides a PLL register accessor for the atom interpreter (r4xx+).
818 * Returns the value of the PLL register.
819 */
820static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
821{
822 return 0;
823}
824
825/**
826 * cail_pll_write - write PLL register
827 *
828 * @info: atom card_info pointer
829 * @reg: PLL register offset
830 * @val: value to write to the pll register
831 *
832 * Provides a PLL register accessor for the atom interpreter (r4xx+).
833 */
834static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
835{
836
837}
838
839/**
840 * cail_mc_read - read MC (Memory Controller) register
841 *
842 * @info: atom card_info pointer
843 * @reg: MC register offset
844 *
845 * Provides an MC register accessor for the atom interpreter (r4xx+).
846 * Returns the value of the MC register.
847 */
848static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
849{
850 return 0;
851}
852
853/**
854 * cail_mc_write - write MC (Memory Controller) register
855 *
856 * @info: atom card_info pointer
857 * @reg: MC register offset
858 * @val: value to write to the pll register
859 *
860 * Provides a MC register accessor for the atom interpreter (r4xx+).
861 */
862static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
863{
864
865}
866
867/**
868 * cail_reg_write - write MMIO register
869 *
870 * @info: atom card_info pointer
871 * @reg: MMIO register offset
872 * @val: value to write to the pll register
873 *
874 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
875 */
876static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
877{
878 struct amdgpu_device *adev = info->dev->dev_private;
879
880 WREG32(reg, val);
881}
882
883/**
884 * cail_reg_read - read MMIO register
885 *
886 * @info: atom card_info pointer
887 * @reg: MMIO register offset
888 *
889 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
890 * Returns the value of the MMIO register.
891 */
892static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
893{
894 struct amdgpu_device *adev = info->dev->dev_private;
895 uint32_t r;
896
897 r = RREG32(reg);
898 return r;
899}
900
901/**
902 * cail_ioreg_write - write IO register
903 *
904 * @info: atom card_info pointer
905 * @reg: IO register offset
906 * @val: value to write to the pll register
907 *
908 * Provides a IO register accessor for the atom interpreter (r4xx+).
909 */
910static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
911{
912 struct amdgpu_device *adev = info->dev->dev_private;
913
914 WREG32_IO(reg, val);
915}
916
917/**
918 * cail_ioreg_read - read IO register
919 *
920 * @info: atom card_info pointer
921 * @reg: IO register offset
922 *
923 * Provides an IO register accessor for the atom interpreter (r4xx+).
924 * Returns the value of the IO register.
925 */
926static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
927{
928 struct amdgpu_device *adev = info->dev->dev_private;
929 uint32_t r;
930
931 r = RREG32_IO(reg);
932 return r;
933}
934
935/**
936 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
937 *
938 * @adev: amdgpu_device pointer
939 *
940 * Frees the driver info and register access callbacks for the ATOM
941 * interpreter (r4xx+).
942 * Called at driver shutdown.
943 */
944static void amdgpu_atombios_fini(struct amdgpu_device *adev)
945{
946 if (adev->mode_info.atom_context) {
947 kfree(adev->mode_info.atom_context->scratch);
948 kfree(adev->mode_info.atom_context->iio);
949 }
950 kfree(adev->mode_info.atom_context);
951 adev->mode_info.atom_context = NULL;
952 kfree(adev->mode_info.atom_card_info);
953 adev->mode_info.atom_card_info = NULL;
954}
955
956/**
957 * amdgpu_atombios_init - init the driver info and callbacks for atombios
958 *
959 * @adev: amdgpu_device pointer
960 *
961 * Initializes the driver info and register access callbacks for the
962 * ATOM interpreter (r4xx+).
963 * Returns 0 on success, -ENOMEM on failure.
964 * Called at driver startup.
965 */
966static int amdgpu_atombios_init(struct amdgpu_device *adev)
967{
968 struct card_info *atom_card_info =
969 kzalloc(sizeof(struct card_info), GFP_KERNEL);
970
971 if (!atom_card_info)
972 return -ENOMEM;
973
974 adev->mode_info.atom_card_info = atom_card_info;
975 atom_card_info->dev = adev->ddev;
976 atom_card_info->reg_read = cail_reg_read;
977 atom_card_info->reg_write = cail_reg_write;
978 /* needed for iio ops */
979 if (adev->rio_mem) {
980 atom_card_info->ioreg_read = cail_ioreg_read;
981 atom_card_info->ioreg_write = cail_ioreg_write;
982 } else {
983 DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
984 atom_card_info->ioreg_read = cail_reg_read;
985 atom_card_info->ioreg_write = cail_reg_write;
986 }
987 atom_card_info->mc_read = cail_mc_read;
988 atom_card_info->mc_write = cail_mc_write;
989 atom_card_info->pll_read = cail_pll_read;
990 atom_card_info->pll_write = cail_pll_write;
991
992 adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
993 if (!adev->mode_info.atom_context) {
994 amdgpu_atombios_fini(adev);
995 return -ENOMEM;
996 }
997
998 mutex_init(&adev->mode_info.atom_context->mutex);
999 if (adev->is_atom_fw) {
1000 amdgpu_atomfirmware_scratch_regs_init(adev);
1001 amdgpu_atomfirmware_allocate_fb_scratch(adev);
1002 } else {
1003 amdgpu_atombios_scratch_regs_init(adev);
1004 amdgpu_atombios_allocate_fb_scratch(adev);
1005 }
1006 return 0;
1007}
1008
1009/* if we get transitioned to only one device, take VGA back */
1010/**
1011 * amdgpu_vga_set_decode - enable/disable vga decode
1012 *
1013 * @cookie: amdgpu_device pointer
1014 * @state: enable/disable vga decode
1015 *
1016 * Enable/disable vga decode (all asics).
1017 * Returns VGA resource flags.
1018 */
1019static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
1020{
1021 struct amdgpu_device *adev = cookie;
1022 amdgpu_asic_set_vga_state(adev, state);
1023 if (state)
1024 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1025 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1026 else
1027 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1028}
1029
1030/**
1031 * amdgpu_check_pot_argument - check that argument is a power of two
1032 *
1033 * @arg: value to check
1034 *
1035 * Validates that a certain argument is a power of two (all asics).
1036 * Returns true if argument is valid.
1037 */
1038static bool amdgpu_check_pot_argument(int arg)
1039{
1040 return (arg & (arg - 1)) == 0;
1041}
1042
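/*
 * For example, 8 (0b1000) and 1024 pass this check, while 12 (0b1100) does
 * not. Note that 0 also passes, so callers such as amdgpu_check_vm_size()
 * reject too-small values separately.
 */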
1043static void amdgpu_check_block_size(struct amdgpu_device *adev)
1044{
1045 /* defines number of bits in page table versus page directory,
1046 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1047 * page table and the remaining bits are in the page directory */
1048 if (amdgpu_vm_block_size == -1)
1049 return;
1050
1051 if (amdgpu_vm_block_size < 9) {
1052 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1053 amdgpu_vm_block_size);
1054 goto def_value;
1055 }
1056
1057 if (amdgpu_vm_block_size > 24 ||
1058 (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
1059 dev_warn(adev->dev, "VM page table size (%d) too large\n",
1060 amdgpu_vm_block_size);
1061 goto def_value;
1062 }
1063
1064 return;
1065
1066def_value:
1067 amdgpu_vm_block_size = -1;
1068}
1069
1070static void amdgpu_check_vm_size(struct amdgpu_device *adev)
1071{
1072 if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
1073 dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
1074 amdgpu_vm_size);
1075 goto def_value;
1076 }
1077
1078 if (amdgpu_vm_size < 1) {
1079 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1080 amdgpu_vm_size);
1081 goto def_value;
1082 }
1083
1084 /*
1085 * Max GPUVM size for Cayman, SI, CI and VI is 40 bits.
1086 */
1087 if (amdgpu_vm_size > 1024) {
1088 dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
1089 amdgpu_vm_size);
1090 goto def_value;
1091 }
1092
1093 return;
1094
1095def_value:
1096 amdgpu_vm_size = -1;
1097}
1098
1099/**
1100 * amdgpu_check_arguments - validate module params
1101 *
1102 * @adev: amdgpu_device pointer
1103 *
1104 * Validates certain module parameters and updates
1105 * the associated values used by the driver (all asics).
1106 */
1107static void amdgpu_check_arguments(struct amdgpu_device *adev)
1108{
1109 if (amdgpu_sched_jobs < 4) {
1110 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1111 amdgpu_sched_jobs);
1112 amdgpu_sched_jobs = 4;
1113 } else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)){
1114 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1115 amdgpu_sched_jobs);
1116 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1117 }
1118
1119 if (amdgpu_gart_size != -1) {
1120 /* gtt size must be greater or equal to 32M */
1121 if (amdgpu_gart_size < 32) {
1122 dev_warn(adev->dev, "gart size (%d) too small\n",
1123 amdgpu_gart_size);
1124 amdgpu_gart_size = -1;
1125 }
1126 }
1127
1128 amdgpu_check_vm_size(adev);
1129
1130 amdgpu_check_block_size(adev);
1131
1132 if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
1133 !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
1134 dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
1135 amdgpu_vram_page_split);
1136 amdgpu_vram_page_split = 1024;
1137 }
1138}
1139
1140/**
1141 * amdgpu_switcheroo_set_state - set switcheroo state
1142 *
1143 * @pdev: pci dev pointer
1144 * @state: vga_switcheroo state
1145 *
1146 * Callback for the switcheroo driver. Suspends or resumes
1147 * the asics before or after it is powered up using ACPI methods.
1148 */
1149static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1150{
1151 struct drm_device *dev = pci_get_drvdata(pdev);
1152
1153 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1154 return;
1155
1156 if (state == VGA_SWITCHEROO_ON) {
1157 unsigned d3_delay = dev->pdev->d3_delay;
1158
1159 pr_info("amdgpu: switched on\n");
1160 /* don't suspend or resume card normally */
1161 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1162
1163 amdgpu_device_resume(dev, true, true);
1164
1165 dev->pdev->d3_delay = d3_delay;
1166
1167 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1168 drm_kms_helper_poll_enable(dev);
1169 } else {
1170 pr_info("amdgpu: switched off\n");
1171 drm_kms_helper_poll_disable(dev);
1172 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1173 amdgpu_device_suspend(dev, true, true);
1174 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1175 }
1176}
1177
1178/**
1179 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1180 *
1181 * @pdev: pci dev pointer
1182 *
1183 * Callback for the switcheroo driver. Check if the switcheroo
1184 * state can be changed.
1185 * Returns true if the state can be changed, false if not.
1186 */
1187static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1188{
1189 struct drm_device *dev = pci_get_drvdata(pdev);
1190
1191 /*
1192 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1193 * locking inversion with the driver load path. And the access here is
1194 * completely racy anyway. So don't bother with locking for now.
1195 */
1196 return dev->open_count == 0;
1197}
1198
1199static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1200 .set_gpu_state = amdgpu_switcheroo_set_state,
1201 .reprobe = NULL,
1202 .can_switch = amdgpu_switcheroo_can_switch,
1203};
1204
1205int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
1206 enum amd_ip_block_type block_type,
1207 enum amd_clockgating_state state)
1208{
1209 int i, r = 0;
1210
1211 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1212 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1213 continue;
c722865a
RZ
1214 if (adev->ip_blocks[i].version->type != block_type)
1215 continue;
1216 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1217 continue;
1218 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1219 (void *)adev, state);
1220 if (r)
1221 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1222 adev->ip_blocks[i].version->funcs->name, r);
1223 }
1224 return r;
1225}
1226
1227int amdgpu_set_powergating_state(struct amdgpu_device *adev,
1228 enum amd_ip_block_type block_type,
1229 enum amd_powergating_state state)
1230{
1231 int i, r = 0;
1232
1233 for (i = 0; i < adev->num_ip_blocks; i++) {
1234 if (!adev->ip_blocks[i].status.valid)
1235 continue;
1236 if (adev->ip_blocks[i].version->type != block_type)
1237 continue;
1238 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1239 continue;
1240 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1241 (void *)adev, state);
1242 if (r)
1243 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1244 adev->ip_blocks[i].version->funcs->name, r);
1245 }
1246 return r;
1247}
1248
1249void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
1250{
1251 int i;
1252
1253 for (i = 0; i < adev->num_ip_blocks; i++) {
1254 if (!adev->ip_blocks[i].status.valid)
1255 continue;
1256 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1257 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1258 }
1259}
1260
1261int amdgpu_wait_for_idle(struct amdgpu_device *adev,
1262 enum amd_ip_block_type block_type)
1263{
1264 int i, r;
1265
1266 for (i = 0; i < adev->num_ip_blocks; i++) {
1267 if (!adev->ip_blocks[i].status.valid)
1268 continue;
1269 if (adev->ip_blocks[i].version->type == block_type) {
1270 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1271 if (r)
1272 return r;
1273 break;
1274 }
1275 }
1276 return 0;
1277
1278}
1279
1280bool amdgpu_is_idle(struct amdgpu_device *adev,
1281 enum amd_ip_block_type block_type)
1282{
1283 int i;
1284
1285 for (i = 0; i < adev->num_ip_blocks; i++) {
1286 if (!adev->ip_blocks[i].status.valid)
1287 continue;
1288 if (adev->ip_blocks[i].version->type == block_type)
1289 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1290 }
1291 return true;
1292
1293}
1294
1295struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
1296 enum amd_ip_block_type type)
1297{
1298 int i;
1299
1300 for (i = 0; i < adev->num_ip_blocks; i++)
1301 if (adev->ip_blocks[i].version->type == type)
1302 return &adev->ip_blocks[i];
1303
1304 return NULL;
1305}
1306
1307/**
1308 * amdgpu_ip_block_version_cmp
1309 *
1310 * @adev: amdgpu_device pointer
1311 * @type: enum amd_ip_block_type
1312 * @major: major version
1313 * @minor: minor version
1314 *
1315 * return 0 if equal or greater
1316 * return 1 if smaller or the ip_block doesn't exist
1317 */
1318int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
1319 enum amd_ip_block_type type,
1320 u32 major, u32 minor)
1321{
1322 struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
1323
1324 if (ip_block && ((ip_block->version->major > major) ||
1325 ((ip_block->version->major == major) &&
1326 (ip_block->version->minor >= minor))))
1327 return 0;
1328
1329 return 1;
1330}
1331
1332/**
1333 * amdgpu_ip_block_add
1334 *
1335 * @adev: amdgpu_device pointer
1336 * @ip_block_version: pointer to the IP to add
1337 *
1338 * Adds the IP block driver information to the collection of IPs
1339 * on the asic.
1340 */
1341int amdgpu_ip_block_add(struct amdgpu_device *adev,
1342 const struct amdgpu_ip_block_version *ip_block_version)
1343{
1344 if (!ip_block_version)
1345 return -EINVAL;
1346
1347 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1348
1349 return 0;
1350}
1351
1352static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1353{
1354 adev->enable_virtual_display = false;
1355
1356 if (amdgpu_virtual_display) {
1357 struct drm_device *ddev = adev->ddev;
1358 const char *pci_address_name = pci_name(ddev->pdev);
1359 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1360
1361 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1362 pciaddstr_tmp = pciaddstr;
1363 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1364 pciaddname = strsep(&pciaddname_tmp, ",");
1365 if (!strcmp("all", pciaddname)
1366 || !strcmp(pci_address_name, pciaddname)) {
1367 long num_crtc;
1368 int res = -1;
1369
1370 adev->enable_virtual_display = true;
1371
1372 if (pciaddname_tmp)
1373 res = kstrtol(pciaddname_tmp, 10,
1374 &num_crtc);
1375
1376 if (!res) {
1377 if (num_crtc < 1)
1378 num_crtc = 1;
1379 if (num_crtc > 6)
1380 num_crtc = 6;
1381 adev->mode_info.num_crtc = num_crtc;
1382 } else {
1383 adev->mode_info.num_crtc = 1;
1384 }
1385 break;
1386 }
1387 }
1388
1389 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1390 amdgpu_virtual_display, pci_address_name,
1391 adev->enable_virtual_display, adev->mode_info.num_crtc);
1392
1393 kfree(pciaddstr);
1394 }
1395}
1396
1397static int amdgpu_early_init(struct amdgpu_device *adev)
1398{
1399 int i, r;
1400
1401 amdgpu_device_enable_virtual_display(adev);
1402
1403 switch (adev->asic_type) {
1404 case CHIP_TOPAZ:
1405 case CHIP_TONGA:
1406 case CHIP_FIJI:
1407 case CHIP_POLARIS11:
1408 case CHIP_POLARIS10:
1409 case CHIP_POLARIS12:
1410 case CHIP_CARRIZO:
1411 case CHIP_STONEY:
1412 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
1413 adev->family = AMDGPU_FAMILY_CZ;
1414 else
1415 adev->family = AMDGPU_FAMILY_VI;
1416
1417 r = vi_set_ip_blocks(adev);
1418 if (r)
1419 return r;
1420 break;
1421#ifdef CONFIG_DRM_AMDGPU_SI
1422 case CHIP_VERDE:
1423 case CHIP_TAHITI:
1424 case CHIP_PITCAIRN:
1425 case CHIP_OLAND:
1426 case CHIP_HAINAN:
1427 adev->family = AMDGPU_FAMILY_SI;
1428 r = si_set_ip_blocks(adev);
1429 if (r)
1430 return r;
1431 break;
1432#endif
1433#ifdef CONFIG_DRM_AMDGPU_CIK
1434 case CHIP_BONAIRE:
1435 case CHIP_HAWAII:
1436 case CHIP_KAVERI:
1437 case CHIP_KABINI:
1438 case CHIP_MULLINS:
1439 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1440 adev->family = AMDGPU_FAMILY_CI;
1441 else
1442 adev->family = AMDGPU_FAMILY_KV;
1443
1444 r = cik_set_ip_blocks(adev);
1445 if (r)
1446 return r;
1447 break;
1448#endif
1449 case CHIP_VEGA10:
1450 adev->family = AMDGPU_FAMILY_AI;
1451
1452 r = soc15_set_ip_blocks(adev);
1453 if (r)
1454 return r;
1455 break;
1456 default:
1457 /* FIXME: not supported yet */
1458 return -EINVAL;
1459 }
1460
1461 if (amdgpu_sriov_vf(adev)) {
1462 r = amdgpu_virt_request_full_gpu(adev, true);
1463 if (r)
1464 return r;
1465 }
1466
1467 for (i = 0; i < adev->num_ip_blocks; i++) {
1468 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1469 DRM_ERROR("disabled ip block: %d\n", i);
1470 adev->ip_blocks[i].status.valid = false;
1471 } else {
1472 if (adev->ip_blocks[i].version->funcs->early_init) {
1473 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
1474 if (r == -ENOENT) {
1475 adev->ip_blocks[i].status.valid = false;
1476 } else if (r) {
1477 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1478 adev->ip_blocks[i].version->funcs->name, r);
1479 return r;
1480 } else {
1481 adev->ip_blocks[i].status.valid = true;
1482 }
1483 } else {
1484 adev->ip_blocks[i].status.valid = true;
1485 }
1486 }
1487 }
1488
1489 adev->cg_flags &= amdgpu_cg_mask;
1490 adev->pg_flags &= amdgpu_pg_mask;
1491
1492 return 0;
1493}
1494
1495static int amdgpu_init(struct amdgpu_device *adev)
1496{
1497 int i, r;
1498
1499 for (i = 0; i < adev->num_ip_blocks; i++) {
1500 if (!adev->ip_blocks[i].status.valid)
1501 continue;
1502 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
1503 if (r) {
1504 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1505 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1506 return r;
2c1a2784 1507 }
a1255107 1508 adev->ip_blocks[i].status.sw = true;
d38ceaf9 1509 /* need to do gmc hw init early so we can allocate gpu mem */
a1255107 1510 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
d38ceaf9 1511 r = amdgpu_vram_scratch_init(adev);
2c1a2784
AD
1512 if (r) {
1513 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
d38ceaf9 1514 return r;
2c1a2784 1515 }
a1255107 1516 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784
AD
1517 if (r) {
1518 DRM_ERROR("hw_init %d failed %d\n", i, r);
d38ceaf9 1519 return r;
2c1a2784 1520 }
d38ceaf9 1521 r = amdgpu_wb_init(adev);
2c1a2784
AD
1522 if (r) {
1523 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
d38ceaf9 1524 return r;
2c1a2784 1525 }
a1255107 1526 adev->ip_blocks[i].status.hw = true;
2493664f
ML
1527
1528 /* right after GMC hw init, we create CSA */
1529 if (amdgpu_sriov_vf(adev)) {
1530 r = amdgpu_allocate_static_csa(adev);
1531 if (r) {
1532 DRM_ERROR("allocate CSA failed %d\n", r);
1533 return r;
1534 }
1535 }
1536 }
1537 }
1538
1539 for (i = 0; i < adev->num_ip_blocks; i++) {
1540 if (!adev->ip_blocks[i].status.sw)
1541 continue;
1542 /* gmc hw init is done early */
1543 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
1544 continue;
1545 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1546 if (r) {
1547 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1548 adev->ip_blocks[i].version->funcs->name, r);
1549 return r;
1550 }
1551 adev->ip_blocks[i].status.hw = true;
1552 }
1553
1554 return 0;
1555}
1556
1557static int amdgpu_late_init(struct amdgpu_device *adev)
1558{
1559 int i = 0, r;
1560
1561 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1562 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1563 continue;
a1255107
AD
1564 if (adev->ip_blocks[i].version->funcs->late_init) {
1565 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2c1a2784 1566 if (r) {
a1255107
AD
1567 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1568 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1569 return r;
2c1a2784 1570 }
a1255107 1571 adev->ip_blocks[i].status.late_initialized = true;
d38ceaf9 1572 }
4a446d55 1573 /* skip CG for VCE/UVD, it's handled specially */
a1255107
AD
1574 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1575 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
4a446d55 1576 /* enable clockgating to save power */
a1255107
AD
1577 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1578 AMD_CG_STATE_GATE);
4a446d55
AD
1579 if (r) {
1580 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
a1255107 1581 adev->ip_blocks[i].version->funcs->name, r);
4a446d55
AD
1582 return r;
1583 }
b0b00ff1 1584 }
d38ceaf9
AD
1585 }
1586
d1aff8ec
TSD
1587 amdgpu_dpm_enable_uvd(adev, false);
1588 amdgpu_dpm_enable_vce(adev, false);
1589
d38ceaf9
AD
1590 return 0;
1591}
1592
1593static int amdgpu_fini(struct amdgpu_device *adev)
1594{
1595 int i, r;
1596
3e96dbfd
AD
1597 /* need to disable SMC first */
1598 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1599 if (!adev->ip_blocks[i].status.hw)
3e96dbfd 1600 continue;
a1255107 1601 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3e96dbfd 1602 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
a1255107
AD
1603 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1604 AMD_CG_STATE_UNGATE);
3e96dbfd
AD
1605 if (r) {
1606 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
a1255107 1607 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd
AD
1608 return r;
1609 }
a1255107 1610 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3e96dbfd
AD
1611 /* XXX handle errors */
1612 if (r) {
1613 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
a1255107 1614 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd 1615 }
a1255107 1616 adev->ip_blocks[i].status.hw = false;
3e96dbfd
AD
1617 break;
1618 }
1619 }
1620
d38ceaf9 1621 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1622 if (!adev->ip_blocks[i].status.hw)
d38ceaf9 1623 continue;
a1255107 1624 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
d38ceaf9
AD
1625 amdgpu_wb_fini(adev);
1626 amdgpu_vram_scratch_fini(adev);
1627 }
8201a67a
RZ
1628
1629 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1630 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1631 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1632 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1633 AMD_CG_STATE_UNGATE);
1634 if (r) {
1635 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1636 adev->ip_blocks[i].version->funcs->name, r);
1637 return r;
1638 }
2c1a2784 1639 }
8201a67a 1640
a1255107 1641 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
d38ceaf9 1642 /* XXX handle errors */
2c1a2784 1643 if (r) {
a1255107
AD
1644 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1645 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1646 }
8201a67a 1647
a1255107 1648 adev->ip_blocks[i].status.hw = false;
d38ceaf9
AD
1649 }
1650
1651 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1652 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 1653 continue;
a1255107 1654 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
d38ceaf9 1655 /* XXX handle errors */
2c1a2784 1656 if (r) {
a1255107
AD
1657 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1658 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1659 }
a1255107
AD
1660 adev->ip_blocks[i].status.sw = false;
1661 adev->ip_blocks[i].status.valid = false;
1662 }
1663
1664 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1665 if (!adev->ip_blocks[i].status.late_initialized)
1666 continue;
1667 if (adev->ip_blocks[i].version->funcs->late_fini)
1668 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1669 adev->ip_blocks[i].status.late_initialized = false;
a6dcfd9c
ML
1670 }
1671
3149d9da 1672 if (amdgpu_sriov_vf(adev)) {
2493664f 1673 amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
3149d9da
XY
1674 amdgpu_virt_release_full_gpu(adev, false);
1675 }
2493664f 1676
d38ceaf9
AD
1677 return 0;
1678}
1679
faefba95 1680int amdgpu_suspend(struct amdgpu_device *adev)
d38ceaf9
AD
1681{
1682 int i, r;
1683
e941ea99
XY
1684 if (amdgpu_sriov_vf(adev))
1685 amdgpu_virt_request_full_gpu(adev, false);
1686
c5a93a28
FC
1687 /* ungate SMC block first */
1688 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1689 AMD_CG_STATE_UNGATE);
1690 if (r) {
1691 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r);
1692 }
1693
d38ceaf9 1694 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1695 if (!adev->ip_blocks[i].status.valid)
d38ceaf9
AD
1696 continue;
1697 /* ungate blocks so that suspend can properly shut them down */
c5a93a28 1698 if (i != AMD_IP_BLOCK_TYPE_SMC) {
a1255107
AD
1699 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1700 AMD_CG_STATE_UNGATE);
c5a93a28 1701 if (r) {
a1255107
AD
1702 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1703 adev->ip_blocks[i].version->funcs->name, r);
c5a93a28 1704 }
2c1a2784 1705 }
d38ceaf9 1706 /* XXX handle errors */
a1255107 1707 r = adev->ip_blocks[i].version->funcs->suspend(adev);
d38ceaf9 1708 /* XXX handle errors */
2c1a2784 1709 if (r) {
a1255107
AD
1710 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1711 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1712 }
d38ceaf9
AD
1713 }
1714
e941ea99
XY
1715 if (amdgpu_sriov_vf(adev))
1716 amdgpu_virt_release_full_gpu(adev, false);
1717
d38ceaf9
AD
1718 return 0;
1719}
1720
e4f0fdcc 1721static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
a90ad3c2
ML
1722{
1723 int i, r;
1724
1725 for (i = 0; i < adev->num_ip_blocks; i++) {
1726 if (!adev->ip_blocks[i].status.valid)
1727 continue;
1728
1729 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1730 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1731 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
e4f0fdcc 1732 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
a90ad3c2
ML
1733
1734 if (r) {
1735 DRM_ERROR("resume of IP block <%s> failed %d\n",
1736 adev->ip_blocks[i].version->funcs->name, r);
1737 return r;
1738 }
1739 }
1740
1741 return 0;
1742}
1743
e4f0fdcc 1744static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
a90ad3c2
ML
1745{
1746 int i, r;
1747
1748 for (i = 0; i < adev->num_ip_blocks; i++) {
1749 if (!adev->ip_blocks[i].status.valid)
1750 continue;
1751
1752 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1753 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1754 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH )
1755 continue;
1756
e4f0fdcc 1757 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
a90ad3c2
ML
1758 if (r) {
1759 DRM_ERROR("resume of IP block <%s> failed %d\n",
1760 adev->ip_blocks[i].version->funcs->name, r);
1761 return r;
1762 }
1763 }
1764
1765 return 0;
1766}
1767
d38ceaf9
AD
1768static int amdgpu_resume(struct amdgpu_device *adev)
1769{
1770 int i, r;
1771
1772 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1773 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1774 continue;
a1255107 1775 r = adev->ip_blocks[i].version->funcs->resume(adev);
2c1a2784 1776 if (r) {
a1255107
AD
1777 DRM_ERROR("resume of IP block <%s> failed %d\n",
1778 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1779 return r;
2c1a2784 1780 }
d38ceaf9
AD
1781 }
1782
1783 return 0;
1784}
1785
4e99a44e 1786static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 1787{
a5bde2f9
AD
1788 if (adev->is_atom_fw) {
1789 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
1790 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1791 } else {
1792 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
1793 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1794 }
048765ad
AR
1795}
1796
d38ceaf9
AD
1797/**
1798 * amdgpu_device_init - initialize the driver
1799 *
1800 * @adev: amdgpu_device pointer
1801 * @ddev: drm dev pointer
1802 * @pdev: pci dev pointer
1803 * @flags: driver flags
1804 *
1805 * Initializes the driver info and hw (all asics).
1806 * Returns 0 for success or an error on failure.
1807 * Called at driver startup.
1808 */
1809int amdgpu_device_init(struct amdgpu_device *adev,
1810 struct drm_device *ddev,
1811 struct pci_dev *pdev,
1812 uint32_t flags)
1813{
1814 int r, i;
1815 bool runtime = false;
95844d20 1816 u32 max_MBps;
d38ceaf9
AD
1817
1818 adev->shutdown = false;
1819 adev->dev = &pdev->dev;
1820 adev->ddev = ddev;
1821 adev->pdev = pdev;
1822 adev->flags = flags;
2f7d10b3 1823 adev->asic_type = flags & AMD_ASIC_MASK;
d38ceaf9
AD
1824 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
1825 adev->mc.gtt_size = 512 * 1024 * 1024;
1826 adev->accel_working = false;
1827 adev->num_rings = 0;
1828 adev->mman.buffer_funcs = NULL;
1829 adev->mman.buffer_funcs_ring = NULL;
1830 adev->vm_manager.vm_pte_funcs = NULL;
2d55e45a 1831 adev->vm_manager.vm_pte_num_rings = 0;
d38ceaf9 1832 adev->gart.gart_funcs = NULL;
f54d1867 1833 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
d38ceaf9
AD
1834
1835 adev->smc_rreg = &amdgpu_invalid_rreg;
1836 adev->smc_wreg = &amdgpu_invalid_wreg;
1837 adev->pcie_rreg = &amdgpu_invalid_rreg;
1838 adev->pcie_wreg = &amdgpu_invalid_wreg;
36b9a952
HR
1839 adev->pciep_rreg = &amdgpu_invalid_rreg;
1840 adev->pciep_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
1841 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
1842 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
1843 adev->didt_rreg = &amdgpu_invalid_rreg;
1844 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
1845 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
1846 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
1847 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
1848 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1849
ccdbb20a 1850
3e39ab90
AD
1851 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1852 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
1853 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
1854
1855 /* mutex initialization is all done here so we
1856 * can recall functions without locking issues */
d38ceaf9 1857 atomic_set(&adev->irq.ih.lock, 0);
0e5ca0d1 1858 mutex_init(&adev->firmware.mutex);
d38ceaf9
AD
1859 mutex_init(&adev->pm.mutex);
1860 mutex_init(&adev->gfx.gpu_clock_mutex);
1861 mutex_init(&adev->srbm_mutex);
1862 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9
AD
1863 mutex_init(&adev->mn_lock);
1864 hash_init(adev->mn_hash);
1865
1866 amdgpu_check_arguments(adev);
1867
1868 /* Registers mapping */
1869 /* TODO: block userspace mapping of io register */
1870 spin_lock_init(&adev->mmio_idx_lock);
1871 spin_lock_init(&adev->smc_idx_lock);
1872 spin_lock_init(&adev->pcie_idx_lock);
1873 spin_lock_init(&adev->uvd_ctx_idx_lock);
1874 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 1875 spin_lock_init(&adev->gc_cac_idx_lock);
d38ceaf9 1876 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 1877 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 1878
0c4e7fa5
CZ
1879 INIT_LIST_HEAD(&adev->shadow_list);
1880 mutex_init(&adev->shadow_list_lock);
1881
5c1354bd
CZ
1882 INIT_LIST_HEAD(&adev->gtt_list);
1883 spin_lock_init(&adev->gtt_list_lock);
1884
da69c161
KW
1885 if (adev->asic_type >= CHIP_BONAIRE) {
1886 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
1887 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
1888 } else {
1889 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
1890 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
1891 }
d38ceaf9 1892
d38ceaf9
AD
1893 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
1894 if (adev->rmmio == NULL) {
1895 return -ENOMEM;
1896 }
1897 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
1898 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
1899
da69c161
KW
1900 if (adev->asic_type >= CHIP_BONAIRE)
1901 /* doorbell bar mapping */
1902 amdgpu_doorbell_init(adev);
d38ceaf9
AD
1903
1904 /* io port mapping */
1905 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1906 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
1907 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
1908 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
1909 break;
1910 }
1911 }
1912 if (adev->rio_mem == NULL)
b64a18c5 1913 DRM_INFO("PCI I/O BAR is not found.\n");
d38ceaf9
AD
1914
1915 /* early init functions */
1916 r = amdgpu_early_init(adev);
1917 if (r)
1918 return r;
1919
1920 /* if we have more than one VGA card, disable the amdgpu VGA resources */
1921 /* this will fail for cards that aren't VGA class devices, just
1922 * ignore it */
1923 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
1924
1925 if (amdgpu_runtime_pm == 1)
1926 runtime = true;
e9bef455 1927 if (amdgpu_device_is_px(ddev))
d38ceaf9 1928 runtime = true;
84c8b22e
LW
1929 if (!pci_is_thunderbolt_attached(adev->pdev))
1930 vga_switcheroo_register_client(adev->pdev,
1931 &amdgpu_switcheroo_ops, runtime);
d38ceaf9
AD
1932 if (runtime)
1933 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
1934
1935 /* Read BIOS */
83ba126a
AD
1936 if (!amdgpu_get_bios(adev)) {
1937 r = -EINVAL;
1938 goto failed;
1939 }
f7e9e9fe 1940
d38ceaf9 1941 r = amdgpu_atombios_init(adev);
2c1a2784
AD
1942 if (r) {
1943 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
83ba126a 1944 goto failed;
2c1a2784 1945 }
d38ceaf9 1946
4e99a44e
ML
1947 /* detect if we are with an SRIOV vbios */
1948 amdgpu_device_detect_sriov_bios(adev);
048765ad 1949
d38ceaf9 1950 /* Post card if necessary */
bec86378 1951 if (amdgpu_vpost_needed(adev)) {
d38ceaf9 1952 if (!adev->bios) {
bec86378 1953 dev_err(adev->dev, "no vBIOS found\n");
83ba126a
AD
1954 r = -EINVAL;
1955 goto failed;
d38ceaf9 1956 }
bec86378 1957 DRM_INFO("GPU posting now...\n");
4e99a44e
ML
1958 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
1959 if (r) {
1960 dev_err(adev->dev, "gpu post error!\n");
1961 goto failed;
1962 }
1963 } else {
1964 DRM_INFO("GPU post is not needed\n");
d38ceaf9
AD
1965 }
1966
a5bde2f9
AD
1967 if (!adev->is_atom_fw) {
1968 /* Initialize clocks */
1969 r = amdgpu_atombios_get_clock_info(adev);
1970 if (r) {
1971 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
1972 return r;
1973 }
1974 /* init i2c buses */
1975 amdgpu_atombios_i2c_init(adev);
2c1a2784 1976 }
d38ceaf9
AD
1977
1978 /* Fence driver */
1979 r = amdgpu_fence_driver_init(adev);
2c1a2784
AD
1980 if (r) {
1981 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
83ba126a 1982 goto failed;
2c1a2784 1983 }
d38ceaf9
AD
1984
1985 /* init the mode config */
1986 drm_mode_config_init(adev->ddev);
1987
1988 r = amdgpu_init(adev);
1989 if (r) {
2c1a2784 1990 dev_err(adev->dev, "amdgpu_init failed\n");
d38ceaf9 1991 amdgpu_fini(adev);
83ba126a 1992 goto failed;
d38ceaf9
AD
1993 }
1994
1995 adev->accel_working = true;
1996
95844d20
MO
1997 /* Initialize the buffer migration limit. */
1998 if (amdgpu_moverate >= 0)
1999 max_MBps = amdgpu_moverate;
2000 else
2001 max_MBps = 8; /* Allow 8 MB/s. */
2002 /* Get a log2 for easy divisions. */
2003 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
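	/*
	 * Worked example (illustrative only): with the default limit of 8 MB/s,
	 * log2_max_MBps == ilog2(8) == 3, so callers can divide a size in MB by
	 * the rate with a simple shift (roughly seconds ~= megabytes >> 3)
	 * instead of doing a 64-bit division on a hot path.
	 */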
2004
d38ceaf9
AD
2005 r = amdgpu_ib_pool_init(adev);
2006 if (r) {
2007 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
83ba126a 2008 goto failed;
d38ceaf9
AD
2009 }
2010
2011 r = amdgpu_ib_ring_tests(adev);
2012 if (r)
2013 DRM_ERROR("ib ring test failed (%d).\n", r);
2014
9bc92b9c
ML
2015 amdgpu_fbdev_init(adev);
2016
d38ceaf9 2017 r = amdgpu_gem_debugfs_init(adev);
3f14e623 2018 if (r)
d38ceaf9 2019 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
d38ceaf9
AD
2020
2021 r = amdgpu_debugfs_regs_init(adev);
3f14e623 2022 if (r)
d38ceaf9 2023 DRM_ERROR("registering register debugfs failed (%d).\n", r);
d38ceaf9 2024
50ab2533 2025 r = amdgpu_debugfs_firmware_init(adev);
3f14e623 2026 if (r)
50ab2533 2027 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
50ab2533 2028
d38ceaf9
AD
2029 if ((amdgpu_testing & 1)) {
2030 if (adev->accel_working)
2031 amdgpu_test_moves(adev);
2032 else
2033 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2034 }
d38ceaf9
AD
2035 if (amdgpu_benchmarking) {
2036 if (adev->accel_working)
2037 amdgpu_benchmark(adev, amdgpu_benchmarking);
2038 else
2039 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2040 }
2041
2042 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2043 * explicit gating rather than handling it automatically.
2044 */
2045 r = amdgpu_late_init(adev);
2c1a2784
AD
2046 if (r) {
2047 dev_err(adev->dev, "amdgpu_late_init failed\n");
83ba126a 2048 goto failed;
2c1a2784 2049 }
d38ceaf9
AD
2050
2051 return 0;
83ba126a
AD
2052
2053failed:
2054 if (runtime)
2055 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2056 return r;
d38ceaf9
AD
2057}
2058
d38ceaf9
AD
2059/**
2060 * amdgpu_device_fini - tear down the driver
2061 *
2062 * @adev: amdgpu_device pointer
2063 *
2064 * Tear down the driver info (all asics).
2065 * Called at driver shutdown.
2066 */
2067void amdgpu_device_fini(struct amdgpu_device *adev)
2068{
2069 int r;
2070
2071 DRM_INFO("amdgpu: finishing device.\n");
2072 adev->shutdown = true;
a951ed85 2073 drm_crtc_force_disable_all(adev->ddev);
d38ceaf9
AD
2074 /* evict vram memory */
2075 amdgpu_bo_evict_vram(adev);
2076 amdgpu_ib_pool_fini(adev);
2077 amdgpu_fence_driver_fini(adev);
2078 amdgpu_fbdev_fini(adev);
2079 r = amdgpu_fini(adev);
d38ceaf9
AD
2080 adev->accel_working = false;
2081 /* free i2c buses */
2082 amdgpu_i2c_fini(adev);
2083 amdgpu_atombios_fini(adev);
2084 kfree(adev->bios);
2085 adev->bios = NULL;
84c8b22e
LW
2086 if (!pci_is_thunderbolt_attached(adev->pdev))
2087 vga_switcheroo_unregister_client(adev->pdev);
83ba126a
AD
2088 if (adev->flags & AMD_IS_PX)
2089 vga_switcheroo_fini_domain_pm_ops(adev->dev);
d38ceaf9
AD
2090 vga_client_register(adev->pdev, NULL, NULL, NULL);
2091 if (adev->rio_mem)
2092 pci_iounmap(adev->pdev, adev->rio_mem);
2093 adev->rio_mem = NULL;
2094 iounmap(adev->rmmio);
2095 adev->rmmio = NULL;
da69c161
KW
2096 if (adev->asic_type >= CHIP_BONAIRE)
2097 amdgpu_doorbell_fini(adev);
d38ceaf9 2098 amdgpu_debugfs_regs_cleanup(adev);
d38ceaf9
AD
2099}
2100
2101
2102/*
2103 * Suspend & resume.
2104 */
2105/**
810ddc3a 2106 * amdgpu_device_suspend - initiate device suspend
d38ceaf9
AD
2107 *
2108 * @dev: drm dev pointer
2109 * @suspend: true to put the PCI device into D3hot as part of the suspend
2110 *
2111 * Puts the hw in the suspend state (all asics).
2112 * Returns 0 for success or an error on failure.
2113 * Called at driver suspend.
2114 */
810ddc3a 2115int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
d38ceaf9
AD
2116{
2117 struct amdgpu_device *adev;
2118 struct drm_crtc *crtc;
2119 struct drm_connector *connector;
5ceb54c6 2120 int r;
d38ceaf9
AD
2121
2122 if (dev == NULL || dev->dev_private == NULL) {
2123 return -ENODEV;
2124 }
2125
2126 adev = dev->dev_private;
2127
2128 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2129 return 0;
2130
2131 drm_kms_helper_poll_disable(dev);
2132
2133 /* turn off display hw */
4c7fbc39 2134 drm_modeset_lock_all(dev);
d38ceaf9
AD
2135 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2136 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2137 }
4c7fbc39 2138 drm_modeset_unlock_all(dev);
d38ceaf9 2139
756e6880 2140 /* unpin the front buffers and cursors */
d38ceaf9 2141 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
756e6880 2142 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
d38ceaf9
AD
2143 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2144 struct amdgpu_bo *robj;
2145
756e6880
AD
2146 if (amdgpu_crtc->cursor_bo) {
2147 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2148 r = amdgpu_bo_reserve(aobj, false);
2149 if (r == 0) {
2150 amdgpu_bo_unpin(aobj);
2151 amdgpu_bo_unreserve(aobj);
2152 }
2153 }
2154
d38ceaf9
AD
2155 if (rfb == NULL || rfb->obj == NULL) {
2156 continue;
2157 }
2158 robj = gem_to_amdgpu_bo(rfb->obj);
2159 /* don't unpin kernel fb objects */
2160 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
2161 r = amdgpu_bo_reserve(robj, false);
2162 if (r == 0) {
2163 amdgpu_bo_unpin(robj);
2164 amdgpu_bo_unreserve(robj);
2165 }
2166 }
2167 }
2168 /* evict vram memory */
2169 amdgpu_bo_evict_vram(adev);
2170
5ceb54c6 2171 amdgpu_fence_driver_suspend(adev);
d38ceaf9
AD
2172
2173 r = amdgpu_suspend(adev);
2174
a0a71e49
AD
2175 /* evict remaining vram memory
2176 * This second call to evict vram is to evict the gart page table
2177 * using the CPU.
2178 */
d38ceaf9
AD
2179 amdgpu_bo_evict_vram(adev);
2180
be34d3bf
AD
2181 if (adev->is_atom_fw)
2182 amdgpu_atomfirmware_scratch_regs_save(adev);
2183 else
2184 amdgpu_atombios_scratch_regs_save(adev);
d38ceaf9
AD
2185 pci_save_state(dev->pdev);
2186 if (suspend) {
2187 /* Shut down the device */
2188 pci_disable_device(dev->pdev);
2189 pci_set_power_state(dev->pdev, PCI_D3hot);
74b0b157 2190 } else {
2191 r = amdgpu_asic_reset(adev);
2192 if (r)
2193 DRM_ERROR("amdgpu asic reset failed\n");
d38ceaf9
AD
2194 }
2195
2196 if (fbcon) {
2197 console_lock();
2198 amdgpu_fbdev_set_suspend(adev, 1);
2199 console_unlock();
2200 }
2201 return 0;
2202}
2203
2204/**
810ddc3a 2205 * amdgpu_device_resume - initiate device resume
d38ceaf9
AD
2206 *
2207 * @dev: drm dev pointer
2208 *
2209 * Bring the hw back to operating state (all asics).
2210 * Returns 0 for success or an error on failure.
2211 * Called at driver resume.
2212 */
810ddc3a 2213int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
d38ceaf9
AD
2214{
2215 struct drm_connector *connector;
2216 struct amdgpu_device *adev = dev->dev_private;
756e6880 2217 struct drm_crtc *crtc;
d38ceaf9
AD
2218 int r;
2219
2220 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2221 return 0;
2222
74b0b157 2223 if (fbcon)
d38ceaf9 2224 console_lock();
74b0b157 2225
d38ceaf9
AD
2226 if (resume) {
2227 pci_set_power_state(dev->pdev, PCI_D0);
2228 pci_restore_state(dev->pdev);
74b0b157 2229 r = pci_enable_device(dev->pdev);
2230 if (r) {
d38ceaf9
AD
2231 if (fbcon)
2232 console_unlock();
74b0b157 2233 return r;
d38ceaf9
AD
2234 }
2235 }
be34d3bf
AD
2236 if (adev->is_atom_fw)
2237 amdgpu_atomfirmware_scratch_regs_restore(adev);
2238 else
2239 amdgpu_atombios_scratch_regs_restore(adev);
d38ceaf9
AD
2240
2241 /* post card */
c836fec5 2242 if (amdgpu_need_post(adev)) {
74b0b157 2243 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2244 if (r)
2245 DRM_ERROR("amdgpu asic init failed\n");
2246 }
d38ceaf9
AD
2247
2248 r = amdgpu_resume(adev);
e6707218 2249 if (r) {
ca198528 2250 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
e6707218
RZ
2251 return r;
2252 }
5ceb54c6
AD
2253 amdgpu_fence_driver_resume(adev);
2254
ca198528
FC
2255 if (resume) {
2256 r = amdgpu_ib_ring_tests(adev);
2257 if (r)
2258 DRM_ERROR("ib ring test failed (%d).\n", r);
2259 }
d38ceaf9
AD
2260
2261 r = amdgpu_late_init(adev);
c085bd51
JQ
2262 if (r) {
2263 if (fbcon)
2264 console_unlock();
d38ceaf9 2265 return r;
c085bd51 2266 }
d38ceaf9 2267
756e6880
AD
2268 /* pin cursors */
2269 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2270 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2271
2272 if (amdgpu_crtc->cursor_bo) {
2273 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2274 r = amdgpu_bo_reserve(aobj, false);
2275 if (r == 0) {
2276 r = amdgpu_bo_pin(aobj,
2277 AMDGPU_GEM_DOMAIN_VRAM,
2278 &amdgpu_crtc->cursor_addr);
2279 if (r != 0)
2280 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2281 amdgpu_bo_unreserve(aobj);
2282 }
2283 }
2284 }
2285
d38ceaf9
AD
2286 /* blat the mode back in */
2287 if (fbcon) {
2288 drm_helper_resume_force_mode(dev);
2289 /* turn on display hw */
4c7fbc39 2290 drm_modeset_lock_all(dev);
d38ceaf9
AD
2291 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2292 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2293 }
4c7fbc39 2294 drm_modeset_unlock_all(dev);
d38ceaf9
AD
2295 }
2296
2297 drm_kms_helper_poll_enable(dev);
23a1a9e5
L
2298
2299 /*
2300 * Most of the connector probing functions try to acquire runtime pm
2301 * refs to ensure that the GPU is powered on when connector polling is
2302 * performed. Since we're calling this from a runtime PM callback,
2303 * trying to acquire rpm refs will cause us to deadlock.
2304 *
2305 * Since we're guaranteed to be holding the rpm lock, it's safe to
2306 * temporarily disable the rpm helpers so this doesn't deadlock us.
2307 */
2308#ifdef CONFIG_PM
2309 dev->dev->power.disable_depth++;
2310#endif
54fb2a5c 2311 drm_helper_hpd_irq_event(dev);
23a1a9e5
L
2312#ifdef CONFIG_PM
2313 dev->dev->power.disable_depth--;
2314#endif
d38ceaf9
AD
2315
2316 if (fbcon) {
2317 amdgpu_fbdev_set_suspend(adev, 0);
2318 console_unlock();
2319 }
2320
2321 return 0;
2322}
2323
63fbf42f
CZ
2324static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2325{
2326 int i;
2327 bool asic_hang = false;
2328
2329 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2330 if (!adev->ip_blocks[i].status.valid)
63fbf42f 2331 continue;
a1255107
AD
2332 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2333 adev->ip_blocks[i].status.hang =
2334 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2335 if (adev->ip_blocks[i].status.hang) {
2336 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
63fbf42f
CZ
2337 asic_hang = true;
2338 }
2339 }
2340 return asic_hang;
2341}
2342
4d446656 2343static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
d31a501e
CZ
2344{
2345 int i, r = 0;
2346
2347 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2348 if (!adev->ip_blocks[i].status.valid)
d31a501e 2349 continue;
a1255107
AD
2350 if (adev->ip_blocks[i].status.hang &&
2351 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2352 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
d31a501e
CZ
2353 if (r)
2354 return r;
2355 }
2356 }
2357
2358 return 0;
2359}
2360
35d782fe
CZ
2361static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2362{
da146d3b
AD
2363 int i;
2364
2365 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2366 if (!adev->ip_blocks[i].status.valid)
da146d3b 2367 continue;
a1255107
AD
2368 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2369 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2370 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
2371 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
2372 if (adev->ip_blocks[i].status.hang) {
da146d3b
AD
2373 DRM_INFO("Some block need full reset!\n");
2374 return true;
2375 }
2376 }
35d782fe
CZ
2377 }
2378 return false;
2379}
2380
2381static int amdgpu_soft_reset(struct amdgpu_device *adev)
2382{
2383 int i, r = 0;
2384
2385 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2386 if (!adev->ip_blocks[i].status.valid)
35d782fe 2387 continue;
a1255107
AD
2388 if (adev->ip_blocks[i].status.hang &&
2389 adev->ip_blocks[i].version->funcs->soft_reset) {
2390 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
35d782fe
CZ
2391 if (r)
2392 return r;
2393 }
2394 }
2395
2396 return 0;
2397}
2398
2399static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2400{
2401 int i, r = 0;
2402
2403 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2404 if (!adev->ip_blocks[i].status.valid)
35d782fe 2405 continue;
a1255107
AD
2406 if (adev->ip_blocks[i].status.hang &&
2407 adev->ip_blocks[i].version->funcs->post_soft_reset)
2408 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
35d782fe
CZ
2409 if (r)
2410 return r;
2411 }
2412
2413 return 0;
2414}
2415
3ad81f16
CZ
2416bool amdgpu_need_backup(struct amdgpu_device *adev)
2417{
2418 if (adev->flags & AMD_IS_APU)
2419 return false;
2420
2421 return amdgpu_lockup_timeout > 0 ? true : false;
2422}
2423
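/*
 * Context note (summary inferred from the callers in this file): after a full
 * GPU reset the contents of VRAM cannot be trusted, so buffers that keep a
 * shadow copy in GTT (notably the VM page tables) are restored by copying the
 * shadow back with the buffer-funcs (SDMA) ring. Callers wait on the returned
 * fence before reusing the buffer.
 */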
53cdccd5
CZ
2424static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2425 struct amdgpu_ring *ring,
2426 struct amdgpu_bo *bo,
f54d1867 2427 struct dma_fence **fence)
53cdccd5
CZ
2428{
2429 uint32_t domain;
2430 int r;
2431
2432 if (!bo->shadow)
2433 return 0;
2434
2435 r = amdgpu_bo_reserve(bo, false);
2436 if (r)
2437 return r;
2438 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2439 /* if bo has been evicted, then no need to recover */
2440 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
2441 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
2442 NULL, fence, true);
2443 if (r) {
2444 DRM_ERROR("recover page table failed!\n");
2445 goto err;
2446 }
2447 }
2448err:
2449 amdgpu_bo_unreserve(bo);
2450 return r;
2451}
2452
a90ad3c2
ML
2453/**
2454 * amdgpu_sriov_gpu_reset - reset the asic
2455 *
2456 * @adev: amdgpu device pointer
2457 * @voluntary: if this reset is requested by the guest
2458 * (true means by the guest, false means by the hypervisor)
2459 *
2460 * Attempt to reset the GPU if it has hung (all ASICs),
2461 * for the SRIOV case.
2462 * Returns 0 for success or an error on failure.
2463 */
2464int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
2465{
2466 int i, r = 0;
2467 int resched;
2468 struct amdgpu_bo *bo, *tmp;
2469 struct amdgpu_ring *ring;
2470 struct dma_fence *fence = NULL, *next = NULL;
2471
147b5983 2472 mutex_lock(&adev->virt.lock_reset);
a90ad3c2 2473 atomic_inc(&adev->gpu_reset_counter);
1fb37a3d 2474 adev->gfx.in_reset = true;
a90ad3c2
ML
2475
2476 /* block TTM */
2477 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2478
2479 /* block scheduler */
2480 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2481 ring = adev->rings[i];
2482
2483 if (!ring || !ring->sched.thread)
2484 continue;
2485
2486 kthread_park(ring->sched.thread);
2487 amd_sched_hw_job_reset(&ring->sched);
2488 }
2489
2490 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2491 amdgpu_fence_driver_force_completion(adev);
2492
2493 /* request to take full control of GPU before re-initialization */
2494 if (voluntary)
2495 amdgpu_virt_reset_gpu(adev);
2496 else
2497 amdgpu_virt_request_full_gpu(adev, true);
2498
2499
2500 /* Resume IP prior to SMC */
e4f0fdcc 2501 amdgpu_sriov_reinit_early(adev);
a90ad3c2
ML
2502
2503 /* we need to recover the GART prior to resuming SMC/CP/SDMA */
2504 amdgpu_ttm_recover_gart(adev);
2505
2506 /* now we are okay to resume SMC/CP/SDMA */
e4f0fdcc 2507 amdgpu_sriov_reinit_late(adev);
a90ad3c2
ML
2508
2509 amdgpu_irq_gpu_reset_resume_helper(adev);
2510
2511 if (amdgpu_ib_ring_tests(adev))
2512 dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
2513
2514 /* release full control of GPU after ib test */
2515 amdgpu_virt_release_full_gpu(adev, true);
2516
2517 DRM_INFO("recover vram bo from shadow\n");
2518
2519 ring = adev->mman.buffer_funcs_ring;
2520 mutex_lock(&adev->shadow_list_lock);
2521 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2522 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2523 if (fence) {
2524 r = dma_fence_wait(fence, false);
2525 if (r) {
2526 WARN(r, "recovery from shadow isn't completed\n");
2527 break;
2528 }
2529 }
2530
2531 dma_fence_put(fence);
2532 fence = next;
2533 }
2534 mutex_unlock(&adev->shadow_list_lock);
2535
2536 if (fence) {
2537 r = dma_fence_wait(fence, false);
2538 if (r)
2539 WARN(r, "recovery from shadow isn't completed\n");
2540 }
2541 dma_fence_put(fence);
2542
2543 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2544 struct amdgpu_ring *ring = adev->rings[i];
2545 if (!ring || !ring->sched.thread)
2546 continue;
2547
2548 amd_sched_job_recovery(&ring->sched);
2549 kthread_unpark(ring->sched.thread);
2550 }
2551
2552 drm_helper_resume_force_mode(adev->ddev);
2553 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2554 if (r) {
2555 /* bad news, how to tell it to userspace ? */
2556 dev_info(adev->dev, "GPU reset failed\n");
2557 }
2558
1fb37a3d 2559 adev->gfx.in_reset = false;
147b5983 2560 mutex_unlock(&adev->virt.lock_reset);
a90ad3c2
ML
2561 return r;
2562}
2563
d38ceaf9
AD
2564/**
2565 * amdgpu_gpu_reset - reset the asic
2566 *
2567 * @adev: amdgpu device pointer
2568 *
2569 * Attempt to reset the GPU if it has hung (all ASICs).
2570 * Returns 0 for success or an error on failure.
2571 */
2572int amdgpu_gpu_reset(struct amdgpu_device *adev)
2573{
d38ceaf9
AD
2574 int i, r;
2575 int resched;
35d782fe 2576 bool need_full_reset;
d38ceaf9 2577
fb140b29 2578 if (amdgpu_sriov_vf(adev))
a90ad3c2 2579 return amdgpu_sriov_gpu_reset(adev, true);
fb140b29 2580
63fbf42f
CZ
2581 if (!amdgpu_check_soft_reset(adev)) {
2582 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2583 return 0;
2584 }
d38ceaf9 2585
d94aed5a 2586 atomic_inc(&adev->gpu_reset_counter);
d38ceaf9 2587
a3c47d6b
CZ
2588 /* block TTM */
2589 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2590
0875dc9e
CZ
2591 /* block scheduler */
2592 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2593 struct amdgpu_ring *ring = adev->rings[i];
2594
2595 if (!ring)
2596 continue;
2597 kthread_park(ring->sched.thread);
aa1c8900 2598 amd_sched_hw_job_reset(&ring->sched);
0875dc9e 2599 }
2200edac
CZ
2600 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2601 amdgpu_fence_driver_force_completion(adev);
d38ceaf9 2602
35d782fe 2603 need_full_reset = amdgpu_need_full_reset(adev);
d38ceaf9 2604
35d782fe
CZ
2605 if (!need_full_reset) {
2606 amdgpu_pre_soft_reset(adev);
2607 r = amdgpu_soft_reset(adev);
2608 amdgpu_post_soft_reset(adev);
2609 if (r || amdgpu_check_soft_reset(adev)) {
2610 DRM_INFO("soft reset failed, will fallback to full reset!\n");
2611 need_full_reset = true;
2612 }
f1aa7e08
CZ
2613 }
2614
35d782fe 2615 if (need_full_reset) {
35d782fe 2616 r = amdgpu_suspend(adev);
bfa99269 2617
35d782fe
CZ
2618retry:
2619 /* Disable fb access */
2620 if (adev->mode_info.num_crtc) {
2621 struct amdgpu_mode_mc_save save;
2622 amdgpu_display_stop_mc_access(adev, &save);
2623 amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
2624 }
be34d3bf
AD
2625 if (adev->is_atom_fw)
2626 amdgpu_atomfirmware_scratch_regs_save(adev);
2627 else
2628 amdgpu_atombios_scratch_regs_save(adev);
35d782fe 2629 r = amdgpu_asic_reset(adev);
be34d3bf
AD
2630 if (adev->is_atom_fw)
2631 amdgpu_atomfirmware_scratch_regs_restore(adev);
2632 else
2633 amdgpu_atombios_scratch_regs_restore(adev);
35d782fe
CZ
2634 /* post card */
2635 amdgpu_atom_asic_init(adev->mode_info.atom_context);
2636
2637 if (!r) {
2638 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
2639 r = amdgpu_resume(adev);
2640 }
d38ceaf9 2641 }
d38ceaf9 2642 if (!r) {
e72cfd58 2643 amdgpu_irq_gpu_reset_resume_helper(adev);
2c0d7318
CZ
2644 if (need_full_reset && amdgpu_need_backup(adev)) {
2645 r = amdgpu_ttm_recover_gart(adev);
2646 if (r)
2647 DRM_ERROR("gart recovery failed!!!\n");
2648 }
1f465087
CZ
2649 r = amdgpu_ib_ring_tests(adev);
2650 if (r) {
2651 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
40019dc4 2652 r = amdgpu_suspend(adev);
53cdccd5 2653 need_full_reset = true;
40019dc4 2654 goto retry;
1f465087 2655 }
53cdccd5
CZ
2656 /**
2657 * recover VM page tables, since we cannot rely on VRAM being
2658 * consistent after a full GPU reset.
2659 */
2660 if (need_full_reset && amdgpu_need_backup(adev)) {
2661 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2662 struct amdgpu_bo *bo, *tmp;
f54d1867 2663 struct dma_fence *fence = NULL, *next = NULL;
53cdccd5
CZ
2664
2665 DRM_INFO("recover vram bo from shadow\n");
2666 mutex_lock(&adev->shadow_list_lock);
2667 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2668 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2669 if (fence) {
f54d1867 2670 r = dma_fence_wait(fence, false);
53cdccd5 2671 if (r) {
1d7b17b0 2672 WARN(r, "recovery from shadow isn't completed\n");
53cdccd5
CZ
2673 break;
2674 }
2675 }
1f465087 2676
f54d1867 2677 dma_fence_put(fence);
53cdccd5
CZ
2678 fence = next;
2679 }
2680 mutex_unlock(&adev->shadow_list_lock);
2681 if (fence) {
f54d1867 2682 r = dma_fence_wait(fence, false);
53cdccd5 2683 if (r)
1d7b17b0 2684 WARN(r, "recovery from shadow isn't completed\n");
53cdccd5 2685 }
f54d1867 2686 dma_fence_put(fence);
53cdccd5 2687 }
d38ceaf9
AD
2688 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2689 struct amdgpu_ring *ring = adev->rings[i];
2690 if (!ring)
2691 continue;
53cdccd5 2692
aa1c8900 2693 amd_sched_job_recovery(&ring->sched);
0875dc9e 2694 kthread_unpark(ring->sched.thread);
d38ceaf9 2695 }
d38ceaf9 2696 } else {
2200edac 2697 dev_err(adev->dev, "asic resume failed (%d).\n", r);
d38ceaf9 2698 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
0875dc9e
CZ
2699 if (adev->rings[i]) {
2700 kthread_unpark(adev->rings[i]->sched.thread);
0875dc9e 2701 }
d38ceaf9
AD
2702 }
2703 }
2704
2705 drm_helper_resume_force_mode(adev->ddev);
2706
2707 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2708 if (r) {
2709 /* bad news, how to tell it to userspace ? */
2710 dev_info(adev->dev, "GPU reset failed\n");
2711 }
2712
d38ceaf9
AD
2713 return r;
2714}
2715
d0dd7f0c
AD
2716void amdgpu_get_pcie_info(struct amdgpu_device *adev)
2717{
2718 u32 mask;
2719 int ret;
2720
cd474ba0
AD
2721 if (amdgpu_pcie_gen_cap)
2722 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 2723
cd474ba0
AD
2724 if (amdgpu_pcie_lane_cap)
2725 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 2726
cd474ba0
AD
2727 /* covers APUs as well */
2728 if (pci_is_root_bus(adev->pdev->bus)) {
2729 if (adev->pm.pcie_gen_mask == 0)
2730 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2731 if (adev->pm.pcie_mlw_mask == 0)
2732 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 2733 return;
cd474ba0 2734 }
d0dd7f0c 2735
cd474ba0
AD
2736 if (adev->pm.pcie_gen_mask == 0) {
2737 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
2738 if (!ret) {
2739 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
2740 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
2741 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
2742
2743 if (mask & DRM_PCIE_SPEED_25)
2744 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
2745 if (mask & DRM_PCIE_SPEED_50)
2746 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
2747 if (mask & DRM_PCIE_SPEED_80)
2748 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
2749 } else {
2750 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2751 }
2752 }
2753 if (adev->pm.pcie_mlw_mask == 0) {
2754 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
2755 if (!ret) {
2756 switch (mask) {
2757 case 32:
2758 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
2759 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2760 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2761 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2762 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2763 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2764 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2765 break;
2766 case 16:
2767 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2768 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2769 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2770 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2771 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2772 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2773 break;
2774 case 12:
2775 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2776 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2777 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2778 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2779 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2780 break;
2781 case 8:
2782 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2783 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2784 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2785 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2786 break;
2787 case 4:
2788 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2789 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2790 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2791 break;
2792 case 2:
2793 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2794 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2795 break;
2796 case 1:
2797 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
2798 break;
2799 default:
2800 break;
2801 }
2802 } else {
2803 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c
AD
2804 }
2805 }
2806}
d38ceaf9
AD
2807
2808/*
2809 * Debugfs
2810 */
2811int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
06ab6832 2812 const struct drm_info_list *files,
d38ceaf9
AD
2813 unsigned nfiles)
2814{
2815 unsigned i;
2816
2817 for (i = 0; i < adev->debugfs_count; i++) {
2818 if (adev->debugfs[i].files == files) {
2819 /* Already registered */
2820 return 0;
2821 }
2822 }
2823
2824 i = adev->debugfs_count + 1;
2825 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
2826 DRM_ERROR("Reached maximum number of debugfs components.\n");
2827 DRM_ERROR("Report so we increase "
2828 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
2829 return -EINVAL;
2830 }
2831 adev->debugfs[adev->debugfs_count].files = files;
2832 adev->debugfs[adev->debugfs_count].num_files = nfiles;
2833 adev->debugfs_count = i;
2834#if defined(CONFIG_DEBUG_FS)
d38ceaf9
AD
2835 drm_debugfs_create_files(files, nfiles,
2836 adev->ddev->primary->debugfs_root,
2837 adev->ddev->primary);
2838#endif
2839 return 0;
2840}
2841
d38ceaf9
AD
2842#if defined(CONFIG_DEBUG_FS)
2843
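/*
 * Offset layout used by the amdgpu_regs debugfs file, as implied by the
 * decoding in the read/write handlers below (informal summary, not an ABI
 * document):
 *   bits  0..21  MMIO register offset in bytes
 *   bit      23  take the PM mutex around the access
 *   bits 24..33  SE index        (0x3FF = broadcast/all)
 *   bits 34..43  SH index        (0x3FF = broadcast/all)
 *   bits 44..53  instance index  (0x3FF = broadcast/all)
 *   bit      62  apply the SE/SH/instance bank selection above
 * For example, a hypothetical userspace reader targeting register dword 0x100
 * on SE 1 only would seek to:
 *   (0x100 << 2) | (1ULL << 24) | (0x3FFULL << 34) | (0x3FFULL << 44) | (1ULL << 62)
 */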
2844static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
2845 size_t size, loff_t *pos)
2846{
45063097 2847 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
2848 ssize_t result = 0;
2849 int r;
bd12267d 2850 bool pm_pg_lock, use_bank;
56628159 2851 unsigned instance_bank, sh_bank, se_bank;
d38ceaf9
AD
2852
2853 if (size & 0x3 || *pos & 0x3)
2854 return -EINVAL;
2855
bd12267d
TSD
2856 /* are we reading registers for which a PG lock is necessary? */
2857 pm_pg_lock = (*pos >> 23) & 1;
2858
56628159
TSD
2859 if (*pos & (1ULL << 62)) {
2860 se_bank = (*pos >> 24) & 0x3FF;
2861 sh_bank = (*pos >> 34) & 0x3FF;
2862 instance_bank = (*pos >> 44) & 0x3FF;
32977f93
TSD
2863
2864 if (se_bank == 0x3FF)
2865 se_bank = 0xFFFFFFFF;
2866 if (sh_bank == 0x3FF)
2867 sh_bank = 0xFFFFFFFF;
2868 if (instance_bank == 0x3FF)
2869 instance_bank = 0xFFFFFFFF;
56628159 2870 use_bank = 1;
56628159
TSD
2871 } else {
2872 use_bank = 0;
2873 }
2874
801a6aa9 2875 *pos &= (1UL << 22) - 1;
bd12267d 2876
56628159 2877 if (use_bank) {
32977f93
TSD
2878 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
2879 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
56628159
TSD
2880 return -EINVAL;
2881 mutex_lock(&adev->grbm_idx_mutex);
2882 amdgpu_gfx_select_se_sh(adev, se_bank,
2883 sh_bank, instance_bank);
2884 }
2885
bd12267d
TSD
2886 if (pm_pg_lock)
2887 mutex_lock(&adev->pm.mutex);
2888
d38ceaf9
AD
2889 while (size) {
2890 uint32_t value;
2891
2892 if (*pos > adev->rmmio_size)
56628159 2893 goto end;
d38ceaf9
AD
2894
2895 value = RREG32(*pos >> 2);
2896 r = put_user(value, (uint32_t *)buf);
56628159
TSD
2897 if (r) {
2898 result = r;
2899 goto end;
2900 }
d38ceaf9
AD
2901
2902 result += 4;
2903 buf += 4;
2904 *pos += 4;
2905 size -= 4;
2906 }
2907
56628159
TSD
2908end:
2909 if (use_bank) {
2910 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2911 mutex_unlock(&adev->grbm_idx_mutex);
2912 }
2913
bd12267d
TSD
2914 if (pm_pg_lock)
2915 mutex_unlock(&adev->pm.mutex);
2916
d38ceaf9
AD
2917 return result;
2918}
2919
2920static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
2921 size_t size, loff_t *pos)
2922{
45063097 2923 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
2924 ssize_t result = 0;
2925 int r;
394fdde2
TSD
2926 bool pm_pg_lock, use_bank;
2927 unsigned instance_bank, sh_bank, se_bank;
d38ceaf9
AD
2928
2929 if (size & 0x3 || *pos & 0x3)
2930 return -EINVAL;
2931
394fdde2
TSD
2932 /* are we writing registers for which a PG lock is necessary? */
2933 pm_pg_lock = (*pos >> 23) & 1;
2934
2935 if (*pos & (1ULL << 62)) {
2936 se_bank = (*pos >> 24) & 0x3FF;
2937 sh_bank = (*pos >> 34) & 0x3FF;
2938 instance_bank = (*pos >> 44) & 0x3FF;
2939
2940 if (se_bank == 0x3FF)
2941 se_bank = 0xFFFFFFFF;
2942 if (sh_bank == 0x3FF)
2943 sh_bank = 0xFFFFFFFF;
2944 if (instance_bank == 0x3FF)
2945 instance_bank = 0xFFFFFFFF;
2946 use_bank = 1;
2947 } else {
2948 use_bank = 0;
2949 }
2950
801a6aa9 2951 *pos &= (1UL << 22) - 1;
394fdde2
TSD
2952
2953 if (use_bank) {
2954 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
2955 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
2956 return -EINVAL;
2957 mutex_lock(&adev->grbm_idx_mutex);
2958 amdgpu_gfx_select_se_sh(adev, se_bank,
2959 sh_bank, instance_bank);
2960 }
2961
2962 if (pm_pg_lock)
2963 mutex_lock(&adev->pm.mutex);
2964
d38ceaf9
AD
2965 while (size) {
2966 uint32_t value;
2967
2968 if (*pos > adev->rmmio_size)
2969 return result;
2970
2971 r = get_user(value, (uint32_t *)buf);
2972 if (r)
2973 return r;
2974
2975 WREG32(*pos >> 2, value);
2976
2977 result += 4;
2978 buf += 4;
2979 *pos += 4;
2980 size -= 4;
2981 }
2982
394fdde2
TSD
2983 if (use_bank) {
2984 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2985 mutex_unlock(&adev->grbm_idx_mutex);
2986 }
2987
2988 if (pm_pg_lock)
2989 mutex_unlock(&adev->pm.mutex);
2990
d38ceaf9
AD
2991 return result;
2992}
2993
adcec288
TSD
2994static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
2995 size_t size, loff_t *pos)
2996{
45063097 2997 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
2998 ssize_t result = 0;
2999 int r;
3000
3001 if (size & 0x3 || *pos & 0x3)
3002 return -EINVAL;
3003
3004 while (size) {
3005 uint32_t value;
3006
3007 value = RREG32_PCIE(*pos >> 2);
3008 r = put_user(value, (uint32_t *)buf);
3009 if (r)
3010 return r;
3011
3012 result += 4;
3013 buf += 4;
3014 *pos += 4;
3015 size -= 4;
3016 }
3017
3018 return result;
3019}
3020
3021static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
3022 size_t size, loff_t *pos)
3023{
45063097 3024 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3025 ssize_t result = 0;
3026 int r;
3027
3028 if (size & 0x3 || *pos & 0x3)
3029 return -EINVAL;
3030
3031 while (size) {
3032 uint32_t value;
3033
3034 r = get_user(value, (uint32_t *)buf);
3035 if (r)
3036 return r;
3037
3038 WREG32_PCIE(*pos >> 2, value);
3039
3040 result += 4;
3041 buf += 4;
3042 *pos += 4;
3043 size -= 4;
3044 }
3045
3046 return result;
3047}
3048
3049static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
3050 size_t size, loff_t *pos)
3051{
45063097 3052 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3053 ssize_t result = 0;
3054 int r;
3055
3056 if (size & 0x3 || *pos & 0x3)
3057 return -EINVAL;
3058
3059 while (size) {
3060 uint32_t value;
3061
3062 value = RREG32_DIDT(*pos >> 2);
3063 r = put_user(value, (uint32_t *)buf);
3064 if (r)
3065 return r;
3066
3067 result += 4;
3068 buf += 4;
3069 *pos += 4;
3070 size -= 4;
3071 }
3072
3073 return result;
3074}
3075
3076static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
3077 size_t size, loff_t *pos)
3078{
45063097 3079 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3080 ssize_t result = 0;
3081 int r;
3082
3083 if (size & 0x3 || *pos & 0x3)
3084 return -EINVAL;
3085
3086 while (size) {
3087 uint32_t value;
3088
3089 r = get_user(value, (uint32_t *)buf);
3090 if (r)
3091 return r;
3092
3093 WREG32_DIDT(*pos >> 2, value);
3094
3095 result += 4;
3096 buf += 4;
3097 *pos += 4;
3098 size -= 4;
3099 }
3100
3101 return result;
3102}
3103
3104static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
3105 size_t size, loff_t *pos)
3106{
45063097 3107 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3108 ssize_t result = 0;
3109 int r;
3110
3111 if (size & 0x3 || *pos & 0x3)
3112 return -EINVAL;
3113
3114 while (size) {
3115 uint32_t value;
3116
6fc0deaf 3117 value = RREG32_SMC(*pos);
adcec288
TSD
3118 r = put_user(value, (uint32_t *)buf);
3119 if (r)
3120 return r;
3121
3122 result += 4;
3123 buf += 4;
3124 *pos += 4;
3125 size -= 4;
3126 }
3127
3128 return result;
3129}
3130
3131static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
3132 size_t size, loff_t *pos)
3133{
45063097 3134 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3135 ssize_t result = 0;
3136 int r;
3137
3138 if (size & 0x3 || *pos & 0x3)
3139 return -EINVAL;
3140
3141 while (size) {
3142 uint32_t value;
3143
3144 r = get_user(value, (uint32_t *)buf);
3145 if (r)
3146 return r;
3147
6fc0deaf 3148 WREG32_SMC(*pos, value);
adcec288
TSD
3149
3150 result += 4;
3151 buf += 4;
3152 *pos += 4;
3153 size -= 4;
3154 }
3155
3156 return result;
3157}
3158
1e051413
TSD
3159static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
3160 size_t size, loff_t *pos)
3161{
45063097 3162 struct amdgpu_device *adev = file_inode(f)->i_private;
1e051413
TSD
3163 ssize_t result = 0;
3164 int r;
3165 uint32_t *config, no_regs = 0;
3166
3167 if (size & 0x3 || *pos & 0x3)
3168 return -EINVAL;
3169
ecab7668 3170 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
1e051413
TSD
3171 if (!config)
3172 return -ENOMEM;
3173
3174 /* version, increment each time something is added */
9a999359 3175 config[no_regs++] = 3;
1e051413
TSD
3176 config[no_regs++] = adev->gfx.config.max_shader_engines;
3177 config[no_regs++] = adev->gfx.config.max_tile_pipes;
3178 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
3179 config[no_regs++] = adev->gfx.config.max_sh_per_se;
3180 config[no_regs++] = adev->gfx.config.max_backends_per_se;
3181 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
3182 config[no_regs++] = adev->gfx.config.max_gprs;
3183 config[no_regs++] = adev->gfx.config.max_gs_threads;
3184 config[no_regs++] = adev->gfx.config.max_hw_contexts;
3185 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
3186 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
3187 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
3188 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
3189 config[no_regs++] = adev->gfx.config.num_tile_pipes;
3190 config[no_regs++] = adev->gfx.config.backend_enable_mask;
3191 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
3192 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
3193 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
3194 config[no_regs++] = adev->gfx.config.num_gpus;
3195 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
3196 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
3197 config[no_regs++] = adev->gfx.config.gb_addr_config;
3198 config[no_regs++] = adev->gfx.config.num_rbs;
3199
89a8f309
TSD
3200 /* rev==1 */
3201 config[no_regs++] = adev->rev_id;
3202 config[no_regs++] = adev->pg_flags;
3203 config[no_regs++] = adev->cg_flags;
3204
e9f11dc8
TSD
3205 /* rev==2 */
3206 config[no_regs++] = adev->family;
3207 config[no_regs++] = adev->external_rev_id;
3208
9a999359
TSD
3209 /* rev==3 */
3210 config[no_regs++] = adev->pdev->device;
3211 config[no_regs++] = adev->pdev->revision;
3212 config[no_regs++] = adev->pdev->subsystem_device;
3213 config[no_regs++] = adev->pdev->subsystem_vendor;
3214
1e051413
TSD
3215 while (size && (*pos < no_regs * 4)) {
3216 uint32_t value;
3217
3218 value = config[*pos >> 2];
3219 r = put_user(value, (uint32_t *)buf);
3220 if (r) {
3221 kfree(config);
3222 return r;
3223 }
3224
3225 result += 4;
3226 buf += 4;
3227 *pos += 4;
3228 size -= 4;
3229 }
3230
3231 kfree(config);
3232 return result;
3233}
3234
f2cdaf20
TSD
3235static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
3236 size_t size, loff_t *pos)
3237{
45063097 3238 struct amdgpu_device *adev = file_inode(f)->i_private;
9f8df7d7
TSD
3239 int idx, x, outsize, r, valuesize;
3240 uint32_t values[16];
f2cdaf20 3241
9f8df7d7 3242 if (size & 3 || *pos & 0x3)
f2cdaf20
TSD
3243 return -EINVAL;
3244
3cbc614f
SP
3245 if (amdgpu_dpm == 0)
3246 return -EINVAL;
3247
f2cdaf20
TSD
3248 /* convert offset to sensor number */
3249 idx = *pos >> 2;
3250
9f8df7d7 3251 valuesize = sizeof(values);
f2cdaf20 3252 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
9f8df7d7 3253 r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
3cbc614f
SP
3254 else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
3255 r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
3256 &valuesize);
f2cdaf20
TSD
3257 else
3258 return -EINVAL;
3259
9f8df7d7
TSD
3260 if (size > valuesize)
3261 return -EINVAL;
3262
3263 outsize = 0;
3264 x = 0;
3265 if (!r) {
3266 while (size) {
3267 r = put_user(values[x++], (int32_t *)buf);
3268 buf += 4;
3269 size -= 4;
3270 outsize += 4;
3271 }
3272 }
f2cdaf20 3273
9f8df7d7 3274 return !r ? outsize : r;
f2cdaf20 3275}
1e051413 3276
273d7aa1
TSD
3277static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
3278 size_t size, loff_t *pos)
3279{
3280 struct amdgpu_device *adev = f->f_inode->i_private;
3281 int r, x;
3282 ssize_t result = 0;
472259f0 3283 uint32_t offset, se, sh, cu, wave, simd, data[32];
273d7aa1
TSD
3284
3285 if (size & 3 || *pos & 3)
3286 return -EINVAL;
3287
3288 /* decode offset */
3289 offset = (*pos & 0x7F);
3290 se = ((*pos >> 7) & 0xFF);
3291 sh = ((*pos >> 15) & 0xFF);
3292 cu = ((*pos >> 23) & 0xFF);
3293 wave = ((*pos >> 31) & 0xFF);
3294 simd = ((*pos >> 37) & 0xFF);
273d7aa1
TSD
3295
3296 /* switch to the specific se/sh/cu */
3297 mutex_lock(&adev->grbm_idx_mutex);
3298 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3299
3300 x = 0;
472259f0
TSD
3301 if (adev->gfx.funcs->read_wave_data)
3302 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
273d7aa1
TSD
3303
3304 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3305 mutex_unlock(&adev->grbm_idx_mutex);
3306
5ecfb3b8
TSD
3307 if (!x)
3308 return -EINVAL;
3309
472259f0 3310 while (size && (offset < x * 4)) {
273d7aa1
TSD
3311 uint32_t value;
3312
472259f0 3313 value = data[offset >> 2];
273d7aa1
TSD
3314 r = put_user(value, (uint32_t *)buf);
3315 if (r)
3316 return r;
3317
3318 result += 4;
3319 buf += 4;
472259f0 3320 offset += 4;
273d7aa1
TSD
3321 size -= 4;
3322 }
3323
3324 return result;
3325}
3326
c5a60ce8
TSD
3327static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
3328 size_t size, loff_t *pos)
3329{
3330 struct amdgpu_device *adev = f->f_inode->i_private;
3331 int r;
3332 ssize_t result = 0;
3333 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
3334
3335 if (size & 3 || *pos & 3)
3336 return -EINVAL;
3337
3338 /* decode offset */
3339 offset = (*pos & 0xFFF); /* in dwords */
3340 se = ((*pos >> 12) & 0xFF);
3341 sh = ((*pos >> 20) & 0xFF);
3342 cu = ((*pos >> 28) & 0xFF);
3343 wave = ((*pos >> 36) & 0xFF);
3344 simd = ((*pos >> 44) & 0xFF);
3345 thread = ((*pos >> 52) & 0xFF);
3346 bank = ((*pos >> 60) & 1);
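	/*
	 * Illustrative example (hypothetical): to dump SGPRs of wave 2 on
	 * SE0/SH0/CU0/SIMD0 starting at dword 4, a reader would seek to
	 * (4) | (2ULL << 36) | (1ULL << 60) and read in multiples of 4 bytes,
	 * per the decoding above.
	 */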
3347
3348 data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
3349 if (!data)
3350 return -ENOMEM;
3351
3352 /* switch to the specific se/sh/cu */
3353 mutex_lock(&adev->grbm_idx_mutex);
3354 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3355
3356 if (bank == 0) {
3357 if (adev->gfx.funcs->read_wave_vgprs)
3358 adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
3359 } else {
3360 if (adev->gfx.funcs->read_wave_sgprs)
3361 adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
3362 }
3363
3364 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3365 mutex_unlock(&adev->grbm_idx_mutex);
3366
3367 while (size) {
3368 uint32_t value;
3369
3370 value = data[offset++];
3371 r = put_user(value, (uint32_t *)buf);
3372 if (r) {
3373 result = r;
3374 goto err;
3375 }
3376
3377 result += 4;
3378 buf += 4;
3379 size -= 4;
3380 }
3381
3382err:
3383 kfree(data);
3384 return result;
3385}
3386
d38ceaf9
AD
3387static const struct file_operations amdgpu_debugfs_regs_fops = {
3388 .owner = THIS_MODULE,
3389 .read = amdgpu_debugfs_regs_read,
3390 .write = amdgpu_debugfs_regs_write,
3391 .llseek = default_llseek
3392};
adcec288
TSD
3393static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3394 .owner = THIS_MODULE,
3395 .read = amdgpu_debugfs_regs_didt_read,
3396 .write = amdgpu_debugfs_regs_didt_write,
3397 .llseek = default_llseek
3398};
3399static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3400 .owner = THIS_MODULE,
3401 .read = amdgpu_debugfs_regs_pcie_read,
3402 .write = amdgpu_debugfs_regs_pcie_write,
3403 .llseek = default_llseek
3404};
3405static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3406 .owner = THIS_MODULE,
3407 .read = amdgpu_debugfs_regs_smc_read,
3408 .write = amdgpu_debugfs_regs_smc_write,
3409 .llseek = default_llseek
3410};
3411
1e051413
TSD
3412static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3413 .owner = THIS_MODULE,
3414 .read = amdgpu_debugfs_gca_config_read,
3415 .llseek = default_llseek
3416};
3417
f2cdaf20
TSD
3418static const struct file_operations amdgpu_debugfs_sensors_fops = {
3419 .owner = THIS_MODULE,
3420 .read = amdgpu_debugfs_sensor_read,
3421 .llseek = default_llseek
3422};
3423
273d7aa1
TSD
3424static const struct file_operations amdgpu_debugfs_wave_fops = {
3425 .owner = THIS_MODULE,
3426 .read = amdgpu_debugfs_wave_read,
3427 .llseek = default_llseek
3428};
c5a60ce8
TSD
3429static const struct file_operations amdgpu_debugfs_gpr_fops = {
3430 .owner = THIS_MODULE,
3431 .read = amdgpu_debugfs_gpr_read,
3432 .llseek = default_llseek
3433};
273d7aa1 3434
adcec288
TSD
3435static const struct file_operations *debugfs_regs[] = {
3436 &amdgpu_debugfs_regs_fops,
3437 &amdgpu_debugfs_regs_didt_fops,
3438 &amdgpu_debugfs_regs_pcie_fops,
3439 &amdgpu_debugfs_regs_smc_fops,
1e051413 3440 &amdgpu_debugfs_gca_config_fops,
f2cdaf20 3441 &amdgpu_debugfs_sensors_fops,
273d7aa1 3442 &amdgpu_debugfs_wave_fops,
c5a60ce8 3443 &amdgpu_debugfs_gpr_fops,
adcec288
TSD
3444};
3445
3446static const char *debugfs_regs_names[] = {
3447 "amdgpu_regs",
3448 "amdgpu_regs_didt",
3449 "amdgpu_regs_pcie",
3450 "amdgpu_regs_smc",
1e051413 3451 "amdgpu_gca_config",
f2cdaf20 3452 "amdgpu_sensors",
273d7aa1 3453 "amdgpu_wave",
c5a60ce8 3454 "amdgpu_gpr",
adcec288 3455};
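/*
 * These entries are created under the per-device DRM debugfs directory,
 * typically /sys/kernel/debug/dri/<minor>/<name>; the actual root comes from
 * minor->debugfs_root in amdgpu_debugfs_regs_init() below.
 */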
d38ceaf9
AD
3456
3457static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3458{
3459 struct drm_minor *minor = adev->ddev->primary;
3460 struct dentry *ent, *root = minor->debugfs_root;
adcec288
TSD
3461 unsigned i, j;
3462
3463 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3464 ent = debugfs_create_file(debugfs_regs_names[i],
3465 S_IFREG | S_IRUGO, root,
3466 adev, debugfs_regs[i]);
3467 if (IS_ERR(ent)) {
3468 for (j = 0; j < i; j++) {
3469 debugfs_remove(adev->debugfs_regs[j]);
3470 adev->debugfs_regs[j] = NULL;
3471 }
3472 return PTR_ERR(ent);
3473 }
d38ceaf9 3474
adcec288
TSD
3475 if (!i)
3476 i_size_write(ent->d_inode, adev->rmmio_size);
3477 adev->debugfs_regs[i] = ent;
3478 }
d38ceaf9
AD
3479
3480 return 0;
3481}
3482
3483static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
3484{
adcec288
TSD
3485 unsigned i;
3486
3487 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3488 if (adev->debugfs_regs[i]) {
3489 debugfs_remove(adev->debugfs_regs[i]);
3490 adev->debugfs_regs[i] = NULL;
3491 }
3492 }
d38ceaf9
AD
3493}
3494
3495int amdgpu_debugfs_init(struct drm_minor *minor)
3496{
3497 return 0;
3498}
7cebc728
AK
3499#else
3500static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3501{
3502 return 0;
3503}
3504static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
d38ceaf9 3505#endif