drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
0875dc9e 28#include <linux/kthread.h>
29#include <linux/console.h>
30#include <linux/slab.h>
31#include <drm/drmP.h>
32#include <drm/drm_crtc_helper.h>
4562236b 33#include <drm/drm_atomic_helper.h>
34#include <drm/amdgpu_drm.h>
35#include <linux/vgaarb.h>
36#include <linux/vga_switcheroo.h>
37#include <linux/efi.h>
38#include "amdgpu.h"
f4b373f4 39#include "amdgpu_trace.h"
40#include "amdgpu_i2c.h"
41#include "atom.h"
42#include "amdgpu_atombios.h"
a5bde2f9 43#include "amdgpu_atomfirmware.h"
d0dd7f0c 44#include "amd_pcie.h"
45#ifdef CONFIG_DRM_AMDGPU_SI
46#include "si.h"
47#endif
48#ifdef CONFIG_DRM_AMDGPU_CIK
49#include "cik.h"
50#endif
aaa36a97 51#include "vi.h"
460826e6 52#include "soc15.h"
d38ceaf9 53#include "bif/bif_4_1_d.h"
9accf2fd 54#include <linux/pci.h>
bec86378 55#include <linux/firmware.h>
89041940 56#include "amdgpu_vf_error.h"
d38ceaf9 57
ba997709 58#include "amdgpu_amdkfd.h"
d2f52ac8 59#include "amdgpu_pm.h"
d38ceaf9 60
e2a75f88 61MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
3f76dced 62MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
2d2e5e7e 63MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
e2a75f88 64
65#define AMDGPU_RESUME_MS 2000
66
d38ceaf9 67static const char *amdgpu_asic_name[] = {
68 "TAHITI",
69 "PITCAIRN",
70 "VERDE",
71 "OLAND",
72 "HAINAN",
73 "BONAIRE",
74 "KAVERI",
75 "KABINI",
76 "HAWAII",
77 "MULLINS",
78 "TOPAZ",
79 "TONGA",
48299f95 80 "FIJI",
d38ceaf9 81 "CARRIZO",
139f4917 82 "STONEY",
83 "POLARIS10",
84 "POLARIS11",
c4642a47 85 "POLARIS12",
d4196f01 86 "VEGA10",
8fab806a 87 "VEGA12",
2ca8a5d2 88 "RAVEN",
89 "LAST",
90};
91
92static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
93
94/**
95 * amdgpu_device_is_px - Is the device a dGPU with HG/PX power control
96 *
97 * @dev: drm_device pointer
98 *
99 * Returns true if the device is a dGPU with HG/PX power control,
100 * otherwise returns false.
101 */
102bool amdgpu_device_is_px(struct drm_device *dev)
103{
104 struct amdgpu_device *adev = dev->dev_private;
105
2f7d10b3 106 if (adev->flags & AMD_IS_PX)
107 return true;
108 return false;
109}
110
111/*
112 * MMIO register access helper functions.
113 */
114/**
115 * amdgpu_mm_rreg - read a memory mapped IO register
116 *
117 * @adev: amdgpu_device pointer
118 * @reg: dword aligned register offset
119 * @acc_flags: access flags which require special behavior
120 *
121 * Returns the 32 bit value from the offset specified.
122 */
d38ceaf9 123uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
15d72fd7 124 uint32_t acc_flags)
d38ceaf9 125{
126 uint32_t ret;
127
43ca8efa 128 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
bc992ba5 129 return amdgpu_virt_kiq_rreg(adev, reg);
bc992ba5 130
15d72fd7 131 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
f4b373f4 132 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
133 else {
134 unsigned long flags;
135
136 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
137 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
138 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
139 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
d38ceaf9 140 }
141 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
142 return ret;
143}
144
145/*
146 * MMIO register read with byte access helper function
147 * @offset: byte offset from MMIO start
148 *
149 */
150
151/**
152 * amdgpu_mm_rreg8 - read a memory mapped IO register
153 *
154 * @adev: amdgpu_device pointer
155 * @offset: byte aligned register offset
156 *
157 * Returns the 8 bit value from the offset specified.
158 */
159uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
160 if (offset < adev->rmmio_size)
161 return (readb(adev->rmmio + offset));
162 BUG();
163}
164
165/*
166 * MMIO register write with byte access helper function
167 * @offset: byte offset from MMIO start
168 * @value: the value to be written to the register
169 *
170 */
171/**
172 * amdgpu_mm_wreg8 - write to a memory mapped IO register
173 *
174 * @adev: amdgpu_device pointer
175 * @offset: byte aligned register offset
176 * @value: 8 bit value to write
177 *
178 * Writes the value specified to the offset specified.
179 */
180void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
181 if (offset < adev->rmmio_size)
182 writeb(value, adev->rmmio + offset);
183 else
184 BUG();
185}
186
187/**
188 * amdgpu_mm_wreg - write to a memory mapped IO register
189 *
190 * @adev: amdgpu_device pointer
191 * @reg: dword aligned register offset
192 * @v: 32 bit value to write to the register
193 * @acc_flags: access flags which require special behavior
194 *
195 * Writes the value specified to the offset specified.
196 */
d38ceaf9 197void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
15d72fd7 198 uint32_t acc_flags)
d38ceaf9 199{
f4b373f4 200 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
4e99a44e 201
202 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
203 adev->last_mm_index = v;
204 }
205
43ca8efa 206 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
bc992ba5 207 return amdgpu_virt_kiq_wreg(adev, reg, v);
bc992ba5 208
15d72fd7 209 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
210 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
211 else {
212 unsigned long flags;
213
214 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
215 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
216 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
217 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
218 }
219
220 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
221 udelay(500);
222 }
223}
224
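/*
 * Illustrative sketch (not part of the original file): a caller typically does
 * a read-modify-write of a dword register through these helpers (the
 * RREG32()/WREG32() macros in amdgpu.h are assumed to wrap them with default
 * access flags):
 *
 *	u32 tmp;
 *
 *	tmp = amdgpu_mm_rreg(adev, reg, 0);	// dword-aligned register offset
 *	tmp &= ~mask;				// clear the field of interest
 *	tmp |= (val & mask);			// set the new value
 *	amdgpu_mm_wreg(adev, reg, tmp, 0);
 *
 * Offsets below adev->rmmio_size are accessed directly; larger offsets go
 * through the mmMM_INDEX/mmMM_DATA window under mmio_idx_lock as shown above.
 */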
225/**
226 * amdgpu_io_rreg - read an IO register
227 *
228 * @adev: amdgpu_device pointer
229 * @reg: dword aligned register offset
230 *
231 * Returns the 32 bit value from the offset specified.
232 */
233u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
234{
235 if ((reg * 4) < adev->rio_mem_size)
236 return ioread32(adev->rio_mem + (reg * 4));
237 else {
238 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
239 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
240 }
241}
242
243/**
244 * amdgpu_io_wreg - write to an IO register
245 *
246 * @adev: amdgpu_device pointer
247 * @reg: dword aligned register offset
248 * @v: 32 bit value to write to the register
249 *
250 * Writes the value specified to the offset specified.
251 */
252void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
253{
254 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
255 adev->last_mm_index = v;
256 }
257
258 if ((reg * 4) < adev->rio_mem_size)
259 iowrite32(v, adev->rio_mem + (reg * 4));
260 else {
261 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
262 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
263 }
264
265 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
266 udelay(500);
267 }
268}
269
270/**
271 * amdgpu_mm_rdoorbell - read a doorbell dword
272 *
273 * @adev: amdgpu_device pointer
274 * @index: doorbell index
275 *
276 * Returns the value in the doorbell aperture at the
277 * requested doorbell index (CIK).
278 */
279u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
280{
281 if (index < adev->doorbell.num_doorbells) {
282 return readl(adev->doorbell.ptr + index);
283 } else {
284 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
285 return 0;
286 }
287}
288
289/**
290 * amdgpu_mm_wdoorbell - write a doorbell dword
291 *
292 * @adev: amdgpu_device pointer
293 * @index: doorbell index
294 * @v: value to write
295 *
296 * Writes @v to the doorbell aperture at the
297 * requested doorbell index (CIK).
298 */
299void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
300{
301 if (index < adev->doorbell.num_doorbells) {
302 writel(v, adev->doorbell.ptr + index);
303 } else {
304 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
305 }
306}
307
308/**
309 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
310 *
311 * @adev: amdgpu_device pointer
312 * @index: doorbell index
313 *
314 * Returns the value in the doorbell aperture at the
315 * requested doorbell index (VEGA10+).
316 */
317u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
318{
319 if (index < adev->doorbell.num_doorbells) {
320 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
321 } else {
322 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
323 return 0;
324 }
325}
326
327/**
328 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
329 *
330 * @adev: amdgpu_device pointer
331 * @index: doorbell index
332 * @v: value to write
333 *
334 * Writes @v to the doorbell aperture at the
335 * requested doorbell index (VEGA10+).
336 */
337void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
338{
339 if (index < adev->doorbell.num_doorbells) {
340 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
341 } else {
342 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
343 }
344}
345
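/*
 * Illustrative sketch (not part of the original file): ring code is expected
 * to publish its write pointer through these doorbell helpers; the doorbell
 * index and wptr below are hypothetical values used only for illustration:
 *
 *	amdgpu_mm_wdoorbell(adev, db_index, lower_32_bits(wptr));	// pre-VEGA10
 *	amdgpu_mm_wdoorbell64(adev, db_index, wptr);			// VEGA10+
 */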
346/**
347 * amdgpu_invalid_rreg - dummy reg read function
348 *
349 * @adev: amdgpu device pointer
350 * @reg: offset of register
351 *
352 * Dummy register read function. Used for register blocks
353 * that certain asics don't have (all asics).
354 * Returns the value in the register.
355 */
356static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
357{
358 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
359 BUG();
360 return 0;
361}
362
363/**
364 * amdgpu_invalid_wreg - dummy reg write function
365 *
366 * @adev: amdgpu device pointer
367 * @reg: offset of register
368 * @v: value to write to the register
369 *
370 * Dummy register write function. Used for register blocks
371 * that certain asics don't have (all asics).
372 */
373static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
374{
375 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
376 reg, v);
377 BUG();
378}
379
380/**
381 * amdgpu_block_invalid_rreg - dummy reg read function
382 *
383 * @adev: amdgpu device pointer
384 * @block: offset of instance
385 * @reg: offset of register
386 *
387 * Dummy register read function. Used for register blocks
388 * that certain asics don't have (all asics).
389 * Returns the value in the register.
390 */
391static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
392 uint32_t block, uint32_t reg)
393{
394 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
395 reg, block);
396 BUG();
397 return 0;
398}
399
400/**
401 * amdgpu_block_invalid_wreg - dummy reg write function
402 *
403 * @adev: amdgpu device pointer
404 * @block: offset of instance
405 * @reg: offset of register
406 * @v: value to write to the register
407 *
408 * Dummy register write function. Used for register blocks
409 * that certain asics don't have (all asics).
410 */
411static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
412 uint32_t block,
413 uint32_t reg, uint32_t v)
414{
415 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
416 reg, block, v);
417 BUG();
418}
419
420/**
421 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
422 *
423 * @adev: amdgpu device pointer
424 *
425 * Allocates a scratch page of VRAM for use by various things in the
426 * driver.
427 */
06ec9070 428static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
d38ceaf9 429{
a4a02777
CK
430 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
431 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
432 &adev->vram_scratch.robj,
433 &adev->vram_scratch.gpu_addr,
434 (void **)&adev->vram_scratch.ptr);
d38ceaf9
AD
435}
436
437/**
438 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
439 *
440 * @adev: amdgpu device pointer
441 *
442 * Frees the VRAM scratch page.
443 */
06ec9070 444static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
d38ceaf9 445{
078af1a3 446 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
d38ceaf9
AD
447}
448
449/**
9c3f2b54 450 * amdgpu_device_program_register_sequence - program an array of registers.
d38ceaf9
AD
451 *
452 * @adev: amdgpu_device pointer
453 * @registers: pointer to the register array
454 * @array_size: size of the register array
455 *
456 * Programs an array of registers with AND and OR masks.
457 * This is a helper for setting golden registers.
458 */
459void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
460 const u32 *registers,
461 const u32 array_size)
d38ceaf9
AD
462{
463 u32 tmp, reg, and_mask, or_mask;
464 int i;
465
466 if (array_size % 3)
467 return;
468
469 for (i = 0; i < array_size; i +=3) {
470 reg = registers[i + 0];
471 and_mask = registers[i + 1];
472 or_mask = registers[i + 2];
473
474 if (and_mask == 0xffffffff) {
475 tmp = or_mask;
476 } else {
477 tmp = RREG32(reg);
478 tmp &= ~and_mask;
479 tmp |= or_mask;
480 }
481 WREG32(reg, tmp);
482 }
483}
484
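/*
 * Illustrative sketch (not part of the original file): golden register lists
 * are flat arrays of {offset, and_mask, or_mask} triplets. The offsets and
 * masks below are made-up placeholders, not real golden settings:
 *
 *	static const u32 fake_golden_settings[] = {
 *		// offset    and_mask    or_mask
 *		0x0000260c, 0xffffffff, 0x00000800,	// and_mask == ~0: direct write
 *		0x000098f8, 0x00ff0000, 0x00120000,	// otherwise: read-modify-write
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, fake_golden_settings,
 *						ARRAY_SIZE(fake_golden_settings));
 */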
485/**
486 * amdgpu_device_pci_config_reset - reset the GPU
487 *
488 * @adev: amdgpu_device pointer
489 *
490 * Resets the GPU using the pci config reset sequence.
491 * Only applicable to asics prior to vega10.
492 */
8111c387 493void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
d38ceaf9
AD
494{
495 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
496}
497
498/*
499 * GPU doorbell aperture helpers function.
500 */
501/**
06ec9070 502 * amdgpu_device_doorbell_init - Init doorbell driver information.
d38ceaf9
AD
503 *
504 * @adev: amdgpu_device pointer
505 *
506 * Init doorbell driver information (CIK)
507 * Returns 0 on success, error on failure.
508 */
06ec9070 509static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
d38ceaf9 510{
511 /* No doorbell on SI hardware generation */
512 if (adev->asic_type < CHIP_BONAIRE) {
513 adev->doorbell.base = 0;
514 adev->doorbell.size = 0;
515 adev->doorbell.num_doorbells = 0;
516 adev->doorbell.ptr = NULL;
517 return 0;
518 }
519
520 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
521 return -EINVAL;
522
d38ceaf9
AD
523 /* doorbell bar mapping */
524 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
525 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
526
edf600da 527 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
d38ceaf9
AD
528 AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
529 if (adev->doorbell.num_doorbells == 0)
530 return -EINVAL;
531
8972e5d2
CK
532 adev->doorbell.ptr = ioremap(adev->doorbell.base,
533 adev->doorbell.num_doorbells *
534 sizeof(u32));
535 if (adev->doorbell.ptr == NULL)
d38ceaf9 536 return -ENOMEM;
d38ceaf9
AD
537
538 return 0;
539}
540
541/**
06ec9070 542 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
d38ceaf9
AD
543 *
544 * @adev: amdgpu_device pointer
545 *
546 * Tear down doorbell driver information (CIK)
547 */
06ec9070 548static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
d38ceaf9
AD
549{
550 iounmap(adev->doorbell.ptr);
551 adev->doorbell.ptr = NULL;
552}
553
22cb0164 554
d38ceaf9
AD
555
556/*
06ec9070 557 * amdgpu_device_wb_*()
455a7bc2 558 * Writeback is the method by which the GPU updates special pages in memory
ea81a173 559 * with the status of certain GPU events (fences, ring pointers,etc.).
560 */
561
562/**
06ec9070 563 * amdgpu_device_wb_fini - Disable Writeback and free memory
d38ceaf9
AD
564 *
565 * @adev: amdgpu_device pointer
566 *
567 * Disables Writeback and frees the Writeback memory (all asics).
568 * Used at driver shutdown.
569 */
06ec9070 570static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
d38ceaf9
AD
571{
572 if (adev->wb.wb_obj) {
a76ed485
AD
573 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
574 &adev->wb.gpu_addr,
575 (void **)&adev->wb.wb);
d38ceaf9
AD
576 adev->wb.wb_obj = NULL;
577 }
578}
579
580/**
06ec9070 581 * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
d38ceaf9
AD
582 *
583 * @adev: amdgpu_device pointer
584 *
455a7bc2 585 * Initializes writeback and allocates writeback memory (all asics).
d38ceaf9
AD
586 * Used at driver startup.
587 * Returns 0 on success or a negative error code on failure.
588 */
06ec9070 589static int amdgpu_device_wb_init(struct amdgpu_device *adev)
d38ceaf9
AD
590{
591 int r;
592
593 if (adev->wb.wb_obj == NULL) {
594 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
595 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
a76ed485
AD
596 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
597 &adev->wb.wb_obj, &adev->wb.gpu_addr,
598 (void **)&adev->wb.wb);
d38ceaf9
AD
599 if (r) {
600 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
601 return r;
602 }
d38ceaf9
AD
603
604 adev->wb.num_wb = AMDGPU_MAX_WB;
605 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
606
607 /* clear wb memory */
73469585 608 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
d38ceaf9
AD
609 }
610
611 return 0;
612}
613
614/**
131b4b36 615 * amdgpu_device_wb_get - Allocate a wb entry
d38ceaf9
AD
616 *
617 * @adev: amdgpu_device pointer
618 * @wb: wb index
619 *
620 * Allocate a wb slot for use by the driver (all asics).
621 * Returns 0 on success or -EINVAL on failure.
622 */
131b4b36 623int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
d38ceaf9
AD
624{
625 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
d38ceaf9 626
97407b63 627 if (offset < adev->wb.num_wb) {
7014285a 628 __set_bit(offset, adev->wb.used);
63ae07ca 629 *wb = offset << 3; /* convert to dw offset */
630 return 0;
631 } else {
632 return -EINVAL;
633 }
634}
635
d38ceaf9 636/**
131b4b36 637 * amdgpu_device_wb_free - Free a wb entry
d38ceaf9
AD
638 *
639 * @adev: amdgpu_device pointer
640 * @wb: wb index
641 *
642 * Free a wb slot allocated for use by the driver (all asics)
643 */
131b4b36 644void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
d38ceaf9 645{
73469585 646 wb >>= 3;
d38ceaf9 647 if (wb < adev->wb.num_wb)
73469585 648 __clear_bit(wb, adev->wb.used);
d38ceaf9
AD
649}
650
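/*
 * Illustrative sketch (not part of the original file): a ring or IP block that
 * needs a writeback slot allocates it with amdgpu_device_wb_get(), derives the
 * CPU and GPU views from the returned dword offset, and releases it again with
 * amdgpu_device_wb_free(). Error handling is elided for brevity:
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		volatile u32 *cpu_ptr = &adev->wb.wb[wb];	// CPU view
 *		u64 gpu_addr = adev->wb.gpu_addr + wb * 4;	// GPU view
 *
 *		// ... hand gpu_addr to the engine, poll *cpu_ptr later ...
 *
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */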
651/**
2543e28a 652 * amdgpu_device_vram_location - try to find VRAM location
e3ecdffa 653 *
654 * @adev: amdgpu device structure holding all necessary information
655 * @mc: memory controller structure holding memory information
656 * @base: base address at which to put VRAM
657 *
658 * Function will try to place VRAM at the base address provided
659 * as a parameter.
660 */
2543e28a 661void amdgpu_device_vram_location(struct amdgpu_device *adev,
770d13b1 662 struct amdgpu_gmc *mc, u64 base)
d38ceaf9
AD
663{
664 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
665
666 mc->vram_start = base;
d38ceaf9
AD
667 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
668 if (limit && limit < mc->real_vram_size)
669 mc->real_vram_size = limit;
670 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
671 mc->mc_vram_size >> 20, mc->vram_start,
672 mc->vram_end, mc->real_vram_size >> 20);
673}
674
675/**
2543e28a 676 * amdgpu_device_gart_location - try to find GTT location
e3ecdffa 677 *
678 * @adev: amdgpu device structure holding all necessary information
679 * @mc: memory controller structure holding memory information
680 *
681 * Function will try to place GTT before or after VRAM.
682 *
683 * If the GTT size is bigger than the space left, the GTT size is adjusted.
684 * Thus this function never fails.
685 *
686 * FIXME: when reducing GTT size align new size on power of 2.
687 */
2543e28a 688void amdgpu_device_gart_location(struct amdgpu_device *adev,
770d13b1 689 struct amdgpu_gmc *mc)
d38ceaf9
AD
690{
691 u64 size_af, size_bf;
692
693 mc->gart_size += adev->pm.smu_prv_buffer_size;
694
770d13b1 695 size_af = adev->gmc.mc_mask - mc->vram_end;
ed21c047 696 size_bf = mc->vram_start;
d38ceaf9 697 if (size_bf > size_af) {
6f02a696 698 if (mc->gart_size > size_bf) {
d38ceaf9 699 dev_warn(adev->dev, "limiting GTT\n");
6f02a696 700 mc->gart_size = size_bf;
d38ceaf9 701 }
6f02a696 702 mc->gart_start = 0;
d38ceaf9 703 } else {
6f02a696 704 if (mc->gart_size > size_af) {
d38ceaf9 705 dev_warn(adev->dev, "limiting GTT\n");
6f02a696 706 mc->gart_size = size_af;
d38ceaf9 707 }
708 /* VCE doesn't like it when BOs cross a 4GB segment, so align
709 * the GART base on a 4GB boundary as well.
710 */
711 mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
d38ceaf9 712 }
6f02a696 713 mc->gart_end = mc->gart_start + mc->gart_size - 1;
d38ceaf9 714 dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
6f02a696 715 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
d38ceaf9
AD
716}
717
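/*
 * Illustrative sketch (not part of the original file): GMC IP code is expected
 * to place VRAM first and then GTT; "base" below stands in for the
 * ASIC-specific VRAM base address:
 *
 *	amdgpu_device_vram_location(adev, &adev->gmc, base);
 *	amdgpu_device_gart_location(adev, &adev->gmc);
 *
 * After these calls gmc.vram_start/vram_end and gmc.gart_start/gart_end
 * describe the final GPU address space layout.
 */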
718/**
719 * amdgpu_device_resize_fb_bar - try to resize FB BAR
720 *
721 * @adev: amdgpu_device pointer
722 *
723 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
724 * to fail, but if any of the BARs is not accessible after the resize we abort
725 * driver loading by returning -ENODEV.
726 */
727int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
728{
770d13b1 729 u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
d6895ad3 730 u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
31b8adab
CK
731 struct pci_bus *root;
732 struct resource *res;
733 unsigned i;
d6895ad3
CK
734 u16 cmd;
735 int r;
736
0c03b912 737 /* Bypass for VF */
738 if (amdgpu_sriov_vf(adev))
739 return 0;
740
31b8adab
CK
741 /* Check if the root BUS has 64bit memory resources */
742 root = adev->pdev->bus;
743 while (root->parent)
744 root = root->parent;
745
746 pci_bus_for_each_resource(root, res, i) {
0ebb7c54 747 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
31b8adab
CK
748 res->start > 0x100000000ull)
749 break;
750 }
751
752 /* Trying to resize is pointless without a root hub window above 4GB */
753 if (!res)
754 return 0;
755
d6895ad3
CK
756 /* Disable memory decoding while we change the BAR addresses and size */
757 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
758 pci_write_config_word(adev->pdev, PCI_COMMAND,
759 cmd & ~PCI_COMMAND_MEMORY);
760
761 /* Free the VRAM and doorbell BAR, we most likely need to move both. */
06ec9070 762 amdgpu_device_doorbell_fini(adev);
d6895ad3
CK
763 if (adev->asic_type >= CHIP_BONAIRE)
764 pci_release_resource(adev->pdev, 2);
765
766 pci_release_resource(adev->pdev, 0);
767
768 r = pci_resize_resource(adev->pdev, 0, rbar_size);
769 if (r == -ENOSPC)
770 DRM_INFO("Not enough PCI address space for a large BAR.");
771 else if (r && r != -ENOTSUPP)
772 DRM_ERROR("Problem resizing BAR0 (%d).", r);
773
774 pci_assign_unassigned_bus_resources(adev->pdev->bus);
775
776 /* When the doorbell or fb BAR isn't available we have no chance of
777 * using the device.
778 */
06ec9070 779 r = amdgpu_device_doorbell_init(adev);
d6895ad3
CK
780 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
781 return -ENODEV;
782
783 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
784
785 return 0;
786}
a05502e5 787
d38ceaf9
AD
788/*
789 * GPU helpers function.
790 */
791/**
39c640c0 792 * amdgpu_device_need_post - check if the hw need post or not
d38ceaf9
AD
793 *
794 * @adev: amdgpu_device pointer
795 *
796 * Check if the asic has been initialized (all asics) at driver startup,
797 * or if a post is needed because a hw reset was performed.
798 * Returns true if post is needed or false if not.
d38ceaf9 799 */
39c640c0 800bool amdgpu_device_need_post(struct amdgpu_device *adev)
d38ceaf9
AD
801{
802 uint32_t reg;
803
bec86378
ML
804 if (amdgpu_sriov_vf(adev))
805 return false;
806
807 if (amdgpu_passthrough(adev)) {
808 /* for FIJI: in the whole-GPU pass-through virtualization case, after a VM
809 * reboot some old SMC firmware still needs the driver to do a vPost,
810 * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
811 * this flaw, so force vPost for SMC versions below 22.15.
812 */
813 if (adev->asic_type == CHIP_FIJI) {
814 int err;
815 uint32_t fw_ver;
816 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
817 /* force vPost if an error occurred */
818 if (err)
819 return true;
820
821 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1da2c326
ML
822 if (fw_ver < 0x00160e00)
823 return true;
bec86378 824 }
bec86378 825 }
91fe77eb 826
827 if (adev->has_hw_reset) {
828 adev->has_hw_reset = false;
829 return true;
830 }
831
832 /* bios scratch used on CIK+ */
833 if (adev->asic_type >= CHIP_BONAIRE)
834 return amdgpu_atombios_scratch_need_asic_init(adev);
835
836 /* check MEM_SIZE for older asics */
837 reg = amdgpu_asic_get_config_memsize(adev);
838
839 if ((reg != 0) && (reg != 0xffffffff))
840 return false;
841
842 return true;
bec86378
ML
843}
844
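/*
 * Illustrative sketch (not part of the original file): the init path is
 * expected to use this check to decide whether to execute the vBIOS post, for
 * example via the atombios init table (shown here as an assumption):
 *
 *	if (amdgpu_device_need_post(adev))
 *		amdgpu_atom_asic_init(adev->mode_info.atom_context);
 */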
845/* if we get transitioned to only one device, take VGA back */
846/**
06ec9070 847 * amdgpu_device_vga_set_decode - enable/disable vga decode
d38ceaf9
AD
848 *
849 * @cookie: amdgpu_device pointer
850 * @state: enable/disable vga decode
851 *
852 * Enable/disable vga decode (all asics).
853 * Returns VGA resource flags.
854 */
06ec9070 855static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
d38ceaf9
AD
856{
857 struct amdgpu_device *adev = cookie;
858 amdgpu_asic_set_vga_state(adev, state);
859 if (state)
860 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
861 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
862 else
863 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
864}
865
866/**
867 * amdgpu_device_check_block_size - validate the vm block size
868 *
869 * @adev: amdgpu_device pointer
870 *
871 * Validates the vm block size specified via module parameter.
872 * The vm block size defines number of bits in page table versus page directory,
873 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
874 * page table and the remaining bits are in the page directory.
875 */
06ec9070 876static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
a1adf8be
CZ
877{
878 /* defines number of bits in page table versus page directory,
879 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
880 * page table and the remaining bits are in the page directory */
bab4fee7
JZ
881 if (amdgpu_vm_block_size == -1)
882 return;
a1adf8be 883
bab4fee7 884 if (amdgpu_vm_block_size < 9) {
a1adf8be
CZ
885 dev_warn(adev->dev, "VM page table size (%d) too small\n",
886 amdgpu_vm_block_size);
97489129 887 amdgpu_vm_block_size = -1;
a1adf8be 888 }
a1adf8be
CZ
889}
890
e3ecdffa
AD
891/**
892 * amdgpu_device_check_vm_size - validate the vm size
893 *
894 * @adev: amdgpu_device pointer
895 *
896 * Validates the vm size in GB specified via module parameter.
897 * The VM size is the size of the GPU virtual memory space in GB.
898 */
06ec9070 899static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
83ca145d 900{
64dab074
AD
901 /* no need to check the default value */
902 if (amdgpu_vm_size == -1)
903 return;
904
83ca145d
ZJ
905 if (amdgpu_vm_size < 1) {
906 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
907 amdgpu_vm_size);
f3368128 908 amdgpu_vm_size = -1;
83ca145d 909 }
83ca145d
ZJ
910}
911
7951e376
RZ
912static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
913{
914 struct sysinfo si;
915 bool is_os_64 = (sizeof(void *) == 8) ? true : false;
916 uint64_t total_memory;
917 uint64_t dram_size_seven_GB = 0x1B8000000;
918 uint64_t dram_size_three_GB = 0xB8000000;
919
920 if (amdgpu_smu_memory_pool_size == 0)
921 return;
922
923 if (!is_os_64) {
924 DRM_WARN("Not 64-bit OS, feature not supported\n");
925 goto def_value;
926 }
927 si_meminfo(&si);
928 total_memory = (uint64_t)si.totalram * si.mem_unit;
929
930 if ((amdgpu_smu_memory_pool_size == 1) ||
931 (amdgpu_smu_memory_pool_size == 2)) {
932 if (total_memory < dram_size_three_GB)
933 goto def_value1;
934 } else if ((amdgpu_smu_memory_pool_size == 4) ||
935 (amdgpu_smu_memory_pool_size == 8)) {
936 if (total_memory < dram_size_seven_GB)
937 goto def_value1;
938 } else {
939 DRM_WARN("Smu memory pool size not supported\n");
940 goto def_value;
941 }
942 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
943
944 return;
945
946def_value1:
947 DRM_WARN("No enough system memory\n");
948def_value:
949 adev->pm.smu_prv_buffer_size = 0;
950}
951
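/*
 * Worked example (added for clarity): amdgpu_smu_memory_pool_size is given in
 * units of 256 MiB, so a module parameter of 8 reserves 8 << 28 bytes = 2 GiB
 * for the SMU private buffer; values of 4 or 8 are only honored on a 64-bit OS
 * with at least 0x1B8000000 bytes (~7.4 GB) of system memory, values of 1 or 2
 * need at least 0xB8000000 bytes (~3.1 GB).
 */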
d38ceaf9 952/**
06ec9070 953 * amdgpu_device_check_arguments - validate module params
d38ceaf9
AD
954 *
955 * @adev: amdgpu_device pointer
956 *
957 * Validates certain module parameters and updates
958 * the associated values used by the driver (all asics).
959 */
06ec9070 960static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
d38ceaf9 961{
5b011235
CZ
962 if (amdgpu_sched_jobs < 4) {
963 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
964 amdgpu_sched_jobs);
965 amdgpu_sched_jobs = 4;
76117507 966 } else if (!is_power_of_2(amdgpu_sched_jobs)){
5b011235
CZ
967 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
968 amdgpu_sched_jobs);
969 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
970 }
d38ceaf9 971
83e74db6 972 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
f9321cc4
CK
973 /* gart size must be greater or equal to 32M */
974 dev_warn(adev->dev, "gart size (%d) too small\n",
975 amdgpu_gart_size);
83e74db6 976 amdgpu_gart_size = -1;
d38ceaf9
AD
977 }
978
36d38372 979 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
c4e1a13a 980 /* gtt size must be greater or equal to 32M */
36d38372
CK
981 dev_warn(adev->dev, "gtt size (%d) too small\n",
982 amdgpu_gtt_size);
983 amdgpu_gtt_size = -1;
d38ceaf9
AD
984 }
985
d07f14be
RH
986 /* valid range is between 4 and 9 inclusive */
987 if (amdgpu_vm_fragment_size != -1 &&
988 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
989 dev_warn(adev->dev, "valid range is between 4 and 9\n");
990 amdgpu_vm_fragment_size = -1;
991 }
992
7951e376
RZ
993 amdgpu_device_check_smu_prv_buffer_size(adev);
994
06ec9070 995 amdgpu_device_check_vm_size(adev);
d38ceaf9 996
06ec9070 997 amdgpu_device_check_block_size(adev);
6a7f76e7 998
526bae37 999 if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
76117507 1000 !is_power_of_2(amdgpu_vram_page_split))) {
6a7f76e7
CK
1001 dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
1002 amdgpu_vram_page_split);
1003 amdgpu_vram_page_split = 1024;
1004 }
8854695a
AG
1005
1006 if (amdgpu_lockup_timeout == 0) {
1007 dev_warn(adev->dev, "lockup_timeout must be > 0, adjusting to 10000\n");
1008 amdgpu_lockup_timeout = 10000;
1009 }
19aede77
AD
1010
1011 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
d38ceaf9
AD
1012}
1013
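/*
 * Worked example (added for clarity): the checks above only clamp obviously
 * bad module parameters. For instance, loading with amdgpu.sched_jobs=6 gets
 * rounded up to the next power of two (8), and amdgpu.gart_size=16 is rejected
 * and reset to -1 (auto) because the GART must be at least 32 MiB.
 */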
1014/**
1015 * amdgpu_switcheroo_set_state - set switcheroo state
1016 *
1017 * @pdev: pci dev pointer
1694467b 1018 * @state: vga_switcheroo state
1019 *
1020 * Callback for the switcheroo driver. Suspends or resumes
1021 * the asic before or after it is powered up using ACPI methods.
1022 */
1023static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1024{
1025 struct drm_device *dev = pci_get_drvdata(pdev);
1026
1027 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1028 return;
1029
1030 if (state == VGA_SWITCHEROO_ON) {
7ca85295 1031 pr_info("amdgpu: switched on\n");
d38ceaf9
AD
1032 /* don't suspend or resume card normally */
1033 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1034
810ddc3a 1035 amdgpu_device_resume(dev, true, true);
d38ceaf9 1036
d38ceaf9
AD
1037 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1038 drm_kms_helper_poll_enable(dev);
1039 } else {
7ca85295 1040 pr_info("amdgpu: switched off\n");
d38ceaf9
AD
1041 drm_kms_helper_poll_disable(dev);
1042 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
810ddc3a 1043 amdgpu_device_suspend(dev, true, true);
d38ceaf9
AD
1044 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1045 }
1046}
1047
1048/**
1049 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1050 *
1051 * @pdev: pci dev pointer
1052 *
1053 * Callback for the switcheroo driver. Check if the switcheroo
1054 * state can be changed.
1055 * Returns true if the state can be changed, false if not.
1056 */
1057static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1058{
1059 struct drm_device *dev = pci_get_drvdata(pdev);
1060
1061 /*
1062 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1063 * locking inversion with the driver load path. And the access here is
1064 * completely racy anyway. So don't bother with locking for now.
1065 */
1066 return dev->open_count == 0;
1067}
1068
1069static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1070 .set_gpu_state = amdgpu_switcheroo_set_state,
1071 .reprobe = NULL,
1072 .can_switch = amdgpu_switcheroo_can_switch,
1073};
1074
1075/**
1076 * amdgpu_device_ip_set_clockgating_state - set the CG state
1077 *
1078 * @adev: amdgpu_device pointer
1079 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1080 * @state: clockgating state (gate or ungate)
1081 *
1082 * Sets the requested clockgating state for all instances of
1083 * the hardware IP specified.
1084 * Returns the error code from the last instance.
1085 */
43fa561f 1086int amdgpu_device_ip_set_clockgating_state(void *dev,
2990a1fc
AD
1087 enum amd_ip_block_type block_type,
1088 enum amd_clockgating_state state)
d38ceaf9 1089{
43fa561f 1090 struct amdgpu_device *adev = dev;
d38ceaf9
AD
1091 int i, r = 0;
1092
1093 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1094 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1095 continue;
c722865a
RZ
1096 if (adev->ip_blocks[i].version->type != block_type)
1097 continue;
1098 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1099 continue;
1100 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1101 (void *)adev, state);
1102 if (r)
1103 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1104 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
1105 }
1106 return r;
1107}
1108
1109/**
1110 * amdgpu_device_ip_set_powergating_state - set the PG state
1111 *
1112 * @adev: amdgpu_device pointer
1113 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1114 * @state: powergating state (gate or ungate)
1115 *
1116 * Sets the requested powergating state for all instances of
1117 * the hardware IP specified.
1118 * Returns the error code from the last instance.
1119 */
43fa561f 1120int amdgpu_device_ip_set_powergating_state(void *dev,
2990a1fc
AD
1121 enum amd_ip_block_type block_type,
1122 enum amd_powergating_state state)
d38ceaf9 1123{
43fa561f 1124 struct amdgpu_device *adev = dev;
d38ceaf9
AD
1125 int i, r = 0;
1126
1127 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1128 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1129 continue;
c722865a
RZ
1130 if (adev->ip_blocks[i].version->type != block_type)
1131 continue;
1132 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1133 continue;
1134 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1135 (void *)adev, state);
1136 if (r)
1137 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1138 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
1139 }
1140 return r;
1141}
1142
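/*
 * Illustrative sketch (not part of the original file): other parts of the
 * driver are expected to gate or ungate a whole IP type through these
 * wrappers, e.g. clock- and power-gating the GFX block:
 *
 *	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       AMD_CG_STATE_GATE);
 *	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       AMD_PG_STATE_GATE);
 *
 * The enum values come from amd_shared.h; the matching *_UNGATE state
 * reverses the operation.
 */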
1143/**
1144 * amdgpu_device_ip_get_clockgating_state - get the CG state
1145 *
1146 * @adev: amdgpu_device pointer
1147 * @flags: clockgating feature flags
1148 *
1149 * Walks the list of IPs on the device and updates the clockgating
1150 * flags for each IP.
1151 * Updates @flags with the feature flags for each hardware IP where
1152 * clockgating is enabled.
1153 */
2990a1fc
AD
1154void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1155 u32 *flags)
6cb2d4e4
HR
1156{
1157 int i;
1158
1159 for (i = 0; i < adev->num_ip_blocks; i++) {
1160 if (!adev->ip_blocks[i].status.valid)
1161 continue;
1162 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1163 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1164 }
1165}
1166
e3ecdffa
AD
1167/**
1168 * amdgpu_device_ip_wait_for_idle - wait for idle
1169 *
1170 * @adev: amdgpu_device pointer
1171 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1172 *
1173 * Waits for the request hardware IP to be idle.
1174 * Returns 0 for success or a negative error code on failure.
1175 */
2990a1fc
AD
1176int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1177 enum amd_ip_block_type block_type)
5dbbb60b
AD
1178{
1179 int i, r;
1180
1181 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1182 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1183 continue;
a1255107
AD
1184 if (adev->ip_blocks[i].version->type == block_type) {
1185 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
5dbbb60b
AD
1186 if (r)
1187 return r;
1188 break;
1189 }
1190 }
1191 return 0;
1192
1193}
1194
1195/**
1196 * amdgpu_device_ip_is_idle - is the hardware IP idle
1197 *
1198 * @adev: amdgpu_device pointer
1199 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1200 *
1201 * Check if the hardware IP is idle or not.
1202 * Returns true if it the IP is idle, false if not.
1203 */
2990a1fc
AD
1204bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1205 enum amd_ip_block_type block_type)
5dbbb60b
AD
1206{
1207 int i;
1208
1209 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1210 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1211 continue;
a1255107
AD
1212 if (adev->ip_blocks[i].version->type == block_type)
1213 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
5dbbb60b
AD
1214 }
1215 return true;
1216
1217}
1218
1219/**
1220 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1221 *
1222 * @adev: amdgpu_device pointer
1223 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1224 *
1225 * Returns a pointer to the hardware IP block structure
1226 * if it exists for the asic, otherwise NULL.
1227 */
2990a1fc
AD
1228struct amdgpu_ip_block *
1229amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1230 enum amd_ip_block_type type)
d38ceaf9
AD
1231{
1232 int i;
1233
1234 for (i = 0; i < adev->num_ip_blocks; i++)
a1255107 1235 if (adev->ip_blocks[i].version->type == type)
d38ceaf9
AD
1236 return &adev->ip_blocks[i];
1237
1238 return NULL;
1239}
1240
1241/**
2990a1fc 1242 * amdgpu_device_ip_block_version_cmp
d38ceaf9
AD
1243 *
1244 * @adev: amdgpu_device pointer
5fc3aeeb 1245 * @type: enum amd_ip_block_type
d38ceaf9
AD
1246 * @major: major version
1247 * @minor: minor version
1248 *
1249 * return 0 if equal or greater
1250 * return 1 if smaller or the ip_block doesn't exist
1251 */
2990a1fc
AD
1252int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1253 enum amd_ip_block_type type,
1254 u32 major, u32 minor)
d38ceaf9 1255{
2990a1fc 1256 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
d38ceaf9 1257
a1255107
AD
1258 if (ip_block && ((ip_block->version->major > major) ||
1259 ((ip_block->version->major == major) &&
1260 (ip_block->version->minor >= minor))))
d38ceaf9
AD
1261 return 0;
1262
1263 return 1;
1264}
1265
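/*
 * Illustrative sketch (not part of the original file): callers can combine the
 * two lookups above, e.g. to require at least version 10.1 of the DCE display
 * IP (the IP type and version numbers are arbitrary examples):
 *
 *	struct amdgpu_ip_block *ip_block;
 *
 *	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE);
 *	if (ip_block && !amdgpu_device_ip_block_version_cmp(adev,
 *					AMD_IP_BLOCK_TYPE_DCE, 10, 1)) {
 *		// DCE 10.1 or newer is present
 *	}
 */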
a1255107 1266/**
2990a1fc 1267 * amdgpu_device_ip_block_add
a1255107
AD
1268 *
1269 * @adev: amdgpu_device pointer
1270 * @ip_block_version: pointer to the IP to add
1271 *
1272 * Adds the IP block driver information to the collection of IPs
1273 * on the asic.
1274 */
2990a1fc
AD
1275int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1276 const struct amdgpu_ip_block_version *ip_block_version)
a1255107
AD
1277{
1278 if (!ip_block_version)
1279 return -EINVAL;
1280
e966a725 1281 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
a0bae357
HR
1282 ip_block_version->funcs->name);
1283
a1255107
AD
1284 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1285
1286 return 0;
1287}
1288
1289/**
1290 * amdgpu_device_enable_virtual_display - enable virtual display feature
1291 *
1292 * @adev: amdgpu_device pointer
1293 *
1294 * Enables the virtual display feature if the user has enabled it via
1295 * the module parameter virtual_display. This feature provides virtual
1296 * display hardware on headless boards or in virtualized environments.
1297 * This function parses and validates the configuration string specified by
1298 * the user and configures the virtual display configuration (number of
1299 * virtual connectors, crtcs, etc.) specified.
1300 */
483ef985 1301static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
9accf2fd
ED
1302{
1303 adev->enable_virtual_display = false;
1304
1305 if (amdgpu_virtual_display) {
1306 struct drm_device *ddev = adev->ddev;
1307 const char *pci_address_name = pci_name(ddev->pdev);
0f66356d 1308 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
9accf2fd
ED
1309
1310 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1311 pciaddstr_tmp = pciaddstr;
0f66356d
ED
1312 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1313 pciaddname = strsep(&pciaddname_tmp, ",");
967de2a9
YT
1314 if (!strcmp("all", pciaddname)
1315 || !strcmp(pci_address_name, pciaddname)) {
0f66356d
ED
1316 long num_crtc;
1317 int res = -1;
1318
9accf2fd 1319 adev->enable_virtual_display = true;
0f66356d
ED
1320
1321 if (pciaddname_tmp)
1322 res = kstrtol(pciaddname_tmp, 10,
1323 &num_crtc);
1324
1325 if (!res) {
1326 if (num_crtc < 1)
1327 num_crtc = 1;
1328 if (num_crtc > 6)
1329 num_crtc = 6;
1330 adev->mode_info.num_crtc = num_crtc;
1331 } else {
1332 adev->mode_info.num_crtc = 1;
1333 }
9accf2fd
ED
1334 break;
1335 }
1336 }
1337
0f66356d
ED
1338 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1339 amdgpu_virtual_display, pci_address_name,
1340 adev->enable_virtual_display, adev->mode_info.num_crtc);
9accf2fd
ED
1341
1342 kfree(pciaddstr);
1343 }
1344}
1345
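/*
 * Illustrative sketch (not part of the original file): the virtual_display
 * module parameter is a semicolon-separated list of PCI addresses, each with
 * an optional crtc count after a comma; the addresses below are made up:
 *
 *	amdgpu.virtual_display=0000:01:00.0,2	// two virtual crtcs on one device
 *	amdgpu.virtual_display=all		// one virtual crtc on every device
 */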
1346/**
1347 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1348 *
1349 * @adev: amdgpu_device pointer
1350 *
1351 * Parses the asic configuration parameters specified in the gpu info
1352 * firmware and makes them available to the driver for use in configuring
1353 * the asic.
1354 * Returns 0 on success, -EINVAL on failure.
1355 */
e2a75f88
AD
1356static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1357{
e2a75f88
AD
1358 const char *chip_name;
1359 char fw_name[30];
1360 int err;
1361 const struct gpu_info_firmware_header_v1_0 *hdr;
1362
ab4fe3e1
HR
1363 adev->firmware.gpu_info_fw = NULL;
1364
e2a75f88
AD
1365 switch (adev->asic_type) {
1366 case CHIP_TOPAZ:
1367 case CHIP_TONGA:
1368 case CHIP_FIJI:
1369 case CHIP_POLARIS11:
1370 case CHIP_POLARIS10:
1371 case CHIP_POLARIS12:
1372 case CHIP_CARRIZO:
1373 case CHIP_STONEY:
1374#ifdef CONFIG_DRM_AMDGPU_SI
1375 case CHIP_VERDE:
1376 case CHIP_TAHITI:
1377 case CHIP_PITCAIRN:
1378 case CHIP_OLAND:
1379 case CHIP_HAINAN:
1380#endif
1381#ifdef CONFIG_DRM_AMDGPU_CIK
1382 case CHIP_BONAIRE:
1383 case CHIP_HAWAII:
1384 case CHIP_KAVERI:
1385 case CHIP_KABINI:
1386 case CHIP_MULLINS:
1387#endif
1388 default:
1389 return 0;
1390 case CHIP_VEGA10:
1391 chip_name = "vega10";
1392 break;
3f76dced
AD
1393 case CHIP_VEGA12:
1394 chip_name = "vega12";
1395 break;
2d2e5e7e
AD
1396 case CHIP_RAVEN:
1397 chip_name = "raven";
1398 break;
e2a75f88
AD
1399 }
1400
1401 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
ab4fe3e1 1402 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
e2a75f88
AD
1403 if (err) {
1404 dev_err(adev->dev,
1405 "Failed to load gpu_info firmware \"%s\"\n",
1406 fw_name);
1407 goto out;
1408 }
ab4fe3e1 1409 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
e2a75f88
AD
1410 if (err) {
1411 dev_err(adev->dev,
1412 "Failed to validate gpu_info firmware \"%s\"\n",
1413 fw_name);
1414 goto out;
1415 }
1416
ab4fe3e1 1417 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
e2a75f88
AD
1418 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1419
1420 switch (hdr->version_major) {
1421 case 1:
1422 {
1423 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
ab4fe3e1 1424 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
e2a75f88
AD
1425 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1426
b5ab16bf
AD
1427 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1428 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1429 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1430 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
e2a75f88 1431 adev->gfx.config.max_texture_channel_caches =
b5ab16bf
AD
1432 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1433 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1434 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1435 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1436 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
e2a75f88 1437 adev->gfx.config.double_offchip_lds_buf =
b5ab16bf
AD
1438 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1439 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
51fd0370
HZ
1440 adev->gfx.cu_info.max_waves_per_simd =
1441 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1442 adev->gfx.cu_info.max_scratch_slots_per_cu =
1443 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1444 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
e2a75f88
AD
1445 break;
1446 }
1447 default:
1448 dev_err(adev->dev,
1449 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1450 err = -EINVAL;
1451 goto out;
1452 }
1453out:
e2a75f88
AD
1454 return err;
1455}
1456
1457/**
1458 * amdgpu_device_ip_early_init - run early init for hardware IPs
1459 *
1460 * @adev: amdgpu_device pointer
1461 *
1462 * Early initialization pass for hardware IPs. The hardware IPs that make
1463 * up each asic are discovered and each IP's early_init callback is run. This
1464 * is the first stage in initializing the asic.
1465 * Returns 0 on success, negative error code on failure.
1466 */
06ec9070 1467static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
d38ceaf9 1468{
aaa36a97 1469 int i, r;
d38ceaf9 1470
483ef985 1471 amdgpu_device_enable_virtual_display(adev);
a6be7570 1472
d38ceaf9 1473 switch (adev->asic_type) {
aaa36a97
AD
1474 case CHIP_TOPAZ:
1475 case CHIP_TONGA:
48299f95 1476 case CHIP_FIJI:
2cc0c0b5
FC
1477 case CHIP_POLARIS11:
1478 case CHIP_POLARIS10:
c4642a47 1479 case CHIP_POLARIS12:
aaa36a97 1480 case CHIP_CARRIZO:
39bb0c92
SL
1481 case CHIP_STONEY:
1482 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
aaa36a97
AD
1483 adev->family = AMDGPU_FAMILY_CZ;
1484 else
1485 adev->family = AMDGPU_FAMILY_VI;
1486
1487 r = vi_set_ip_blocks(adev);
1488 if (r)
1489 return r;
1490 break;
33f34802
KW
1491#ifdef CONFIG_DRM_AMDGPU_SI
1492 case CHIP_VERDE:
1493 case CHIP_TAHITI:
1494 case CHIP_PITCAIRN:
1495 case CHIP_OLAND:
1496 case CHIP_HAINAN:
295d0daf 1497 adev->family = AMDGPU_FAMILY_SI;
33f34802
KW
1498 r = si_set_ip_blocks(adev);
1499 if (r)
1500 return r;
1501 break;
1502#endif
a2e73f56
AD
1503#ifdef CONFIG_DRM_AMDGPU_CIK
1504 case CHIP_BONAIRE:
1505 case CHIP_HAWAII:
1506 case CHIP_KAVERI:
1507 case CHIP_KABINI:
1508 case CHIP_MULLINS:
1509 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1510 adev->family = AMDGPU_FAMILY_CI;
1511 else
1512 adev->family = AMDGPU_FAMILY_KV;
1513
1514 r = cik_set_ip_blocks(adev);
1515 if (r)
1516 return r;
1517 break;
1518#endif
e48a3cd9
AD
1519 case CHIP_VEGA10:
1520 case CHIP_VEGA12:
1521 case CHIP_RAVEN:
2ca8a5d2
CZ
1522 if (adev->asic_type == CHIP_RAVEN)
1523 adev->family = AMDGPU_FAMILY_RV;
1524 else
1525 adev->family = AMDGPU_FAMILY_AI;
460826e6
KW
1526
1527 r = soc15_set_ip_blocks(adev);
1528 if (r)
1529 return r;
1530 break;
d38ceaf9
AD
1531 default:
1532 /* FIXME: not supported yet */
1533 return -EINVAL;
1534 }
1535
e2a75f88
AD
1536 r = amdgpu_device_parse_gpu_info_fw(adev);
1537 if (r)
1538 return r;
1539
1884734a 1540 amdgpu_amdkfd_device_probe(adev);
1541
3149d9da
XY
1542 if (amdgpu_sriov_vf(adev)) {
1543 r = amdgpu_virt_request_full_gpu(adev, true);
1544 if (r)
5ffa61c1 1545 return -EAGAIN;
3149d9da
XY
1546 }
1547
d38ceaf9
AD
1548 for (i = 0; i < adev->num_ip_blocks; i++) {
1549 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
ed8cf00c
HR
1550 DRM_ERROR("disabled ip block: %d <%s>\n",
1551 i, adev->ip_blocks[i].version->funcs->name);
a1255107 1552 adev->ip_blocks[i].status.valid = false;
d38ceaf9 1553 } else {
a1255107
AD
1554 if (adev->ip_blocks[i].version->funcs->early_init) {
1555 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2c1a2784 1556 if (r == -ENOENT) {
a1255107 1557 adev->ip_blocks[i].status.valid = false;
2c1a2784 1558 } else if (r) {
a1255107
AD
1559 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1560 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1561 return r;
2c1a2784 1562 } else {
a1255107 1563 adev->ip_blocks[i].status.valid = true;
2c1a2784 1564 }
974e6b64 1565 } else {
a1255107 1566 adev->ip_blocks[i].status.valid = true;
d38ceaf9 1567 }
d38ceaf9
AD
1568 }
1569 }
1570
395d1fb9
NH
1571 adev->cg_flags &= amdgpu_cg_mask;
1572 adev->pg_flags &= amdgpu_pg_mask;
1573
d38ceaf9
AD
1574 return 0;
1575}
1576
1577/**
1578 * amdgpu_device_ip_init - run init for hardware IPs
1579 *
1580 * @adev: amdgpu_device pointer
1581 *
1582 * Main initialization pass for hardware IPs. The list of all the hardware
1583 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
1584 * are run. sw_init initializes the software state associated with each IP
1585 * and hw_init initializes the hardware associated with each IP.
1586 * Returns 0 on success, negative error code on failure.
1587 */
06ec9070 1588static int amdgpu_device_ip_init(struct amdgpu_device *adev)
d38ceaf9
AD
1589{
1590 int i, r;
1591
1592 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1593 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1594 continue;
a1255107 1595 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2c1a2784 1596 if (r) {
a1255107
AD
1597 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1598 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1599 return r;
2c1a2784 1600 }
a1255107 1601 adev->ip_blocks[i].status.sw = true;
bfca0289 1602
d38ceaf9 1603 /* need to do gmc hw init early so we can allocate gpu mem */
a1255107 1604 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
06ec9070 1605 r = amdgpu_device_vram_scratch_init(adev);
2c1a2784
AD
1606 if (r) {
1607 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
d38ceaf9 1608 return r;
2c1a2784 1609 }
a1255107 1610 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784
AD
1611 if (r) {
1612 DRM_ERROR("hw_init %d failed %d\n", i, r);
d38ceaf9 1613 return r;
2c1a2784 1614 }
06ec9070 1615 r = amdgpu_device_wb_init(adev);
2c1a2784 1616 if (r) {
06ec9070 1617 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
d38ceaf9 1618 return r;
2c1a2784 1619 }
a1255107 1620 adev->ip_blocks[i].status.hw = true;
2493664f
ML
1621
1622 /* right after GMC hw init, we create CSA */
1623 if (amdgpu_sriov_vf(adev)) {
1624 r = amdgpu_allocate_static_csa(adev);
1625 if (r) {
1626 DRM_ERROR("allocate CSA failed %d\n", r);
1627 return r;
1628 }
1629 }
d38ceaf9
AD
1630 }
1631 }
1632
1633 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1634 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 1635 continue;
bfca0289 1636 if (adev->ip_blocks[i].status.hw)
d38ceaf9 1637 continue;
a1255107 1638 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784 1639 if (r) {
a1255107
AD
1640 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1641 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1642 return r;
2c1a2784 1643 }
a1255107 1644 adev->ip_blocks[i].status.hw = true;
d38ceaf9
AD
1645 }
1646
1884734a 1647 amdgpu_amdkfd_device_init(adev);
c6332b97 1648
1649 if (amdgpu_sriov_vf(adev))
1650 amdgpu_virt_release_full_gpu(adev, true);
1651
d38ceaf9
AD
1652 return 0;
1653}
1654
1655/**
1656 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
1657 *
1658 * @adev: amdgpu_device pointer
1659 *
1660 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
1661 * this function before a GPU reset. If the value is retained after a
1662 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
1663 */
06ec9070 1664static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
0c49e0b8
CZ
1665{
1666 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1667}
1668
1669/**
1670 * amdgpu_device_check_vram_lost - check if vram is valid
1671 *
1672 * @adev: amdgpu_device pointer
1673 *
1674 * Checks the reset magic value written to the gart pointer in VRAM.
1675 * The driver calls this after a GPU reset to see if the contents of
1676 * VRAM is lost or not.
1677 * returns true if vram is lost, false if not.
1678 */
06ec9070 1679static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
0c49e0b8
CZ
1680{
1681 return !!memcmp(adev->gart.ptr, adev->reset_magic,
1682 AMDGPU_RESET_MAGIC_NUM);
1683}
1684
1685/**
1686 * amdgpu_device_ip_late_set_cg_state - late init for clockgating
1687 *
1688 * @adev: amdgpu_device pointer
1689 *
1690 * Late initialization pass enabling clockgating for hardware IPs.
1691 * The list of all the hardware IPs that make up the asic is walked and the
1692 * set_clockgating_state callbacks are run. This stage is run late
1693 * in the init process.
1694 * Returns 0 on success, negative error code on failure.
1695 */
06ec9070 1696static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
d38ceaf9
AD
1697{
1698 int i = 0, r;
1699
4a2ba394
SL
1700 if (amdgpu_emu_mode == 1)
1701 return 0;
1702
2c773de2
S
1703 r = amdgpu_ib_ring_tests(adev);
1704 if (r)
1705 DRM_ERROR("ib ring test failed (%d).\n", r);
1706
d38ceaf9 1707 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1708 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1709 continue;
4a446d55 1710 /* skip CG for VCE/UVD, it's handled specially */
a1255107 1711 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
57716327
RZ
1712 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1713 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
4a446d55 1714 /* enable clockgating to save power */
a1255107
AD
1715 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1716 AMD_CG_STATE_GATE);
4a446d55
AD
1717 if (r) {
1718 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
a1255107 1719 adev->ip_blocks[i].version->funcs->name, r);
4a446d55
AD
1720 return r;
1721 }
b0b00ff1 1722 }
d38ceaf9 1723 }
2dc80b00
S
1724 return 0;
1725}
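/*
 * Sketch of the callback shape invoked by the loop above (an assumption,
 * the "foo" names are placeholders): a block's set_clockgating_state
 * handler typically looks like:
 *
 *	static int foo_set_clockgating_state(void *handle,
 *					     enum amd_clockgating_state state)
 *	{
 *		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 *		bool enable = (state == AMD_CG_STATE_GATE);
 *
 *		// program the block's clockgating registers accordingly
 *		return 0;
 *	}
 */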
1726
e3ecdffa
AD
1727/**
1728 * amdgpu_device_ip_late_init - run late init for hardware IPs
1729 *
1730 * @adev: amdgpu_device pointer
1731 *
1732 * Late initialization pass for hardware IPs. The list of all the hardware
1733 * IPs that make up the asic is walked and the late_init callbacks are run.
1734 * late_init covers any special initialization that an IP requires
 1735 * after all of the other IP blocks have been initialized or something that needs to happen
1736 * late in the init process.
1737 * Returns 0 on success, negative error code on failure.
1738 */
06ec9070 1739static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2dc80b00
S
1740{
1741 int i = 0, r;
1742
1743 for (i = 0; i < adev->num_ip_blocks; i++) {
1744 if (!adev->ip_blocks[i].status.valid)
1745 continue;
1746 if (adev->ip_blocks[i].version->funcs->late_init) {
1747 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
1748 if (r) {
1749 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1750 adev->ip_blocks[i].version->funcs->name, r);
1751 return r;
1752 }
1753 adev->ip_blocks[i].status.late_initialized = true;
1754 }
1755 }
1756
2c773de2
S
1757 queue_delayed_work(system_wq, &adev->late_init_work,
1758 msecs_to_jiffies(AMDGPU_RESUME_MS));
d38ceaf9 1759
06ec9070 1760 amdgpu_device_fill_reset_magic(adev);
d38ceaf9
AD
1761
1762 return 0;
1763}
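/*
 * Condensed view (no new code) of how the deferred clockgating work used
 * above is wired up elsewhere in this file:
 *
 *	INIT_DELAYED_WORK(&adev->late_init_work,
 *			  amdgpu_device_ip_late_init_func_handler);	// device init
 *	queue_delayed_work(system_wq, &adev->late_init_work,
 *			   msecs_to_jiffies(AMDGPU_RESUME_MS));		// here
 *	cancel_delayed_work_sync(&adev->late_init_work);		// device fini
 */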
1764
e3ecdffa
AD
1765/**
1766 * amdgpu_device_ip_fini - run fini for hardware IPs
1767 *
1768 * @adev: amdgpu_device pointer
1769 *
1770 * Main teardown pass for hardware IPs. The list of all the hardware
1771 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
1772 * are run. hw_fini tears down the hardware associated with each IP
1773 * and sw_fini tears down any software state associated with each IP.
1774 * Returns 0 on success, negative error code on failure.
1775 */
06ec9070 1776static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
d38ceaf9
AD
1777{
1778 int i, r;
1779
1884734a 1780 amdgpu_amdkfd_device_fini(adev);
3e96dbfd
AD
1781 /* need to disable SMC first */
1782 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1783 if (!adev->ip_blocks[i].status.hw)
3e96dbfd 1784 continue;
57716327
RZ
1785 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC &&
1786 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
3e96dbfd 1787 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
a1255107
AD
1788 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1789 AMD_CG_STATE_UNGATE);
3e96dbfd
AD
1790 if (r) {
1791 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
a1255107 1792 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd
AD
1793 return r;
1794 }
a1255107 1795 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3e96dbfd
AD
1796 /* XXX handle errors */
1797 if (r) {
1798 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
a1255107 1799 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd 1800 }
a1255107 1801 adev->ip_blocks[i].status.hw = false;
3e96dbfd
AD
1802 break;
1803 }
1804 }
1805
d38ceaf9 1806 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1807 if (!adev->ip_blocks[i].status.hw)
d38ceaf9 1808 continue;
8201a67a
RZ
1809
1810 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
81ce8bea
RZ
1811 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1812 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
8201a67a
RZ
1813 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1814 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1815 AMD_CG_STATE_UNGATE);
1816 if (r) {
1817 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1818 adev->ip_blocks[i].version->funcs->name, r);
1819 return r;
1820 }
2c1a2784 1821 }
8201a67a 1822
a1255107 1823 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
d38ceaf9 1824 /* XXX handle errors */
2c1a2784 1825 if (r) {
a1255107
AD
1826 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1827 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1828 }
8201a67a 1829
a1255107 1830 adev->ip_blocks[i].status.hw = false;
d38ceaf9
AD
1831 }
1832
9950cda2 1833
d38ceaf9 1834 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1835 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 1836 continue;
c12aba3a
ML
1837
1838 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1839 amdgpu_free_static_csa(adev);
1840 amdgpu_device_wb_fini(adev);
1841 amdgpu_device_vram_scratch_fini(adev);
1842 }
1843
a1255107 1844 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
d38ceaf9 1845 /* XXX handle errors */
2c1a2784 1846 if (r) {
a1255107
AD
1847 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1848 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1849 }
a1255107
AD
1850 adev->ip_blocks[i].status.sw = false;
1851 adev->ip_blocks[i].status.valid = false;
d38ceaf9
AD
1852 }
1853
a6dcfd9c 1854 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1855 if (!adev->ip_blocks[i].status.late_initialized)
8a2eef1d 1856 continue;
a1255107
AD
1857 if (adev->ip_blocks[i].version->funcs->late_fini)
1858 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1859 adev->ip_blocks[i].status.late_initialized = false;
a6dcfd9c
ML
1860 }
1861
030308fc 1862 if (amdgpu_sriov_vf(adev))
24136135
ML
1863 if (amdgpu_virt_release_full_gpu(adev, false))
1864 DRM_ERROR("failed to release exclusive mode on fini\n");
2493664f 1865
d38ceaf9
AD
1866 return 0;
1867}
1868
e3ecdffa
AD
1869/**
1870 * amdgpu_device_ip_late_init_func_handler - work handler for clockgating
1871 *
1872 * @work: work_struct
1873 *
1874 * Work handler for amdgpu_device_ip_late_set_cg_state. We put the
1875 * clockgating setup into a worker thread to speed up driver init and
1876 * resume from suspend.
1877 */
06ec9070 1878static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
2dc80b00
S
1879{
1880 struct amdgpu_device *adev =
1881 container_of(work, struct amdgpu_device, late_init_work.work);
06ec9070 1882 amdgpu_device_ip_late_set_cg_state(adev);
2dc80b00
S
1883}
1884
e3ecdffa
AD
1885/**
1886 * amdgpu_device_ip_suspend - run suspend for hardware IPs
1887 *
1888 * @adev: amdgpu_device pointer
1889 *
1890 * Main suspend function for hardware IPs. The list of all the hardware
1891 * IPs that make up the asic is walked, clockgating is disabled and the
1892 * suspend callbacks are run. suspend puts the hardware and software state
1893 * in each IP into a state suitable for suspend.
1894 * Returns 0 on success, negative error code on failure.
1895 */
cdd61df6 1896int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
d38ceaf9
AD
1897{
1898 int i, r;
1899
e941ea99
XY
1900 if (amdgpu_sriov_vf(adev))
1901 amdgpu_virt_request_full_gpu(adev, false);
1902
c5a93a28 1903 /* ungate SMC block first */
2990a1fc
AD
1904 r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1905 AMD_CG_STATE_UNGATE);
c5a93a28 1906 if (r) {
2990a1fc 1907 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
c5a93a28
FC
1908 }
1909
d38ceaf9 1910 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1911 if (!adev->ip_blocks[i].status.valid)
d38ceaf9
AD
1912 continue;
1913 /* ungate blocks so that suspend can properly shut them down */
5b2a3d2c 1914 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
57716327 1915 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
a1255107
AD
1916 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1917 AMD_CG_STATE_UNGATE);
c5a93a28 1918 if (r) {
a1255107
AD
1919 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1920 adev->ip_blocks[i].version->funcs->name, r);
c5a93a28 1921 }
2c1a2784 1922 }
d38ceaf9 1923 /* XXX handle errors */
a1255107 1924 r = adev->ip_blocks[i].version->funcs->suspend(adev);
d38ceaf9 1925 /* XXX handle errors */
2c1a2784 1926 if (r) {
a1255107
AD
1927 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1928 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1929 }
d38ceaf9
AD
1930 }
1931
e941ea99
XY
1932 if (amdgpu_sriov_vf(adev))
1933 amdgpu_virt_release_full_gpu(adev, false);
1934
d38ceaf9
AD
1935 return 0;
1936}
1937
06ec9070 1938static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
a90ad3c2
ML
1939{
1940 int i, r;
1941
2cb681b6
ML
1942 static enum amd_ip_block_type ip_order[] = {
1943 AMD_IP_BLOCK_TYPE_GMC,
1944 AMD_IP_BLOCK_TYPE_COMMON,
2cb681b6
ML
1945 AMD_IP_BLOCK_TYPE_IH,
1946 };
a90ad3c2 1947
2cb681b6
ML
1948 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1949 int j;
1950 struct amdgpu_ip_block *block;
a90ad3c2 1951
2cb681b6
ML
1952 for (j = 0; j < adev->num_ip_blocks; j++) {
1953 block = &adev->ip_blocks[j];
1954
1955 if (block->version->type != ip_order[i] ||
1956 !block->status.valid)
1957 continue;
1958
1959 r = block->version->funcs->hw_init(adev);
1960 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
c41d1cf6
ML
1961 if (r)
1962 return r;
a90ad3c2
ML
1963 }
1964 }
1965
1966 return 0;
1967}
1968
06ec9070 1969static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
a90ad3c2
ML
1970{
1971 int i, r;
1972
2cb681b6
ML
1973 static enum amd_ip_block_type ip_order[] = {
1974 AMD_IP_BLOCK_TYPE_SMC,
ef4c166d 1975 AMD_IP_BLOCK_TYPE_PSP,
2cb681b6
ML
1976 AMD_IP_BLOCK_TYPE_DCE,
1977 AMD_IP_BLOCK_TYPE_GFX,
1978 AMD_IP_BLOCK_TYPE_SDMA,
257deb8c
FM
1979 AMD_IP_BLOCK_TYPE_UVD,
1980 AMD_IP_BLOCK_TYPE_VCE
2cb681b6 1981 };
a90ad3c2 1982
2cb681b6
ML
1983 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1984 int j;
1985 struct amdgpu_ip_block *block;
a90ad3c2 1986
2cb681b6
ML
1987 for (j = 0; j < adev->num_ip_blocks; j++) {
1988 block = &adev->ip_blocks[j];
1989
1990 if (block->version->type != ip_order[i] ||
1991 !block->status.valid)
1992 continue;
1993
1994 r = block->version->funcs->hw_init(adev);
1995 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
c41d1cf6
ML
1996 if (r)
1997 return r;
a90ad3c2
ML
1998 }
1999 }
2000
2001 return 0;
2002}
2003
e3ecdffa
AD
2004/**
2005 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2006 *
2007 * @adev: amdgpu_device pointer
2008 *
2009 * First resume function for hardware IPs. The list of all the hardware
2010 * IPs that make up the asic is walked and the resume callbacks are run for
2011 * COMMON, GMC, and IH. resume puts the hardware into a functional state
2012 * after a suspend and updates the software state as necessary. This
2013 * function is also used for restoring the GPU after a GPU reset.
2014 * Returns 0 on success, negative error code on failure.
2015 */
06ec9070 2016static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
d38ceaf9
AD
2017{
2018 int i, r;
2019
a90ad3c2
ML
2020 for (i = 0; i < adev->num_ip_blocks; i++) {
2021 if (!adev->ip_blocks[i].status.valid)
2022 continue;
a90ad3c2 2023 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
e3ecdffa
AD
2024 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2025 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
fcf0649f
CZ
2026 r = adev->ip_blocks[i].version->funcs->resume(adev);
2027 if (r) {
2028 DRM_ERROR("resume of IP block <%s> failed %d\n",
2029 adev->ip_blocks[i].version->funcs->name, r);
2030 return r;
2031 }
a90ad3c2
ML
2032 }
2033 }
2034
2035 return 0;
2036}
2037
e3ecdffa
AD
2038/**
2039 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2040 *
2041 * @adev: amdgpu_device pointer
2042 *
 2043 * Second resume function for hardware IPs. The list of all the hardware
2044 * IPs that make up the asic is walked and the resume callbacks are run for
2045 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
2046 * functional state after a suspend and updates the software state as
2047 * necessary. This function is also used for restoring the GPU after a GPU
2048 * reset.
2049 * Returns 0 on success, negative error code on failure.
2050 */
06ec9070 2051static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
d38ceaf9
AD
2052{
2053 int i, r;
2054
2055 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2056 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 2057 continue;
fcf0649f 2058 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
e3ecdffa
AD
2059 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2060 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
fcf0649f 2061 continue;
a1255107 2062 r = adev->ip_blocks[i].version->funcs->resume(adev);
2c1a2784 2063 if (r) {
a1255107
AD
2064 DRM_ERROR("resume of IP block <%s> failed %d\n",
2065 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 2066 return r;
2c1a2784 2067 }
d38ceaf9
AD
2068 }
2069
2070 return 0;
2071}
2072
e3ecdffa
AD
2073/**
2074 * amdgpu_device_ip_resume - run resume for hardware IPs
2075 *
2076 * @adev: amdgpu_device pointer
2077 *
2078 * Main resume function for hardware IPs. The hardware IPs
2079 * are split into two resume functions because they are
 2080 * also used in recovering from a GPU reset and some additional
 2081 * steps need to be taken between them. In this case (S3/S4) they are
2082 * run sequentially.
2083 * Returns 0 on success, negative error code on failure.
2084 */
06ec9070 2085static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
fcf0649f
CZ
2086{
2087 int r;
2088
06ec9070 2089 r = amdgpu_device_ip_resume_phase1(adev);
fcf0649f
CZ
2090 if (r)
2091 return r;
06ec9070 2092 r = amdgpu_device_ip_resume_phase2(adev);
fcf0649f
CZ
2093
2094 return r;
2095}
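/*
 * For reference (taken from amdgpu_device_reset() later in this file, not
 * new behaviour): the reset path uses the two phases separately so the GART
 * can be recovered between them:
 *
 *	r = amdgpu_device_ip_resume_phase1(adev);	// COMMON, GMC, IH
 *	r = amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
 *	r = amdgpu_device_ip_resume_phase2(adev);	// everything else
 */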
2096
e3ecdffa
AD
2097/**
2098 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2099 *
2100 * @adev: amdgpu_device pointer
2101 *
2102 * Query the VBIOS data tables to determine if the board supports SR-IOV.
2103 */
4e99a44e 2104static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 2105{
6867e1b5
ML
2106 if (amdgpu_sriov_vf(adev)) {
2107 if (adev->is_atom_fw) {
2108 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2109 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2110 } else {
2111 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2112 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2113 }
2114
2115 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2116 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
a5bde2f9 2117 }
048765ad
AR
2118}
2119
e3ecdffa
AD
2120/**
2121 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
2122 *
2123 * @asic_type: AMD asic type
2124 *
 2125 * Check if there is DC (the new modesetting infrastructure) support for an asic.
 2126 * Returns true if DC has support, false if not.
2127 */
4562236b
HW
2128bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2129{
2130 switch (asic_type) {
2131#if defined(CONFIG_DRM_AMD_DC)
2132 case CHIP_BONAIRE:
2133 case CHIP_HAWAII:
0d6fbccb 2134 case CHIP_KAVERI:
367e6687
AD
2135 case CHIP_KABINI:
2136 case CHIP_MULLINS:
4562236b
HW
2137 case CHIP_CARRIZO:
2138 case CHIP_STONEY:
2139 case CHIP_POLARIS11:
2140 case CHIP_POLARIS10:
2c8ad2d5 2141 case CHIP_POLARIS12:
4562236b
HW
2142 case CHIP_TONGA:
2143 case CHIP_FIJI:
42f8ffa1 2144 case CHIP_VEGA10:
dca7b401 2145 case CHIP_VEGA12:
42f8ffa1 2146#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
fd187853 2147 case CHIP_RAVEN:
42f8ffa1 2148#endif
fd187853 2149 return amdgpu_dc != 0;
4562236b
HW
2150#endif
2151 default:
2152 return false;
2153 }
2154}
2155
2156/**
2157 * amdgpu_device_has_dc_support - check if dc is supported
2158 *
 2159 * @adev: amdgpu_device pointer
2160 *
2161 * Returns true for supported, false for not supported
2162 */
2163bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2164{
2555039d
XY
2165 if (amdgpu_sriov_vf(adev))
2166 return false;
2167
4562236b
HW
2168 return amdgpu_device_asic_has_dc_support(adev->asic_type);
2169}
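/*
 * Usage sketch (mirrors the calls made elsewhere in this file): display
 * code picks the modesetting path based on this check, for example:
 *
 *	if (amdgpu_device_has_dc_support(adev))
 *		drm_atomic_helper_shutdown(adev->ddev);		// DC/atomic path
 *	else
 *		drm_crtc_force_disable_all(adev->ddev);		// legacy path
 */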
2170
d38ceaf9
AD
2171/**
2172 * amdgpu_device_init - initialize the driver
2173 *
2174 * @adev: amdgpu_device pointer
 2175 * @ddev: drm dev pointer
2176 * @pdev: pci dev pointer
2177 * @flags: driver flags
2178 *
2179 * Initializes the driver info and hw (all asics).
2180 * Returns 0 for success or an error on failure.
2181 * Called at driver startup.
2182 */
2183int amdgpu_device_init(struct amdgpu_device *adev,
2184 struct drm_device *ddev,
2185 struct pci_dev *pdev,
2186 uint32_t flags)
2187{
2188 int r, i;
2189 bool runtime = false;
95844d20 2190 u32 max_MBps;
d38ceaf9
AD
2191
2192 adev->shutdown = false;
2193 adev->dev = &pdev->dev;
2194 adev->ddev = ddev;
2195 adev->pdev = pdev;
2196 adev->flags = flags;
2f7d10b3 2197 adev->asic_type = flags & AMD_ASIC_MASK;
d38ceaf9 2198 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
593aa2d2
SL
2199 if (amdgpu_emu_mode == 1)
2200 adev->usec_timeout *= 2;
770d13b1 2201 adev->gmc.gart_size = 512 * 1024 * 1024;
d38ceaf9
AD
2202 adev->accel_working = false;
2203 adev->num_rings = 0;
2204 adev->mman.buffer_funcs = NULL;
2205 adev->mman.buffer_funcs_ring = NULL;
2206 adev->vm_manager.vm_pte_funcs = NULL;
2d55e45a 2207 adev->vm_manager.vm_pte_num_rings = 0;
132f34e4 2208 adev->gmc.gmc_funcs = NULL;
f54d1867 2209 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
b8866c26 2210 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
d38ceaf9
AD
2211
2212 adev->smc_rreg = &amdgpu_invalid_rreg;
2213 adev->smc_wreg = &amdgpu_invalid_wreg;
2214 adev->pcie_rreg = &amdgpu_invalid_rreg;
2215 adev->pcie_wreg = &amdgpu_invalid_wreg;
36b9a952
HR
2216 adev->pciep_rreg = &amdgpu_invalid_rreg;
2217 adev->pciep_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
2218 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2219 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2220 adev->didt_rreg = &amdgpu_invalid_rreg;
2221 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
2222 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2223 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
2224 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2225 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2226
3e39ab90
AD
2227 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2228 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2229 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
2230
2231 /* mutex initialization are all done here so we
2232 * can recall function without having locking issues */
d38ceaf9 2233 atomic_set(&adev->irq.ih.lock, 0);
0e5ca0d1 2234 mutex_init(&adev->firmware.mutex);
d38ceaf9
AD
2235 mutex_init(&adev->pm.mutex);
2236 mutex_init(&adev->gfx.gpu_clock_mutex);
2237 mutex_init(&adev->srbm_mutex);
b8866c26 2238 mutex_init(&adev->gfx.pipe_reserve_mutex);
d38ceaf9 2239 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9 2240 mutex_init(&adev->mn_lock);
e23b74aa 2241 mutex_init(&adev->virt.vf_errors.lock);
d38ceaf9 2242 hash_init(adev->mn_hash);
13a752e3 2243 mutex_init(&adev->lock_reset);
d38ceaf9 2244
06ec9070 2245 amdgpu_device_check_arguments(adev);
d38ceaf9 2246
d38ceaf9
AD
2247 spin_lock_init(&adev->mmio_idx_lock);
2248 spin_lock_init(&adev->smc_idx_lock);
2249 spin_lock_init(&adev->pcie_idx_lock);
2250 spin_lock_init(&adev->uvd_ctx_idx_lock);
2251 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 2252 spin_lock_init(&adev->gc_cac_idx_lock);
16abb5d2 2253 spin_lock_init(&adev->se_cac_idx_lock);
d38ceaf9 2254 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 2255 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 2256
0c4e7fa5
CZ
2257 INIT_LIST_HEAD(&adev->shadow_list);
2258 mutex_init(&adev->shadow_list_lock);
2259
795f2813
AR
2260 INIT_LIST_HEAD(&adev->ring_lru_list);
2261 spin_lock_init(&adev->ring_lru_list_lock);
2262
06ec9070
AD
2263 INIT_DELAYED_WORK(&adev->late_init_work,
2264 amdgpu_device_ip_late_init_func_handler);
2dc80b00 2265
0fa49558
AX
2266 /* Registers mapping */
2267 /* TODO: block userspace mapping of io register */
da69c161
KW
2268 if (adev->asic_type >= CHIP_BONAIRE) {
2269 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2270 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2271 } else {
2272 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2273 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2274 }
d38ceaf9 2275
d38ceaf9
AD
2276 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2277 if (adev->rmmio == NULL) {
2278 return -ENOMEM;
2279 }
2280 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2281 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2282
705e519e 2283 /* doorbell bar mapping */
06ec9070 2284 amdgpu_device_doorbell_init(adev);
d38ceaf9
AD
2285
2286 /* io port mapping */
2287 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2288 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2289 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2290 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2291 break;
2292 }
2293 }
2294 if (adev->rio_mem == NULL)
b64a18c5 2295 DRM_INFO("PCI I/O BAR is not found.\n");
d38ceaf9 2296
5494d864
AD
2297 amdgpu_device_get_pcie_info(adev);
2298
d38ceaf9 2299 /* early init functions */
06ec9070 2300 r = amdgpu_device_ip_early_init(adev);
d38ceaf9
AD
2301 if (r)
2302 return r;
2303
2304 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2305 /* this will fail for cards that aren't VGA class devices, just
2306 * ignore it */
06ec9070 2307 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
d38ceaf9 2308
e9bef455 2309 if (amdgpu_device_is_px(ddev))
d38ceaf9 2310 runtime = true;
84c8b22e
LW
2311 if (!pci_is_thunderbolt_attached(adev->pdev))
2312 vga_switcheroo_register_client(adev->pdev,
2313 &amdgpu_switcheroo_ops, runtime);
d38ceaf9
AD
2314 if (runtime)
2315 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2316
9475a943
SL
2317 if (amdgpu_emu_mode == 1) {
2318 /* post the asic on emulation mode */
2319 emu_soc_asic_init(adev);
bfca0289 2320 goto fence_driver_init;
9475a943 2321 }
bfca0289 2322
d38ceaf9 2323 /* Read BIOS */
83ba126a
AD
2324 if (!amdgpu_get_bios(adev)) {
2325 r = -EINVAL;
2326 goto failed;
2327 }
f7e9e9fe 2328
d38ceaf9 2329 r = amdgpu_atombios_init(adev);
2c1a2784
AD
2330 if (r) {
2331 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
e23b74aa 2332 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
83ba126a 2333 goto failed;
2c1a2784 2334 }
d38ceaf9 2335
4e99a44e
ML
2336 /* detect if we are with an SRIOV vbios */
2337 amdgpu_device_detect_sriov_bios(adev);
048765ad 2338
d38ceaf9 2339 /* Post card if necessary */
39c640c0 2340 if (amdgpu_device_need_post(adev)) {
d38ceaf9 2341 if (!adev->bios) {
bec86378 2342 dev_err(adev->dev, "no vBIOS found\n");
83ba126a
AD
2343 r = -EINVAL;
2344 goto failed;
d38ceaf9 2345 }
bec86378 2346 DRM_INFO("GPU posting now...\n");
4e99a44e
ML
2347 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2348 if (r) {
2349 dev_err(adev->dev, "gpu post error!\n");
2350 goto failed;
2351 }
d38ceaf9
AD
2352 }
2353
88b64e95
AD
2354 if (adev->is_atom_fw) {
2355 /* Initialize clocks */
2356 r = amdgpu_atomfirmware_get_clock_info(adev);
2357 if (r) {
2358 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
e23b74aa 2359 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
88b64e95
AD
2360 goto failed;
2361 }
2362 } else {
a5bde2f9
AD
2363 /* Initialize clocks */
2364 r = amdgpu_atombios_get_clock_info(adev);
2365 if (r) {
2366 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
e23b74aa 2367 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
89041940 2368 goto failed;
a5bde2f9
AD
2369 }
2370 /* init i2c buses */
4562236b
HW
2371 if (!amdgpu_device_has_dc_support(adev))
2372 amdgpu_atombios_i2c_init(adev);
2c1a2784 2373 }
d38ceaf9 2374
bfca0289 2375fence_driver_init:
d38ceaf9
AD
2376 /* Fence driver */
2377 r = amdgpu_fence_driver_init(adev);
2c1a2784
AD
2378 if (r) {
2379 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
e23b74aa 2380 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
83ba126a 2381 goto failed;
2c1a2784 2382 }
d38ceaf9
AD
2383
2384 /* init the mode config */
2385 drm_mode_config_init(adev->ddev);
2386
06ec9070 2387 r = amdgpu_device_ip_init(adev);
d38ceaf9 2388 if (r) {
8840a387 2389 /* failed in exclusive mode due to timeout */
2390 if (amdgpu_sriov_vf(adev) &&
2391 !amdgpu_sriov_runtime(adev) &&
2392 amdgpu_virt_mmio_blocked(adev) &&
2393 !amdgpu_virt_wait_reset(adev)) {
2394 dev_err(adev->dev, "VF exclusive mode timeout\n");
1daee8b4
PD
2395 /* Don't send request since VF is inactive. */
2396 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
2397 adev->virt.ops = NULL;
8840a387 2398 r = -EAGAIN;
2399 goto failed;
2400 }
06ec9070 2401 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
e23b74aa 2402 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
83ba126a 2403 goto failed;
d38ceaf9
AD
2404 }
2405
2406 adev->accel_working = true;
2407
e59c0205
AX
2408 amdgpu_vm_check_compute_bug(adev);
2409
95844d20
MO
2410 /* Initialize the buffer migration limit. */
2411 if (amdgpu_moverate >= 0)
2412 max_MBps = amdgpu_moverate;
2413 else
2414 max_MBps = 8; /* Allow 8 MB/s. */
2415 /* Get a log2 for easy divisions. */
2416 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2417
d38ceaf9
AD
2418 r = amdgpu_ib_pool_init(adev);
2419 if (r) {
2420 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
e23b74aa 2421 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
83ba126a 2422 goto failed;
d38ceaf9
AD
2423 }
2424
2dc8f81e
HC
2425 if (amdgpu_sriov_vf(adev))
2426 amdgpu_virt_init_data_exchange(adev);
2427
9bc92b9c
ML
2428 amdgpu_fbdev_init(adev);
2429
d2f52ac8
RZ
2430 r = amdgpu_pm_sysfs_init(adev);
2431 if (r)
2432 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2433
75758255 2434 r = amdgpu_debugfs_gem_init(adev);
3f14e623 2435 if (r)
d38ceaf9 2436 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
d38ceaf9
AD
2437
2438 r = amdgpu_debugfs_regs_init(adev);
3f14e623 2439 if (r)
d38ceaf9 2440 DRM_ERROR("registering register debugfs failed (%d).\n", r);
d38ceaf9 2441
50ab2533 2442 r = amdgpu_debugfs_firmware_init(adev);
3f14e623 2443 if (r)
50ab2533 2444 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
50ab2533 2445
763efb6c 2446 r = amdgpu_debugfs_init(adev);
db95e218 2447 if (r)
763efb6c 2448 DRM_ERROR("Creating debugfs files failed (%d).\n", r);
db95e218 2449
d38ceaf9
AD
2450 if ((amdgpu_testing & 1)) {
2451 if (adev->accel_working)
2452 amdgpu_test_moves(adev);
2453 else
2454 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2455 }
d38ceaf9
AD
2456 if (amdgpu_benchmarking) {
2457 if (adev->accel_working)
2458 amdgpu_benchmark(adev, amdgpu_benchmarking);
2459 else
2460 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2461 }
2462
2463 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2464 * explicit gating rather than handling it automatically.
2465 */
06ec9070 2466 r = amdgpu_device_ip_late_init(adev);
2c1a2784 2467 if (r) {
06ec9070 2468 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
e23b74aa 2469 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
83ba126a 2470 goto failed;
2c1a2784 2471 }
d38ceaf9
AD
2472
2473 return 0;
83ba126a
AD
2474
2475failed:
89041940 2476 amdgpu_vf_error_trans_all(adev);
83ba126a
AD
2477 if (runtime)
2478 vga_switcheroo_fini_domain_pm_ops(adev->dev);
8840a387 2479
83ba126a 2480 return r;
d38ceaf9
AD
2481}
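/*
 * Usage sketch (an assumption about the caller, which lives outside this
 * file): the KMS load path allocates the amdgpu_device and hands it here
 * together with the DRM and PCI devices, roughly:
 *
 *	r = amdgpu_device_init(adev, ddev, pdev, flags);
 *	if (r)
 *		// bail out, the device is unusable
 */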
2482
d38ceaf9
AD
2483/**
2484 * amdgpu_device_fini - tear down the driver
2485 *
2486 * @adev: amdgpu_device pointer
2487 *
2488 * Tear down the driver info (all asics).
2489 * Called at driver shutdown.
2490 */
2491void amdgpu_device_fini(struct amdgpu_device *adev)
2492{
2493 int r;
2494
2495 DRM_INFO("amdgpu: finishing device.\n");
2496 adev->shutdown = true;
e5b03032
ML
2497 /* disable all interrupts */
2498 amdgpu_irq_disable_all(adev);
ff97cba8
ML
2499 if (adev->mode_info.mode_config_initialized){
2500 if (!amdgpu_device_has_dc_support(adev))
2501 drm_crtc_force_disable_all(adev->ddev);
2502 else
2503 drm_atomic_helper_shutdown(adev->ddev);
2504 }
d38ceaf9
AD
2505 amdgpu_ib_pool_fini(adev);
2506 amdgpu_fence_driver_fini(adev);
58e955d9 2507 amdgpu_pm_sysfs_fini(adev);
d38ceaf9 2508 amdgpu_fbdev_fini(adev);
06ec9070 2509 r = amdgpu_device_ip_fini(adev);
ab4fe3e1
HR
2510 if (adev->firmware.gpu_info_fw) {
2511 release_firmware(adev->firmware.gpu_info_fw);
2512 adev->firmware.gpu_info_fw = NULL;
2513 }
d38ceaf9 2514 adev->accel_working = false;
2dc80b00 2515 cancel_delayed_work_sync(&adev->late_init_work);
d38ceaf9 2516 /* free i2c buses */
4562236b
HW
2517 if (!amdgpu_device_has_dc_support(adev))
2518 amdgpu_i2c_fini(adev);
bfca0289
SL
2519
2520 if (amdgpu_emu_mode != 1)
2521 amdgpu_atombios_fini(adev);
2522
d38ceaf9
AD
2523 kfree(adev->bios);
2524 adev->bios = NULL;
84c8b22e
LW
2525 if (!pci_is_thunderbolt_attached(adev->pdev))
2526 vga_switcheroo_unregister_client(adev->pdev);
83ba126a
AD
2527 if (adev->flags & AMD_IS_PX)
2528 vga_switcheroo_fini_domain_pm_ops(adev->dev);
d38ceaf9
AD
2529 vga_client_register(adev->pdev, NULL, NULL, NULL);
2530 if (adev->rio_mem)
2531 pci_iounmap(adev->pdev, adev->rio_mem);
2532 adev->rio_mem = NULL;
2533 iounmap(adev->rmmio);
2534 adev->rmmio = NULL;
06ec9070 2535 amdgpu_device_doorbell_fini(adev);
d38ceaf9 2536 amdgpu_debugfs_regs_cleanup(adev);
d38ceaf9
AD
2537}
2538
2539
2540/*
2541 * Suspend & resume.
2542 */
2543/**
810ddc3a 2544 * amdgpu_device_suspend - initiate device suspend
d38ceaf9
AD
2545 *
 2546 * @dev: drm dev pointer
 2547 * @suspend: true to also put the PCI device into a low power state (D3hot)
2548 *
2549 * Puts the hw in the suspend state (all asics).
2550 * Returns 0 for success or an error on failure.
2551 * Called at driver suspend.
2552 */
810ddc3a 2553int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
d38ceaf9
AD
2554{
2555 struct amdgpu_device *adev;
2556 struct drm_crtc *crtc;
2557 struct drm_connector *connector;
5ceb54c6 2558 int r;
d38ceaf9
AD
2559
2560 if (dev == NULL || dev->dev_private == NULL) {
2561 return -ENODEV;
2562 }
2563
2564 adev = dev->dev_private;
2565
2566 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2567 return 0;
2568
2569 drm_kms_helper_poll_disable(dev);
2570
4562236b
HW
2571 if (!amdgpu_device_has_dc_support(adev)) {
2572 /* turn off display hw */
2573 drm_modeset_lock_all(dev);
2574 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2575 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2576 }
2577 drm_modeset_unlock_all(dev);
d38ceaf9
AD
2578 }
2579
ba997709
YZ
2580 amdgpu_amdkfd_suspend(adev);
2581
756e6880 2582 /* unpin the front buffers and cursors */
d38ceaf9 2583 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
756e6880 2584 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
e68d14dd 2585 struct drm_framebuffer *fb = crtc->primary->fb;
d38ceaf9
AD
2586 struct amdgpu_bo *robj;
2587
756e6880
AD
2588 if (amdgpu_crtc->cursor_bo) {
2589 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2590 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2591 if (r == 0) {
2592 amdgpu_bo_unpin(aobj);
2593 amdgpu_bo_unreserve(aobj);
2594 }
2595 }
2596
e68d14dd 2597 if (fb == NULL || fb->obj[0] == NULL) {
d38ceaf9
AD
2598 continue;
2599 }
e68d14dd 2600 robj = gem_to_amdgpu_bo(fb->obj[0]);
d38ceaf9
AD
2601 /* don't unpin kernel fb objects */
2602 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
7a6901d7 2603 r = amdgpu_bo_reserve(robj, true);
d38ceaf9
AD
2604 if (r == 0) {
2605 amdgpu_bo_unpin(robj);
2606 amdgpu_bo_unreserve(robj);
2607 }
2608 }
2609 }
2610 /* evict vram memory */
2611 amdgpu_bo_evict_vram(adev);
2612
5ceb54c6 2613 amdgpu_fence_driver_suspend(adev);
d38ceaf9 2614
cdd61df6 2615 r = amdgpu_device_ip_suspend(adev);
d38ceaf9 2616
a0a71e49
AD
2617 /* evict remaining vram memory
2618 * This second call to evict vram is to evict the gart page table
2619 * using the CPU.
2620 */
d38ceaf9
AD
2621 amdgpu_bo_evict_vram(adev);
2622
2623 pci_save_state(dev->pdev);
2624 if (suspend) {
2625 /* Shut down the device */
2626 pci_disable_device(dev->pdev);
2627 pci_set_power_state(dev->pdev, PCI_D3hot);
74b0b157 2628 } else {
2629 r = amdgpu_asic_reset(adev);
2630 if (r)
2631 DRM_ERROR("amdgpu asic reset failed\n");
d38ceaf9
AD
2632 }
2633
2634 if (fbcon) {
2635 console_lock();
2636 amdgpu_fbdev_set_suspend(adev, 1);
2637 console_unlock();
2638 }
2639 return 0;
2640}
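/*
 * Usage sketch (an assumption, the PM callbacks live in another file):
 * system sleep and runtime PM are expected to funnel into this function,
 * roughly:
 *
 *	amdgpu_device_suspend(drm_dev, true, true);	// S3/S4 entry
 *	// runtime suspend may pass different suspend/fbcon values
 */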
2641
2642/**
810ddc3a 2643 * amdgpu_device_resume - initiate device resume
d38ceaf9
AD
2644 *
 2645 * @dev: drm dev pointer
2646 *
2647 * Bring the hw back to operating state (all asics).
2648 * Returns 0 for success or an error on failure.
2649 * Called at driver resume.
2650 */
810ddc3a 2651int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
d38ceaf9
AD
2652{
2653 struct drm_connector *connector;
2654 struct amdgpu_device *adev = dev->dev_private;
756e6880 2655 struct drm_crtc *crtc;
03161a6e 2656 int r = 0;
d38ceaf9
AD
2657
2658 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2659 return 0;
2660
74b0b157 2661 if (fbcon)
d38ceaf9 2662 console_lock();
74b0b157 2663
d38ceaf9
AD
2664 if (resume) {
2665 pci_set_power_state(dev->pdev, PCI_D0);
2666 pci_restore_state(dev->pdev);
74b0b157 2667 r = pci_enable_device(dev->pdev);
03161a6e
HR
2668 if (r)
2669 goto unlock;
d38ceaf9
AD
2670 }
2671
2672 /* post card */
39c640c0 2673 if (amdgpu_device_need_post(adev)) {
74b0b157 2674 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2675 if (r)
2676 DRM_ERROR("amdgpu asic init failed\n");
2677 }
d38ceaf9 2678
06ec9070 2679 r = amdgpu_device_ip_resume(adev);
e6707218 2680 if (r) {
06ec9070 2681 DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
03161a6e 2682 goto unlock;
e6707218 2683 }
5ceb54c6
AD
2684 amdgpu_fence_driver_resume(adev);
2685
d38ceaf9 2686
06ec9070 2687 r = amdgpu_device_ip_late_init(adev);
03161a6e
HR
2688 if (r)
2689 goto unlock;
d38ceaf9 2690
756e6880
AD
2691 /* pin cursors */
2692 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2693 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2694
2695 if (amdgpu_crtc->cursor_bo) {
2696 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2697 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2698 if (r == 0) {
2699 r = amdgpu_bo_pin(aobj,
2700 AMDGPU_GEM_DOMAIN_VRAM,
2701 &amdgpu_crtc->cursor_addr);
2702 if (r != 0)
2703 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2704 amdgpu_bo_unreserve(aobj);
2705 }
2706 }
2707 }
ba997709
YZ
2708 r = amdgpu_amdkfd_resume(adev);
2709 if (r)
2710 return r;
756e6880 2711
d38ceaf9
AD
2712 /* blat the mode back in */
2713 if (fbcon) {
4562236b
HW
2714 if (!amdgpu_device_has_dc_support(adev)) {
2715 /* pre DCE11 */
2716 drm_helper_resume_force_mode(dev);
2717
2718 /* turn on display hw */
2719 drm_modeset_lock_all(dev);
2720 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2721 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2722 }
2723 drm_modeset_unlock_all(dev);
d38ceaf9
AD
2724 }
2725 }
2726
2727 drm_kms_helper_poll_enable(dev);
23a1a9e5
L
2728
2729 /*
2730 * Most of the connector probing functions try to acquire runtime pm
2731 * refs to ensure that the GPU is powered on when connector polling is
2732 * performed. Since we're calling this from a runtime PM callback,
2733 * trying to acquire rpm refs will cause us to deadlock.
2734 *
2735 * Since we're guaranteed to be holding the rpm lock, it's safe to
2736 * temporarily disable the rpm helpers so this doesn't deadlock us.
2737 */
2738#ifdef CONFIG_PM
2739 dev->dev->power.disable_depth++;
2740#endif
4562236b
HW
2741 if (!amdgpu_device_has_dc_support(adev))
2742 drm_helper_hpd_irq_event(dev);
2743 else
2744 drm_kms_helper_hotplug_event(dev);
23a1a9e5
L
2745#ifdef CONFIG_PM
2746 dev->dev->power.disable_depth--;
2747#endif
d38ceaf9 2748
03161a6e 2749 if (fbcon)
d38ceaf9 2750 amdgpu_fbdev_set_suspend(adev, 0);
03161a6e
HR
2751
2752unlock:
2753 if (fbcon)
d38ceaf9 2754 console_unlock();
d38ceaf9 2755
03161a6e 2756 return r;
d38ceaf9
AD
2757}
2758
e3ecdffa
AD
2759/**
2760 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
2761 *
2762 * @adev: amdgpu_device pointer
2763 *
2764 * The list of all the hardware IPs that make up the asic is walked and
2765 * the check_soft_reset callbacks are run. check_soft_reset determines
2766 * if the asic is still hung or not.
2767 * Returns true if any of the IPs are still in a hung state, false if not.
2768 */
06ec9070 2769static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
63fbf42f
CZ
2770{
2771 int i;
2772 bool asic_hang = false;
2773
f993d628
ML
2774 if (amdgpu_sriov_vf(adev))
2775 return true;
2776
8bc04c29
AD
2777 if (amdgpu_asic_need_full_reset(adev))
2778 return true;
2779
63fbf42f 2780 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2781 if (!adev->ip_blocks[i].status.valid)
63fbf42f 2782 continue;
a1255107
AD
2783 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2784 adev->ip_blocks[i].status.hang =
2785 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2786 if (adev->ip_blocks[i].status.hang) {
2787 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
63fbf42f
CZ
2788 asic_hang = true;
2789 }
2790 }
2791 return asic_hang;
2792}
2793
e3ecdffa
AD
2794/**
2795 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
2796 *
2797 * @adev: amdgpu_device pointer
2798 *
2799 * The list of all the hardware IPs that make up the asic is walked and the
2800 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
2801 * handles any IP specific hardware or software state changes that are
2802 * necessary for a soft reset to succeed.
2803 * Returns 0 on success, negative error code on failure.
2804 */
06ec9070 2805static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
d31a501e
CZ
2806{
2807 int i, r = 0;
2808
2809 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2810 if (!adev->ip_blocks[i].status.valid)
d31a501e 2811 continue;
a1255107
AD
2812 if (adev->ip_blocks[i].status.hang &&
2813 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2814 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
d31a501e
CZ
2815 if (r)
2816 return r;
2817 }
2818 }
2819
2820 return 0;
2821}
2822
e3ecdffa
AD
2823/**
2824 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
2825 *
2826 * @adev: amdgpu_device pointer
2827 *
2828 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
2829 * reset is necessary to recover.
2830 * Returns true if a full asic reset is required, false if not.
2831 */
06ec9070 2832static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
35d782fe 2833{
da146d3b
AD
2834 int i;
2835
8bc04c29
AD
2836 if (amdgpu_asic_need_full_reset(adev))
2837 return true;
2838
da146d3b 2839 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2840 if (!adev->ip_blocks[i].status.valid)
da146d3b 2841 continue;
a1255107
AD
2842 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2843 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2844 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
98512bb8
KW
2845 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
2846 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
a1255107 2847 if (adev->ip_blocks[i].status.hang) {
da146d3b
AD
2848 DRM_INFO("Some block need full reset!\n");
2849 return true;
2850 }
2851 }
35d782fe
CZ
2852 }
2853 return false;
2854}
2855
e3ecdffa
AD
2856/**
2857 * amdgpu_device_ip_soft_reset - do a soft reset
2858 *
2859 * @adev: amdgpu_device pointer
2860 *
2861 * The list of all the hardware IPs that make up the asic is walked and the
2862 * soft_reset callbacks are run if the block is hung. soft_reset handles any
2863 * IP specific hardware or software state changes that are necessary to soft
2864 * reset the IP.
2865 * Returns 0 on success, negative error code on failure.
2866 */
06ec9070 2867static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
35d782fe
CZ
2868{
2869 int i, r = 0;
2870
2871 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2872 if (!adev->ip_blocks[i].status.valid)
35d782fe 2873 continue;
a1255107
AD
2874 if (adev->ip_blocks[i].status.hang &&
2875 adev->ip_blocks[i].version->funcs->soft_reset) {
2876 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
35d782fe
CZ
2877 if (r)
2878 return r;
2879 }
2880 }
2881
2882 return 0;
2883}
2884
e3ecdffa
AD
2885/**
2886 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
2887 *
2888 * @adev: amdgpu_device pointer
2889 *
2890 * The list of all the hardware IPs that make up the asic is walked and the
2891 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
2892 * handles any IP specific hardware or software state changes that are
2893 * necessary after the IP has been soft reset.
2894 * Returns 0 on success, negative error code on failure.
2895 */
06ec9070 2896static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
35d782fe
CZ
2897{
2898 int i, r = 0;
2899
2900 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2901 if (!adev->ip_blocks[i].status.valid)
35d782fe 2902 continue;
a1255107
AD
2903 if (adev->ip_blocks[i].status.hang &&
2904 adev->ip_blocks[i].version->funcs->post_soft_reset)
2905 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
35d782fe
CZ
2906 if (r)
2907 return r;
2908 }
2909
2910 return 0;
2911}
2912
e3ecdffa
AD
2913/**
2914 * amdgpu_device_recover_vram_from_shadow - restore shadowed VRAM buffers
2915 *
2916 * @adev: amdgpu_device pointer
2917 * @ring: amdgpu_ring for the engine handling the buffer operations
2918 * @bo: amdgpu_bo buffer whose shadow is being restored
2919 * @fence: dma_fence associated with the operation
2920 *
2921 * Restores the VRAM buffer contents from the shadow in GTT. Used to
2922 * restore things like GPUVM page tables after a GPU reset where
2923 * the contents of VRAM might be lost.
2924 * Returns 0 on success, negative error code on failure.
2925 */
06ec9070
AD
2926static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
2927 struct amdgpu_ring *ring,
2928 struct amdgpu_bo *bo,
2929 struct dma_fence **fence)
53cdccd5
CZ
2930{
2931 uint32_t domain;
2932 int r;
2933
23d2e504
RH
2934 if (!bo->shadow)
2935 return 0;
2936
1d284797 2937 r = amdgpu_bo_reserve(bo, true);
23d2e504
RH
2938 if (r)
2939 return r;
2940 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2941 /* if bo has been evicted, then no need to recover */
2942 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
82521316
RH
2943 r = amdgpu_bo_validate(bo->shadow);
2944 if (r) {
2945 DRM_ERROR("bo validate failed!\n");
2946 goto err;
2947 }
2948
23d2e504 2949 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
53cdccd5 2950 NULL, fence, true);
23d2e504
RH
2951 if (r) {
2952 DRM_ERROR("recover page table failed!\n");
2953 goto err;
2954 }
2955 }
53cdccd5 2956err:
23d2e504
RH
2957 amdgpu_bo_unreserve(bo);
2958 return r;
53cdccd5
CZ
2959}
2960
e3ecdffa
AD
2961/**
2962 * amdgpu_device_handle_vram_lost - Handle the loss of VRAM contents
2963 *
2964 * @adev: amdgpu_device pointer
2965 *
2966 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
2967 * restore things like GPUVM page tables after a GPU reset where
2968 * the contents of VRAM might be lost.
2969 * Returns 0 on success, 1 on failure.
2970 */
c41d1cf6
ML
2971static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
2972{
2973 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2974 struct amdgpu_bo *bo, *tmp;
2975 struct dma_fence *fence = NULL, *next = NULL;
2976 long r = 1;
2977 int i = 0;
2978 long tmo;
2979
2980 if (amdgpu_sriov_runtime(adev))
2981 tmo = msecs_to_jiffies(amdgpu_lockup_timeout);
2982 else
2983 tmo = msecs_to_jiffies(100);
2984
2985 DRM_INFO("recover vram bo from shadow start\n");
2986 mutex_lock(&adev->shadow_list_lock);
2987 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2988 next = NULL;
2989 amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
2990 if (fence) {
2991 r = dma_fence_wait_timeout(fence, false, tmo);
2992 if (r == 0)
2993 pr_err("wait fence %p[%d] timeout\n", fence, i);
2994 else if (r < 0)
2995 pr_err("wait fence %p[%d] interrupted\n", fence, i);
2996 if (r < 1) {
2997 dma_fence_put(fence);
2998 fence = next;
2999 break;
3000 }
3001 i++;
3002 }
3003
3004 dma_fence_put(fence);
3005 fence = next;
3006 }
3007 mutex_unlock(&adev->shadow_list_lock);
3008
3009 if (fence) {
3010 r = dma_fence_wait_timeout(fence, false, tmo);
3011 if (r == 0)
3012 pr_err("wait fence %p[%d] timeout\n", fence, i);
3013 else if (r < 0)
3014 pr_err("wait fence %p[%d] interrupted\n", fence, i);
3015
3016 }
3017 dma_fence_put(fence);
3018
3019 if (r > 0)
3020 DRM_INFO("recover vram bo from shadow done\n");
3021 else
3022 DRM_ERROR("recover vram bo from shadow failed\n");
3023
e3ecdffa 3024 return (r > 0) ? 0 : 1;
c41d1cf6
ML
3025}
3026
e3ecdffa 3027/**
06ec9070 3028 * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
a90ad3c2
ML
3029 *
3030 * @adev: amdgpu device pointer
a90ad3c2 3031 *
5740682e
ML
 3032 * Attempt a soft reset or a full reset and reinitialize the ASIC.
 3033 * Returns 0 on success, otherwise a negative error code.
e3ecdffa 3034 */
c41d1cf6 3035static int amdgpu_device_reset(struct amdgpu_device *adev)
a90ad3c2 3036{
5740682e
ML
3037 bool need_full_reset, vram_lost = 0;
3038 int r;
a90ad3c2 3039
06ec9070 3040 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
a90ad3c2 3041
5740682e 3042 if (!need_full_reset) {
06ec9070
AD
3043 amdgpu_device_ip_pre_soft_reset(adev);
3044 r = amdgpu_device_ip_soft_reset(adev);
3045 amdgpu_device_ip_post_soft_reset(adev);
3046 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
5740682e
ML
3047 DRM_INFO("soft reset failed, will fallback to full reset!\n");
3048 need_full_reset = true;
3049 }
5740682e 3050 }
a90ad3c2 3051
5740682e 3052 if (need_full_reset) {
cdd61df6 3053 r = amdgpu_device_ip_suspend(adev);
a90ad3c2 3054
5740682e 3055retry:
5740682e 3056 r = amdgpu_asic_reset(adev);
5740682e
ML
3057 /* post card */
3058 amdgpu_atom_asic_init(adev->mode_info.atom_context);
65781c78 3059
5740682e
ML
3060 if (!r) {
3061 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
06ec9070 3062 r = amdgpu_device_ip_resume_phase1(adev);
5740682e
ML
3063 if (r)
3064 goto out;
65781c78 3065
06ec9070 3066 vram_lost = amdgpu_device_check_vram_lost(adev);
5740682e
ML
3067 if (vram_lost) {
3068 DRM_ERROR("VRAM is lost!\n");
3069 atomic_inc(&adev->vram_lost_counter);
3070 }
3071
c1c7ce8f
CK
3072 r = amdgpu_gtt_mgr_recover(
3073 &adev->mman.bdev.man[TTM_PL_TT]);
5740682e
ML
3074 if (r)
3075 goto out;
3076
06ec9070 3077 r = amdgpu_device_ip_resume_phase2(adev);
5740682e
ML
3078 if (r)
3079 goto out;
3080
3081 if (vram_lost)
06ec9070 3082 amdgpu_device_fill_reset_magic(adev);
65781c78 3083 }
5740682e 3084 }
65781c78 3085
5740682e
ML
3086out:
3087 if (!r) {
3088 amdgpu_irq_gpu_reset_resume_helper(adev);
3089 r = amdgpu_ib_ring_tests(adev);
3090 if (r) {
3091 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
cdd61df6 3092 r = amdgpu_device_ip_suspend(adev);
5740682e
ML
3093 need_full_reset = true;
3094 goto retry;
3095 }
3096 }
65781c78 3097
c41d1cf6
ML
3098 if (!r && ((need_full_reset && !(adev->flags & AMD_IS_APU)) || vram_lost))
3099 r = amdgpu_device_handle_vram_lost(adev);
a90ad3c2 3100
5740682e
ML
3101 return r;
3102}
a90ad3c2 3103
e3ecdffa 3104/**
06ec9070 3105 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
5740682e
ML
3106 *
3107 * @adev: amdgpu device pointer
5740682e
ML
3108 *
 3109 * Do a VF FLR and reinitialize the ASIC.
 3110 * Returns 0 on success, otherwise a negative error code.
e3ecdffa
AD
3111 */
3112static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3113 bool from_hypervisor)
5740682e
ML
3114{
3115 int r;
3116
3117 if (from_hypervisor)
3118 r = amdgpu_virt_request_full_gpu(adev, true);
3119 else
3120 r = amdgpu_virt_reset_gpu(adev);
3121 if (r)
3122 return r;
a90ad3c2
ML
3123
3124 /* Resume IP prior to SMC */
06ec9070 3125 r = amdgpu_device_ip_reinit_early_sriov(adev);
5740682e
ML
3126 if (r)
3127 goto error;
a90ad3c2
ML
3128
3129 /* we need recover gart prior to run SMC/CP/SDMA resume */
c1c7ce8f 3130 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
a90ad3c2
ML
3131
3132 /* now we are okay to resume SMC/CP/SDMA */
06ec9070 3133 r = amdgpu_device_ip_reinit_late_sriov(adev);
c41d1cf6 3134 amdgpu_virt_release_full_gpu(adev, true);
5740682e
ML
3135 if (r)
3136 goto error;
a90ad3c2
ML
3137
3138 amdgpu_irq_gpu_reset_resume_helper(adev);
5740682e 3139 r = amdgpu_ib_ring_tests(adev);
a90ad3c2 3140
c41d1cf6
ML
3141 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
3142 atomic_inc(&adev->vram_lost_counter);
3143 r = amdgpu_device_handle_vram_lost(adev);
a90ad3c2
ML
3144 }
3145
c41d1cf6
ML
3146error:
3147
a90ad3c2
ML
3148 return r;
3149}
3150
d38ceaf9 3151/**
5f152b5e 3152 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
d38ceaf9
AD
3153 *
3154 * @adev: amdgpu device pointer
5740682e 3155 * @job: which job triggered the hang
dcebf026 3156 * @force: forces reset regardless of amdgpu_gpu_recovery
d38ceaf9 3157 *
5740682e 3158 * Attempt to reset the GPU if it has hung (all asics).
d38ceaf9
AD
3159 * Returns 0 for success or an error on failure.
3160 */
5f152b5e
AD
3161int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
3162 struct amdgpu_job *job, bool force)
d38ceaf9 3163{
4562236b 3164 struct drm_atomic_state *state = NULL;
5740682e 3165 int i, r, resched;
fb140b29 3166
54bc1398 3167 if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
63fbf42f
CZ
3168 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
3169 return 0;
3170 }
d38ceaf9 3171
dcebf026
AG
3172 if (!force && (amdgpu_gpu_recovery == 0 ||
3173 (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) {
3174 DRM_INFO("GPU recovery disabled.\n");
3175 return 0;
3176 }
3177
5740682e
ML
3178 dev_info(adev->dev, "GPU reset begin!\n");
3179
13a752e3 3180 mutex_lock(&adev->lock_reset);
d94aed5a 3181 atomic_inc(&adev->gpu_reset_counter);
13a752e3 3182 adev->in_gpu_reset = 1;
d38ceaf9 3183
a3c47d6b
CZ
3184 /* block TTM */
3185 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
71182665 3186
4562236b
HW
3187 /* store modesetting */
3188 if (amdgpu_device_has_dc_support(adev))
3189 state = drm_atomic_helper_suspend(adev->ddev);
a3c47d6b 3190
71182665 3191 /* block all schedulers and reset given job's ring */
0875dc9e
CZ
3192 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3193 struct amdgpu_ring *ring = adev->rings[i];
3194
51687759 3195 if (!ring || !ring->sched.thread)
0875dc9e 3196 continue;
5740682e 3197
71182665
ML
3198 kthread_park(ring->sched.thread);
3199
5740682e
ML
3200 if (job && job->ring->idx != i)
3201 continue;
3202
1b1f42d8 3203 drm_sched_hw_job_reset(&ring->sched, &job->base);
5740682e 3204
2f9d4084
ML
3205 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3206 amdgpu_fence_driver_force_completion(ring);
0875dc9e 3207 }
d38ceaf9 3208
5740682e 3209 if (amdgpu_sriov_vf(adev))
c41d1cf6 3210 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5740682e 3211 else
c41d1cf6 3212 r = amdgpu_device_reset(adev);
5740682e 3213
71182665
ML
3214 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3215 struct amdgpu_ring *ring = adev->rings[i];
53cdccd5 3216
71182665
ML
3217 if (!ring || !ring->sched.thread)
3218 continue;
5740682e 3219
71182665
ML
 3220 		/* only need to recover the scheduler of the given job's ring,
 3221 		 * or of all rings (in the case @job is NULL),
 3222 		 * after the reset above has completed
3223 */
3224 if ((!job || job->ring->idx == i) && !r)
1b1f42d8 3225 drm_sched_job_recovery(&ring->sched);
5740682e 3226
71182665 3227 kthread_unpark(ring->sched.thread);
d38ceaf9
AD
3228 }
3229
4562236b 3230 if (amdgpu_device_has_dc_support(adev)) {
5740682e
ML
3231 if (drm_atomic_helper_resume(adev->ddev, state))
3232 dev_info(adev->dev, "drm resume failed:%d\n", r);
5740682e 3233 } else {
4562236b 3234 drm_helper_resume_force_mode(adev->ddev);
5740682e 3235 }
d38ceaf9
AD
3236
3237 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
5740682e 3238
89041940 3239 if (r) {
d38ceaf9 3240 /* bad news, how to tell it to userspace ? */
5740682e
ML
3241 dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
3242 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
3243 } else {
3244 dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
89041940 3245 }
d38ceaf9 3246
89041940 3247 amdgpu_vf_error_trans_all(adev);
13a752e3
ML
3248 adev->in_gpu_reset = 0;
3249 mutex_unlock(&adev->lock_reset);
d38ceaf9
AD
3250 return r;
3251}
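/*
 * Usage sketch (an assumption, the callers live outside this file): the
 * job scheduler's timeout handler passes the hung job, while a manually
 * requested reset passes no job and forces the recovery:
 *
 *	amdgpu_device_gpu_recover(adev, job, false);	// on job timeout
 *	amdgpu_device_gpu_recover(adev, NULL, true);	// forced/manual reset
 */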
3252
e3ecdffa
AD
3253/**
 3254 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
3255 *
3256 * @adev: amdgpu_device pointer
3257 *
 3258 * Fetches and stores in the driver the PCIE capabilities (gen speed
3259 * and lanes) of the slot the device is in. Handles APUs and
3260 * virtualized environments where PCIE config space may not be available.
3261 */
5494d864 3262static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
d0dd7f0c
AD
3263{
3264 u32 mask;
3265 int ret;
3266
cd474ba0
AD
3267 if (amdgpu_pcie_gen_cap)
3268 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 3269
cd474ba0
AD
3270 if (amdgpu_pcie_lane_cap)
3271 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 3272
cd474ba0
AD
3273 /* covers APUs as well */
3274 if (pci_is_root_bus(adev->pdev->bus)) {
3275 if (adev->pm.pcie_gen_mask == 0)
3276 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3277 if (adev->pm.pcie_mlw_mask == 0)
3278 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 3279 return;
cd474ba0 3280 }
d0dd7f0c 3281
cd474ba0
AD
3282 if (adev->pm.pcie_gen_mask == 0) {
3283 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
3284 if (!ret) {
3285 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3286 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3287 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3288
3289 if (mask & DRM_PCIE_SPEED_25)
3290 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
3291 if (mask & DRM_PCIE_SPEED_50)
3292 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
3293 if (mask & DRM_PCIE_SPEED_80)
3294 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
3295 } else {
3296 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3297 }
3298 }
3299 if (adev->pm.pcie_mlw_mask == 0) {
3300 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
3301 if (!ret) {
3302 switch (mask) {
3303 case 32:
3304 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
3305 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3306 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3307 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3308 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3309 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3310 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3311 break;
3312 case 16:
3313 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3314 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3315 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3316 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3317 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3318 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3319 break;
3320 case 12:
3321 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3322 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3323 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3324 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3325 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3326 break;
3327 case 8:
3328 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3329 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3330 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3331 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3332 break;
3333 case 4:
3334 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3335 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3336 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3337 break;
3338 case 2:
3339 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3340 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3341 break;
3342 case 1:
3343 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
3344 break;
3345 default:
3346 break;
3347 }
3348 } else {
3349 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c
AD
3350 }
3351 }
3352}
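/*
 * Usage sketch (no new code paths; the flags are the ones set above):
 * power management code can consult the cached masks, for example:
 *
 *	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
 *		// the slot and device both support 8.0 GT/s
 */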
d38ceaf9 3353