drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
b1ddf548 28#include <linux/power_supply.h>
0875dc9e 29#include <linux/kthread.h>
d38ceaf9
AD
30#include <linux/console.h>
31#include <linux/slab.h>
d38ceaf9 32#include <drm/drmP.h>
4562236b 33#include <drm/drm_atomic_helper.h>
fcd70cd3 34#include <drm/drm_probe_helper.h>
d38ceaf9
AD
35#include <drm/amdgpu_drm.h>
36#include <linux/vgaarb.h>
37#include <linux/vga_switcheroo.h>
38#include <linux/efi.h>
39#include "amdgpu.h"
f4b373f4 40#include "amdgpu_trace.h"
d38ceaf9
AD
41#include "amdgpu_i2c.h"
42#include "atom.h"
43#include "amdgpu_atombios.h"
a5bde2f9 44#include "amdgpu_atomfirmware.h"
d0dd7f0c 45#include "amd_pcie.h"
33f34802
KW
46#ifdef CONFIG_DRM_AMDGPU_SI
47#include "si.h"
48#endif
a2e73f56
AD
49#ifdef CONFIG_DRM_AMDGPU_CIK
50#include "cik.h"
51#endif
aaa36a97 52#include "vi.h"
460826e6 53#include "soc15.h"
d38ceaf9 54#include "bif/bif_4_1_d.h"
9accf2fd 55#include <linux/pci.h>
bec86378 56#include <linux/firmware.h>
89041940 57#include "amdgpu_vf_error.h"
d38ceaf9 58
ba997709 59#include "amdgpu_amdkfd.h"
d2f52ac8 60#include "amdgpu_pm.h"
d38ceaf9 61
5183411b
AG
62#include "amdgpu_xgmi.h"
63
e2a75f88 64MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
3f76dced 65MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
2d2e5e7e 66MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
ad5a67a7 67MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
54c4d17e 68MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
e2a75f88 69
2dc80b00
S
70#define AMDGPU_RESUME_MS 2000
71
d38ceaf9 72static const char *amdgpu_asic_name[] = {
da69c161
KW
73 "TAHITI",
74 "PITCAIRN",
75 "VERDE",
76 "OLAND",
77 "HAINAN",
d38ceaf9
AD
78 "BONAIRE",
79 "KAVERI",
80 "KABINI",
81 "HAWAII",
82 "MULLINS",
83 "TOPAZ",
84 "TONGA",
48299f95 85 "FIJI",
d38ceaf9 86 "CARRIZO",
139f4917 87 "STONEY",
2cc0c0b5
FC
88 "POLARIS10",
89 "POLARIS11",
c4642a47 90 "POLARIS12",
48ff108d 91 "VEGAM",
d4196f01 92 "VEGA10",
8fab806a 93 "VEGA12",
956fcddc 94 "VEGA20",
2ca8a5d2 95 "RAVEN",
d38ceaf9
AD
96 "LAST",
97};
98
5494d864
AD
99static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
100
e3ecdffa
AD
101/**
102 * amdgpu_device_is_px - Is the device a dGPU with HG/PX power control
103 *
104 * @dev: drm_device pointer
105 *
106 * Returns true if the device is a dGPU with HG/PX power control,
107 * otherwise return false.
108 */
d38ceaf9
AD
109bool amdgpu_device_is_px(struct drm_device *dev)
110{
111 struct amdgpu_device *adev = dev->dev_private;
112
2f7d10b3 113 if (adev->flags & AMD_IS_PX)
d38ceaf9
AD
114 return true;
115 return false;
116}
117
118/*
119 * MMIO register access helper functions.
120 */
e3ecdffa
AD
121/**
122 * amdgpu_mm_rreg - read a memory mapped IO register
123 *
124 * @adev: amdgpu_device pointer
125 * @reg: dword aligned register offset
126 * @acc_flags: access flags which require special behavior
127 *
128 * Returns the 32 bit value from the offset specified.
129 */
d38ceaf9 130uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
15d72fd7 131 uint32_t acc_flags)
d38ceaf9 132{
f4b373f4
TSD
133 uint32_t ret;
134
43ca8efa 135 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
bc992ba5 136 return amdgpu_virt_kiq_rreg(adev, reg);
bc992ba5 137
15d72fd7 138 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
f4b373f4 139 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
d38ceaf9
AD
140 else {
141 unsigned long flags;
d38ceaf9
AD
142
143 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
144 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
145 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
146 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
d38ceaf9 147 }
f4b373f4
TSD
148 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
149 return ret;
d38ceaf9
AD
150}
151
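/*
 * Illustrative usage (a sketch, not part of the driver; the register
 * offset is hypothetical): offsets inside the mapped MMIO BAR take the
 * fast readl() path, anything beyond it falls back to the
 * mmMM_INDEX/mmMM_DATA indexed window under the mmio_idx_lock:
 *
 *	u32 val = amdgpu_mm_rreg(adev, example_reg_offset, 0);
 *
 * Passing AMDGPU_REGS_NO_KIQ in acc_flags skips the KIQ-mediated path
 * even when running as an SR-IOV guest.
 */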
421a2a30
ML
152/*
153 * MMIO register read with byte offset helper function
154 * @offset: byte offset from MMIO start
155 *
156 */
157
e3ecdffa
AD
158/**
159 * amdgpu_mm_rreg8 - read a memory mapped IO register
160 *
161 * @adev: amdgpu_device pointer
162 * @offset: byte aligned register offset
163 *
164 * Returns the 8 bit value from the offset specified.
165 */
421a2a30
ML
166uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
167 if (offset < adev->rmmio_size)
168 return (readb(adev->rmmio + offset));
169 BUG();
170}
171
172/*
173 * MMIO register write with byte offset helper function
174 * @offset: byte offset from MMIO start
175 * @value: the value to be written to the register
176 *
177 */
e3ecdffa
AD
178/**
179 * amdgpu_mm_wreg8 - write a memory mapped IO register
180 *
181 * @adev: amdgpu_device pointer
182 * @offset: byte aligned register offset
183 * @value: 8 bit value to write
184 *
185 * Writes the value specified to the offset specified.
186 */
421a2a30
ML
187void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
188 if (offset < adev->rmmio_size)
189 writeb(value, adev->rmmio + offset);
190 else
191 BUG();
192}
193
e3ecdffa
AD
194/**
195 * amdgpu_mm_wreg - write to a memory mapped IO register
196 *
197 * @adev: amdgpu_device pointer
198 * @reg: dword aligned register offset
199 * @v: 32 bit value to write to the register
200 * @acc_flags: access flags which require special behavior
201 *
202 * Writes the value specified to the offset specified.
203 */
d38ceaf9 204void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
15d72fd7 205 uint32_t acc_flags)
d38ceaf9 206{
f4b373f4 207 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
4e99a44e 208
47ed4e1c
KW
209 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
210 adev->last_mm_index = v;
211 }
212
43ca8efa 213 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
bc992ba5 214 return amdgpu_virt_kiq_wreg(adev, reg, v);
bc992ba5 215
15d72fd7 216 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
d38ceaf9
AD
217 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
218 else {
219 unsigned long flags;
220
221 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
222 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
223 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
224 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
225 }
47ed4e1c
KW
226
227 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
228 udelay(500);
229 }
d38ceaf9
AD
230}
231
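/*
 * Illustrative usage (sketch only, offset and value are hypothetical):
 * writes follow the same split as reads, a direct writel() for offsets
 * inside the BAR and the indexed window otherwise:
 *
 *	amdgpu_mm_wreg(adev, example_reg_offset, example_value, 0);
 *
 * Most callers are expected to use the RREG32()/WREG32() macros seen
 * elsewhere in this file, which wrap these helpers.
 */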
e3ecdffa
AD
232/**
233 * amdgpu_io_rreg - read an IO register
234 *
235 * @adev: amdgpu_device pointer
236 * @reg: dword aligned register offset
237 *
238 * Returns the 32 bit value from the offset specified.
239 */
d38ceaf9
AD
240u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
241{
242 if ((reg * 4) < adev->rio_mem_size)
243 return ioread32(adev->rio_mem + (reg * 4));
244 else {
245 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
246 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
247 }
248}
249
e3ecdffa
AD
250/**
251 * amdgpu_io_wreg - write to an IO register
252 *
253 * @adev: amdgpu_device pointer
254 * @reg: dword aligned register offset
255 * @v: 32 bit value to write to the register
256 *
257 * Writes the value specified to the offset specified.
258 */
d38ceaf9
AD
259void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
260{
47ed4e1c
KW
261 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
262 adev->last_mm_index = v;
263 }
d38ceaf9
AD
264
265 if ((reg * 4) < adev->rio_mem_size)
266 iowrite32(v, adev->rio_mem + (reg * 4));
267 else {
268 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
269 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
270 }
47ed4e1c
KW
271
272 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
273 udelay(500);
274 }
d38ceaf9
AD
275}
276
277/**
278 * amdgpu_mm_rdoorbell - read a doorbell dword
279 *
280 * @adev: amdgpu_device pointer
281 * @index: doorbell index
282 *
283 * Returns the value in the doorbell aperture at the
284 * requested doorbell index (CIK).
285 */
286u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
287{
288 if (index < adev->doorbell.num_doorbells) {
289 return readl(adev->doorbell.ptr + index);
290 } else {
291 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
292 return 0;
293 }
294}
295
296/**
297 * amdgpu_mm_wdoorbell - write a doorbell dword
298 *
299 * @adev: amdgpu_device pointer
300 * @index: doorbell index
301 * @v: value to write
302 *
303 * Writes @v to the doorbell aperture at the
304 * requested doorbell index (CIK).
305 */
306void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
307{
308 if (index < adev->doorbell.num_doorbells) {
309 writel(v, adev->doorbell.ptr + index);
310 } else {
311 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
312 }
313}
314
832be404
KW
315/**
316 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
317 *
318 * @adev: amdgpu_device pointer
319 * @index: doorbell index
320 *
321 * Returns the value in the doorbell aperture at the
322 * requested doorbell index (VEGA10+).
323 */
324u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
325{
326 if (index < adev->doorbell.num_doorbells) {
327 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
328 } else {
329 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
330 return 0;
331 }
332}
333
334/**
335 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
336 *
337 * @adev: amdgpu_device pointer
338 * @index: doorbell index
339 * @v: value to write
340 *
341 * Writes @v to the doorbell aperture at the
342 * requested doorbell index (VEGA10+).
343 */
344void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
345{
346 if (index < adev->doorbell.num_doorbells) {
347 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
348 } else {
349 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
350 }
351}
352
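/*
 * Illustrative usage (a sketch; the amdgpu_ring fields shown are assumed
 * from other parts of the driver): ring code typically mirrors its write
 * pointer into the doorbell aperture, e.g.
 *
 *	if (ring->use_doorbell)
 *		amdgpu_mm_wdoorbell64(adev, ring->doorbell_index, ring->wptr);
 *
 * Out-of-range indices are caught above and only trigger a DRM_ERROR
 * instead of touching memory outside the aperture.
 */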
d38ceaf9
AD
353/**
354 * amdgpu_invalid_rreg - dummy reg read function
355 *
356 * @adev: amdgpu device pointer
357 * @reg: offset of register
358 *
359 * Dummy register read function. Used for register blocks
360 * that certain asics don't have (all asics).
361 * Returns the value in the register.
362 */
363static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
364{
365 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
366 BUG();
367 return 0;
368}
369
370/**
371 * amdgpu_invalid_wreg - dummy reg write function
372 *
373 * @adev: amdgpu device pointer
374 * @reg: offset of register
375 * @v: value to write to the register
376 *
377 * Dummy register write function. Used for register blocks
378 * that certain asics don't have (all asics).
379 */
380static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
381{
382 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
383 reg, v);
384 BUG();
385}
386
387/**
388 * amdgpu_block_invalid_rreg - dummy reg read function
389 *
390 * @adev: amdgpu device pointer
391 * @block: offset of instance
392 * @reg: offset of register
393 *
394 * Dummy register read function. Used for register blocks
395 * that certain asics don't have (all asics).
396 * Returns the value in the register.
397 */
398static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
399 uint32_t block, uint32_t reg)
400{
401 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
402 reg, block);
403 BUG();
404 return 0;
405}
406
407/**
408 * amdgpu_block_invalid_wreg - dummy reg write function
409 *
410 * @adev: amdgpu device pointer
411 * @block: offset of instance
412 * @reg: offset of register
413 * @v: value to write to the register
414 *
415 * Dummy register write function. Used for register blocks
416 * that certain asics don't have (all asics).
417 */
418static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
419 uint32_t block,
420 uint32_t reg, uint32_t v)
421{
422 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
423 reg, block, v);
424 BUG();
425}
426
e3ecdffa
AD
427/**
428 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
429 *
430 * @adev: amdgpu device pointer
431 *
432 * Allocates a scratch page of VRAM for use by various things in the
433 * driver.
434 */
06ec9070 435static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
d38ceaf9 436{
a4a02777
CK
437 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
438 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
439 &adev->vram_scratch.robj,
440 &adev->vram_scratch.gpu_addr,
441 (void **)&adev->vram_scratch.ptr);
d38ceaf9
AD
442}
443
e3ecdffa
AD
444/**
445 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
446 *
447 * @adev: amdgpu device pointer
448 *
449 * Frees the VRAM scratch page.
450 */
06ec9070 451static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
d38ceaf9 452{
078af1a3 453 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
d38ceaf9
AD
454}
455
456/**
9c3f2b54 457 * amdgpu_device_program_register_sequence - program an array of registers.
d38ceaf9
AD
458 *
459 * @adev: amdgpu_device pointer
460 * @registers: pointer to the register array
461 * @array_size: size of the register array
462 *
463 * Programs an array of registers with AND and OR masks.
464 * This is a helper for setting golden registers.
465 */
9c3f2b54
AD
466void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
467 const u32 *registers,
468 const u32 array_size)
d38ceaf9
AD
469{
470 u32 tmp, reg, and_mask, or_mask;
471 int i;
472
473 if (array_size % 3)
474 return;
475
476 for (i = 0; i < array_size; i +=3) {
477 reg = registers[i + 0];
478 and_mask = registers[i + 1];
479 or_mask = registers[i + 2];
480
481 if (and_mask == 0xffffffff) {
482 tmp = or_mask;
483 } else {
484 tmp = RREG32(reg);
485 tmp &= ~and_mask;
486 tmp |= or_mask;
487 }
488 WREG32(reg, tmp);
489 }
490}
491
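/*
 * Illustrative usage (a sketch with made-up offsets and masks): golden
 * register tables are flat arrays of {offset, and_mask, or_mask}
 * triplets, so programming one looks like:
 *
 *	static const u32 example_golden_settings[] = {
 *		0x1234, 0xffffffff, 0x00000001,
 *		0x5678, 0x0000ff00, 0x00002100,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *						ARRAY_SIZE(example_golden_settings));
 *
 * An and_mask of 0xffffffff writes the or_mask verbatim, otherwise the
 * register is read-modify-written.
 */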
e3ecdffa
AD
492/**
493 * amdgpu_device_pci_config_reset - reset the GPU
494 *
495 * @adev: amdgpu_device pointer
496 *
497 * Resets the GPU using the pci config reset sequence.
498 * Only applicable to asics prior to vega10.
499 */
8111c387 500void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
d38ceaf9
AD
501{
502 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
503}
504
505/*
506 * GPU doorbell aperture helpers function.
507 */
508/**
06ec9070 509 * amdgpu_device_doorbell_init - Init doorbell driver information.
d38ceaf9
AD
510 *
511 * @adev: amdgpu_device pointer
512 *
513 * Init doorbell driver information (CIK)
514 * Returns 0 on success, error on failure.
515 */
06ec9070 516static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
d38ceaf9 517{
6585661d 518
705e519e
CK
519 /* No doorbell on SI hardware generation */
520 if (adev->asic_type < CHIP_BONAIRE) {
521 adev->doorbell.base = 0;
522 adev->doorbell.size = 0;
523 adev->doorbell.num_doorbells = 0;
524 adev->doorbell.ptr = NULL;
525 return 0;
526 }
527
d6895ad3
CK
528 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
529 return -EINVAL;
530
22357775
AD
531 amdgpu_asic_init_doorbell_index(adev);
532
d38ceaf9
AD
533 /* doorbell bar mapping */
534 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
535 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
536
edf600da 537 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
9564f192 538 adev->doorbell_index.max_assignment+1);
d38ceaf9
AD
539 if (adev->doorbell.num_doorbells == 0)
540 return -EINVAL;
541
ec3db8a6 542 /* For Vega, reserve and map two pages on the doorbell BAR since the SDMA
543 * paging queue doorbell uses the second page. The
544 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
545 * doorbells are in the first page, so with the paging queue enabled
546 * the max num_doorbells should grow by one page (0x400 in dwords).
547 */
548 if (adev->asic_type >= CHIP_VEGA10)
88dc26e4 549 adev->doorbell.num_doorbells += 0x400;
ec3db8a6 550
8972e5d2
CK
551 adev->doorbell.ptr = ioremap(adev->doorbell.base,
552 adev->doorbell.num_doorbells *
553 sizeof(u32));
554 if (adev->doorbell.ptr == NULL)
d38ceaf9 555 return -ENOMEM;
d38ceaf9
AD
556
557 return 0;
558}
559
560/**
06ec9070 561 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
d38ceaf9
AD
562 *
563 * @adev: amdgpu_device pointer
564 *
565 * Tear down doorbell driver information (CIK)
566 */
06ec9070 567static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
d38ceaf9
AD
568{
569 iounmap(adev->doorbell.ptr);
570 adev->doorbell.ptr = NULL;
571}
572
22cb0164 573
d38ceaf9
AD
574
575/*
06ec9070 576 * amdgpu_device_wb_*()
455a7bc2 577 * Writeback is the method by which the GPU updates special pages in memory
ea81a173 578 * with the status of certain GPU events (fences, ring pointers, etc.).
579 */
580
581/**
06ec9070 582 * amdgpu_device_wb_fini - Disable Writeback and free memory
d38ceaf9
AD
583 *
584 * @adev: amdgpu_device pointer
585 *
586 * Disables Writeback and frees the Writeback memory (all asics).
587 * Used at driver shutdown.
588 */
06ec9070 589static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
d38ceaf9
AD
590{
591 if (adev->wb.wb_obj) {
a76ed485
AD
592 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
593 &adev->wb.gpu_addr,
594 (void **)&adev->wb.wb);
d38ceaf9
AD
595 adev->wb.wb_obj = NULL;
596 }
597}
598
599/**
06ec9070 600 * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
d38ceaf9
AD
601 *
602 * @adev: amdgpu_device pointer
603 *
455a7bc2 604 * Initializes writeback and allocates writeback memory (all asics).
d38ceaf9
AD
605 * Used at driver startup.
606 * Returns 0 on success or a negative error code on failure.
607 */
06ec9070 608static int amdgpu_device_wb_init(struct amdgpu_device *adev)
d38ceaf9
AD
609{
610 int r;
611
612 if (adev->wb.wb_obj == NULL) {
97407b63
AD
613 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
614 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
a76ed485
AD
615 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
616 &adev->wb.wb_obj, &adev->wb.gpu_addr,
617 (void **)&adev->wb.wb);
d38ceaf9
AD
618 if (r) {
619 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
620 return r;
621 }
d38ceaf9
AD
622
623 adev->wb.num_wb = AMDGPU_MAX_WB;
624 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
625
626 /* clear wb memory */
73469585 627 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
d38ceaf9
AD
628 }
629
630 return 0;
631}
632
633/**
131b4b36 634 * amdgpu_device_wb_get - Allocate a wb entry
d38ceaf9
AD
635 *
636 * @adev: amdgpu_device pointer
637 * @wb: wb index
638 *
639 * Allocate a wb slot for use by the driver (all asics).
640 * Returns 0 on success or -EINVAL on failure.
641 */
131b4b36 642int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
d38ceaf9
AD
643{
644 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
d38ceaf9 645
97407b63 646 if (offset < adev->wb.num_wb) {
7014285a 647 __set_bit(offset, adev->wb.used);
63ae07ca 648 *wb = offset << 3; /* convert to dw offset */
0915fdbc
ML
649 return 0;
650 } else {
651 return -EINVAL;
652 }
653}
654
d38ceaf9 655/**
131b4b36 656 * amdgpu_device_wb_free - Free a wb entry
d38ceaf9
AD
657 *
658 * @adev: amdgpu_device pointer
659 * @wb: wb index
660 *
661 * Free a wb slot allocated for use by the driver (all asics)
662 */
131b4b36 663void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
d38ceaf9 664{
73469585 665 wb >>= 3;
d38ceaf9 666 if (wb < adev->wb.num_wb)
73469585 667 __clear_bit(wb, adev->wb.used);
d38ceaf9
AD
668}
669
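/*
 * Illustrative usage (a sketch, error handling trimmed): the returned
 * index is a dword offset into adev->wb.wb, and the matching GPU address
 * is adev->wb.gpu_addr plus that offset in bytes:
 *
 *	u32 wb_idx;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb_idx)) {
 *		u64 gpu_addr = adev->wb.gpu_addr + (wb_idx * 4);
 *		volatile u32 *cpu_ptr = &adev->wb.wb[wb_idx];
 *
 *		... let the GPU write status to gpu_addr, poll *cpu_ptr ...
 *
 *		amdgpu_device_wb_free(adev, wb_idx);
 *	}
 */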
d6895ad3
CK
670/**
671 * amdgpu_device_resize_fb_bar - try to resize FB BAR
672 *
673 * @adev: amdgpu_device pointer
674 *
675 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
676 * to fail, but if any of the BARs is not accessible after the size we abort
677 * driver loading by returning -ENODEV.
678 */
679int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
680{
770d13b1 681 u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
d6895ad3 682 u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
31b8adab
CK
683 struct pci_bus *root;
684 struct resource *res;
685 unsigned i;
d6895ad3
CK
686 u16 cmd;
687 int r;
688
0c03b912 689 /* Bypass for VF */
690 if (amdgpu_sriov_vf(adev))
691 return 0;
692
31b8adab
CK
693 /* Check if the root BUS has 64bit memory resources */
694 root = adev->pdev->bus;
695 while (root->parent)
696 root = root->parent;
697
698 pci_bus_for_each_resource(root, res, i) {
0ebb7c54 699 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
31b8adab
CK
700 res->start > 0x100000000ull)
701 break;
702 }
703
704 /* Trying to resize is pointless without a root hub window above 4GB */
705 if (!res)
706 return 0;
707
d6895ad3
CK
708 /* Disable memory decoding while we change the BAR addresses and size */
709 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
710 pci_write_config_word(adev->pdev, PCI_COMMAND,
711 cmd & ~PCI_COMMAND_MEMORY);
712
713 /* Free the VRAM and doorbell BAR, we most likely need to move both. */
06ec9070 714 amdgpu_device_doorbell_fini(adev);
d6895ad3
CK
715 if (adev->asic_type >= CHIP_BONAIRE)
716 pci_release_resource(adev->pdev, 2);
717
718 pci_release_resource(adev->pdev, 0);
719
720 r = pci_resize_resource(adev->pdev, 0, rbar_size);
721 if (r == -ENOSPC)
722 DRM_INFO("Not enough PCI address space for a large BAR.");
723 else if (r && r != -ENOTSUPP)
724 DRM_ERROR("Problem resizing BAR0 (%d).", r);
725
726 pci_assign_unassigned_bus_resources(adev->pdev->bus);
727
728 /* When the doorbell or fb BAR isn't available we have no chance of
729 * using the device.
730 */
06ec9070 731 r = amdgpu_device_doorbell_init(adev);
d6895ad3
CK
732 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
733 return -ENODEV;
734
735 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
736
737 return 0;
738}
a05502e5 739
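/*
 * Worked example for the rbar_size computation above (illustrative
 * numbers): with 8 GB of VRAM, space_needed is 0x200000000,
 * (space_needed >> 20) | 1 is 0x2001, order_base_2() of that is 14, so
 * rbar_size is 13, the PCI resizable BAR encoding for 1MB << 13 = 8 GB.
 */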
d38ceaf9
AD
740/*
741 * GPU helpers function.
742 */
743/**
39c640c0 744 * amdgpu_device_need_post - check if the hw need post or not
d38ceaf9
AD
745 *
746 * @adev: amdgpu_device pointer
747 *
c836fec5
JQ
748 * Check if the asic has been initialized (all asics) at driver startup,
749 * or whether a post is needed because a hw reset was performed.
750 * Returns true if post is needed, false if not.
d38ceaf9 751 */
39c640c0 752bool amdgpu_device_need_post(struct amdgpu_device *adev)
d38ceaf9
AD
753{
754 uint32_t reg;
755
bec86378
ML
756 if (amdgpu_sriov_vf(adev))
757 return false;
758
759 if (amdgpu_passthrough(adev)) {
1da2c326
ML
760 /* for FIJI: in the whole GPU pass-through virtualization case, after a VM
761 * reboot some old SMC firmware still needs the driver to do a vPost,
762 * otherwise the GPU hangs; SMC firmware above version 22.15 doesn't have
763 * this flaw, so we force vPost only for SMC versions below 22.15
bec86378
ML
764 */
765 if (adev->asic_type == CHIP_FIJI) {
766 int err;
767 uint32_t fw_ver;
768 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
769 /* force vPost if error occurred */
770 if (err)
771 return true;
772
773 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1da2c326
ML
774 if (fw_ver < 0x00160e00)
775 return true;
bec86378 776 }
bec86378 777 }
91fe77eb 778
779 if (adev->has_hw_reset) {
780 adev->has_hw_reset = false;
781 return true;
782 }
783
784 /* bios scratch used on CIK+ */
785 if (adev->asic_type >= CHIP_BONAIRE)
786 return amdgpu_atombios_scratch_need_asic_init(adev);
787
788 /* check MEM_SIZE for older asics */
789 reg = amdgpu_asic_get_config_memsize(adev);
790
791 if ((reg != 0) && (reg != 0xffffffff))
792 return false;
793
794 return true;
bec86378
ML
795}
796
d38ceaf9
AD
797/* if we get transitioned to only one device, take VGA back */
798/**
06ec9070 799 * amdgpu_device_vga_set_decode - enable/disable vga decode
d38ceaf9
AD
800 *
801 * @cookie: amdgpu_device pointer
802 * @state: enable/disable vga decode
803 *
804 * Enable/disable vga decode (all asics).
805 * Returns VGA resource flags.
806 */
06ec9070 807static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
d38ceaf9
AD
808{
809 struct amdgpu_device *adev = cookie;
810 amdgpu_asic_set_vga_state(adev, state);
811 if (state)
812 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
813 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
814 else
815 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
816}
817
e3ecdffa
AD
818/**
819 * amdgpu_device_check_block_size - validate the vm block size
820 *
821 * @adev: amdgpu_device pointer
822 *
823 * Validates the vm block size specified via module parameter.
824 * The vm block size defines the number of bits in the page table versus the
825 * page directory; a page is 4KB so we have a 12 bit offset, a minimum of 9
826 * bits in the page table and the remaining bits in the page directory.
827 */
06ec9070 828static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
a1adf8be
CZ
829{
830 /* defines number of bits in page table versus page directory,
831 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
832 * page table and the remaining bits are in the page directory */
bab4fee7
JZ
833 if (amdgpu_vm_block_size == -1)
834 return;
a1adf8be 835
bab4fee7 836 if (amdgpu_vm_block_size < 9) {
a1adf8be
CZ
837 dev_warn(adev->dev, "VM page table size (%d) too small\n",
838 amdgpu_vm_block_size);
97489129 839 amdgpu_vm_block_size = -1;
a1adf8be 840 }
a1adf8be
CZ
841}
842
e3ecdffa
AD
843/**
844 * amdgpu_device_check_vm_size - validate the vm size
845 *
846 * @adev: amdgpu_device pointer
847 *
848 * Validates the vm size in GB specified via module parameter.
849 * The VM size is the size of the GPU virtual memory space in GB.
850 */
06ec9070 851static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
83ca145d 852{
64dab074
AD
853 /* no need to check the default value */
854 if (amdgpu_vm_size == -1)
855 return;
856
83ca145d
ZJ
857 if (amdgpu_vm_size < 1) {
858 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
859 amdgpu_vm_size);
f3368128 860 amdgpu_vm_size = -1;
83ca145d 861 }
83ca145d
ZJ
862}
863
7951e376
RZ
864static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
865{
866 struct sysinfo si;
867 bool is_os_64 = (sizeof(void *) == 8) ? true : false;
868 uint64_t total_memory;
869 uint64_t dram_size_seven_GB = 0x1B8000000;
870 uint64_t dram_size_three_GB = 0xB8000000;
871
872 if (amdgpu_smu_memory_pool_size == 0)
873 return;
874
875 if (!is_os_64) {
876 DRM_WARN("Not 64-bit OS, feature not supported\n");
877 goto def_value;
878 }
879 si_meminfo(&si);
880 total_memory = (uint64_t)si.totalram * si.mem_unit;
881
882 if ((amdgpu_smu_memory_pool_size == 1) ||
883 (amdgpu_smu_memory_pool_size == 2)) {
884 if (total_memory < dram_size_three_GB)
885 goto def_value1;
886 } else if ((amdgpu_smu_memory_pool_size == 4) ||
887 (amdgpu_smu_memory_pool_size == 8)) {
888 if (total_memory < dram_size_seven_GB)
889 goto def_value1;
890 } else {
891 DRM_WARN("Smu memory pool size not supported\n");
892 goto def_value;
893 }
894 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
895
896 return;
897
898def_value1:
899 DRM_WARN("No enough system memory\n");
900def_value:
901 adev->pm.smu_prv_buffer_size = 0;
902}
903
d38ceaf9 904/**
06ec9070 905 * amdgpu_device_check_arguments - validate module params
d38ceaf9
AD
906 *
907 * @adev: amdgpu_device pointer
908 *
909 * Validates certain module parameters and updates
910 * the associated values used by the driver (all asics).
911 */
06ec9070 912static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
d38ceaf9 913{
5b011235
CZ
914 if (amdgpu_sched_jobs < 4) {
915 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
916 amdgpu_sched_jobs);
917 amdgpu_sched_jobs = 4;
76117507 918 } else if (!is_power_of_2(amdgpu_sched_jobs)){
5b011235
CZ
919 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
920 amdgpu_sched_jobs);
921 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
922 }
d38ceaf9 923
83e74db6 924 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
f9321cc4
CK
925 /* gart size must be greater or equal to 32M */
926 dev_warn(adev->dev, "gart size (%d) too small\n",
927 amdgpu_gart_size);
83e74db6 928 amdgpu_gart_size = -1;
d38ceaf9
AD
929 }
930
36d38372 931 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
c4e1a13a 932 /* gtt size must be greater or equal to 32M */
36d38372
CK
933 dev_warn(adev->dev, "gtt size (%d) too small\n",
934 amdgpu_gtt_size);
935 amdgpu_gtt_size = -1;
d38ceaf9
AD
936 }
937
d07f14be
RH
938 /* valid range is between 4 and 9 inclusive */
939 if (amdgpu_vm_fragment_size != -1 &&
940 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
941 dev_warn(adev->dev, "valid range is between 4 and 9\n");
942 amdgpu_vm_fragment_size = -1;
943 }
944
7951e376
RZ
945 amdgpu_device_check_smu_prv_buffer_size(adev);
946
06ec9070 947 amdgpu_device_check_vm_size(adev);
d38ceaf9 948
06ec9070 949 amdgpu_device_check_block_size(adev);
6a7f76e7 950
526bae37 951 if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
76117507 952 !is_power_of_2(amdgpu_vram_page_split))) {
6a7f76e7
CK
953 dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
954 amdgpu_vram_page_split);
955 amdgpu_vram_page_split = 1024;
956 }
8854695a
AG
957
958 if (amdgpu_lockup_timeout == 0) {
959 dev_warn(adev->dev, "lockup_timeout must be > 0, adjusting to 10000\n");
960 amdgpu_lockup_timeout = 10000;
961 }
19aede77
AD
962
963 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
d38ceaf9
AD
964}
965
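/*
 * Illustrative effect of the checks above (the values and parameter names
 * are examples only): loading the driver with sched_jobs=6 rounds the
 * value up to roundup_pow_of_two(6) == 8, while an undersized gtt_size=16
 * is rejected and reset to the automatic value of -1.
 */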
966/**
967 * amdgpu_switcheroo_set_state - set switcheroo state
968 *
969 * @pdev: pci dev pointer
1694467b 970 * @state: vga_switcheroo state
d38ceaf9
AD
971 *
972 * Callback for the switcheroo driver. Suspends or resumes
973 * the asics before or after it is powered up using ACPI methods.
974 */
975static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
976{
977 struct drm_device *dev = pci_get_drvdata(pdev);
978
979 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
980 return;
981
982 if (state == VGA_SWITCHEROO_ON) {
7ca85295 983 pr_info("amdgpu: switched on\n");
d38ceaf9
AD
984 /* don't suspend or resume card normally */
985 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
986
810ddc3a 987 amdgpu_device_resume(dev, true, true);
d38ceaf9 988
d38ceaf9
AD
989 dev->switch_power_state = DRM_SWITCH_POWER_ON;
990 drm_kms_helper_poll_enable(dev);
991 } else {
7ca85295 992 pr_info("amdgpu: switched off\n");
d38ceaf9
AD
993 drm_kms_helper_poll_disable(dev);
994 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
810ddc3a 995 amdgpu_device_suspend(dev, true, true);
d38ceaf9
AD
996 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
997 }
998}
999
1000/**
1001 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1002 *
1003 * @pdev: pci dev pointer
1004 *
1005 * Callback for the switcheroo driver. Checks whether the switcheroo
1006 * state can be changed.
1007 * Returns true if the state can be changed, false if not.
1008 */
1009static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1010{
1011 struct drm_device *dev = pci_get_drvdata(pdev);
1012
1013 /*
1014 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1015 * locking inversion with the driver load path. And the access here is
1016 * completely racy anyway. So don't bother with locking for now.
1017 */
1018 return dev->open_count == 0;
1019}
1020
1021static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1022 .set_gpu_state = amdgpu_switcheroo_set_state,
1023 .reprobe = NULL,
1024 .can_switch = amdgpu_switcheroo_can_switch,
1025};
1026
e3ecdffa
AD
1027/**
1028 * amdgpu_device_ip_set_clockgating_state - set the CG state
1029 *
87e3f136 1030 * @dev: amdgpu_device pointer
e3ecdffa
AD
1031 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1032 * @state: clockgating state (gate or ungate)
1033 *
1034 * Sets the requested clockgating state for all instances of
1035 * the hardware IP specified.
1036 * Returns the error code from the last instance.
1037 */
43fa561f 1038int amdgpu_device_ip_set_clockgating_state(void *dev,
2990a1fc
AD
1039 enum amd_ip_block_type block_type,
1040 enum amd_clockgating_state state)
d38ceaf9 1041{
43fa561f 1042 struct amdgpu_device *adev = dev;
d38ceaf9
AD
1043 int i, r = 0;
1044
1045 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1046 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1047 continue;
c722865a
RZ
1048 if (adev->ip_blocks[i].version->type != block_type)
1049 continue;
1050 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1051 continue;
1052 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1053 (void *)adev, state);
1054 if (r)
1055 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1056 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
1057 }
1058 return r;
1059}
1060
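/*
 * Illustrative usage (a sketch; the block type and target state are just
 * examples): an IP-specific driver can gate or ungate every instance of a
 * block type in one call, e.g.
 *
 *	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       AMD_CG_STATE_GATE);
 */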
e3ecdffa
AD
1061/**
1062 * amdgpu_device_ip_set_powergating_state - set the PG state
1063 *
87e3f136 1064 * @dev: amdgpu_device pointer
e3ecdffa
AD
1065 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1066 * @state: powergating state (gate or ungate)
1067 *
1068 * Sets the requested powergating state for all instances of
1069 * the hardware IP specified.
1070 * Returns the error code from the last instance.
1071 */
43fa561f 1072int amdgpu_device_ip_set_powergating_state(void *dev,
2990a1fc
AD
1073 enum amd_ip_block_type block_type,
1074 enum amd_powergating_state state)
d38ceaf9 1075{
43fa561f 1076 struct amdgpu_device *adev = dev;
d38ceaf9
AD
1077 int i, r = 0;
1078
1079 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1080 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1081 continue;
c722865a
RZ
1082 if (adev->ip_blocks[i].version->type != block_type)
1083 continue;
1084 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1085 continue;
1086 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1087 (void *)adev, state);
1088 if (r)
1089 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1090 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
1091 }
1092 return r;
1093}
1094
e3ecdffa
AD
1095/**
1096 * amdgpu_device_ip_get_clockgating_state - get the CG state
1097 *
1098 * @adev: amdgpu_device pointer
1099 * @flags: clockgating feature flags
1100 *
1101 * Walks the list of IPs on the device and updates the clockgating
1102 * flags for each IP.
1103 * Updates @flags with the feature flags for each hardware IP where
1104 * clockgating is enabled.
1105 */
2990a1fc
AD
1106void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1107 u32 *flags)
6cb2d4e4
HR
1108{
1109 int i;
1110
1111 for (i = 0; i < adev->num_ip_blocks; i++) {
1112 if (!adev->ip_blocks[i].status.valid)
1113 continue;
1114 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1115 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1116 }
1117}
1118
e3ecdffa
AD
1119/**
1120 * amdgpu_device_ip_wait_for_idle - wait for idle
1121 *
1122 * @adev: amdgpu_device pointer
1123 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1124 *
1125 * Waits for the requested hardware IP to be idle.
1126 * Returns 0 for success or a negative error code on failure.
1127 */
2990a1fc
AD
1128int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1129 enum amd_ip_block_type block_type)
5dbbb60b
AD
1130{
1131 int i, r;
1132
1133 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1134 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1135 continue;
a1255107
AD
1136 if (adev->ip_blocks[i].version->type == block_type) {
1137 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
5dbbb60b
AD
1138 if (r)
1139 return r;
1140 break;
1141 }
1142 }
1143 return 0;
1144
1145}
1146
e3ecdffa
AD
1147/**
1148 * amdgpu_device_ip_is_idle - is the hardware IP idle
1149 *
1150 * @adev: amdgpu_device pointer
1151 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1152 *
1153 * Check if the hardware IP is idle or not.
1154 * Returns true if the IP is idle, false if not.
1155 */
2990a1fc
AD
1156bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1157 enum amd_ip_block_type block_type)
5dbbb60b
AD
1158{
1159 int i;
1160
1161 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1162 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1163 continue;
a1255107
AD
1164 if (adev->ip_blocks[i].version->type == block_type)
1165 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
5dbbb60b
AD
1166 }
1167 return true;
1168
1169}
1170
e3ecdffa
AD
1171/**
1172 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1173 *
1174 * @adev: amdgpu_device pointer
87e3f136 1175 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
e3ecdffa
AD
1176 *
1177 * Returns a pointer to the hardware IP block structure
1178 * if it exists for the asic, otherwise NULL.
1179 */
2990a1fc
AD
1180struct amdgpu_ip_block *
1181amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1182 enum amd_ip_block_type type)
d38ceaf9
AD
1183{
1184 int i;
1185
1186 for (i = 0; i < adev->num_ip_blocks; i++)
a1255107 1187 if (adev->ip_blocks[i].version->type == type)
d38ceaf9
AD
1188 return &adev->ip_blocks[i];
1189
1190 return NULL;
1191}
1192
1193/**
2990a1fc 1194 * amdgpu_device_ip_block_version_cmp
d38ceaf9
AD
1195 *
1196 * @adev: amdgpu_device pointer
5fc3aeeb 1197 * @type: enum amd_ip_block_type
d38ceaf9
AD
1198 * @major: major version
1199 * @minor: minor version
1200 *
1201 * Returns 0 if the installed version is equal to or greater than @major.@minor,
1202 * 1 if it is smaller or the ip_block doesn't exist.
1203 */
2990a1fc
AD
1204int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1205 enum amd_ip_block_type type,
1206 u32 major, u32 minor)
d38ceaf9 1207{
2990a1fc 1208 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
d38ceaf9 1209
a1255107
AD
1210 if (ip_block && ((ip_block->version->major > major) ||
1211 ((ip_block->version->major == major) &&
1212 (ip_block->version->minor >= minor))))
d38ceaf9
AD
1213 return 0;
1214
1215 return 1;
1216}
1217
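/*
 * Illustrative usage (a sketch; the block type and version numbers are
 * made up): callers gate features on a minimum IP version, where a return
 * of 0 means "at least this version is present":
 *
 *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_VCE,
 *						3, 0)) {
 *		... VCE 3.0 or newer is available ...
 *	}
 */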
a1255107 1218/**
2990a1fc 1219 * amdgpu_device_ip_block_add
a1255107
AD
1220 *
1221 * @adev: amdgpu_device pointer
1222 * @ip_block_version: pointer to the IP to add
1223 *
1224 * Adds the IP block driver information to the collection of IPs
1225 * on the asic.
1226 */
2990a1fc
AD
1227int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1228 const struct amdgpu_ip_block_version *ip_block_version)
a1255107
AD
1229{
1230 if (!ip_block_version)
1231 return -EINVAL;
1232
e966a725 1233 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
a0bae357
HR
1234 ip_block_version->funcs->name);
1235
a1255107
AD
1236 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1237
1238 return 0;
1239}
1240
e3ecdffa
AD
1241/**
1242 * amdgpu_device_enable_virtual_display - enable virtual display feature
1243 *
1244 * @adev: amdgpu_device pointer
1245 *
1246 * Enables the virtual display feature if the user has enabled it via
1247 * the module parameter virtual_display. This feature provides virtual
1248 * display hardware on headless boards or in virtualized environments.
1249 * This function parses and validates the configuration string specified by
1250 * the user and configures the virtual display configuration (number of
1251 * virtual connectors, crtcs, etc.) specified.
1252 */
483ef985 1253static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
9accf2fd
ED
1254{
1255 adev->enable_virtual_display = false;
1256
1257 if (amdgpu_virtual_display) {
1258 struct drm_device *ddev = adev->ddev;
1259 const char *pci_address_name = pci_name(ddev->pdev);
0f66356d 1260 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
9accf2fd
ED
1261
1262 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1263 pciaddstr_tmp = pciaddstr;
0f66356d
ED
1264 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1265 pciaddname = strsep(&pciaddname_tmp, ",");
967de2a9
YT
1266 if (!strcmp("all", pciaddname)
1267 || !strcmp(pci_address_name, pciaddname)) {
0f66356d
ED
1268 long num_crtc;
1269 int res = -1;
1270
9accf2fd 1271 adev->enable_virtual_display = true;
0f66356d
ED
1272
1273 if (pciaddname_tmp)
1274 res = kstrtol(pciaddname_tmp, 10,
1275 &num_crtc);
1276
1277 if (!res) {
1278 if (num_crtc < 1)
1279 num_crtc = 1;
1280 if (num_crtc > 6)
1281 num_crtc = 6;
1282 adev->mode_info.num_crtc = num_crtc;
1283 } else {
1284 adev->mode_info.num_crtc = 1;
1285 }
9accf2fd
ED
1286 break;
1287 }
1288 }
1289
0f66356d
ED
1290 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1291 amdgpu_virtual_display, pci_address_name,
1292 adev->enable_virtual_display, adev->mode_info.num_crtc);
9accf2fd
ED
1293
1294 kfree(pciaddstr);
1295 }
1296}
1297
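/*
 * Illustrative configuration (the bus address is hypothetical and the
 * module parameter name is assumed from amdgpu_virtual_display): the
 * string is a ';' separated list of PCI addresses, each optionally
 * followed by ',<num_crtc>', so
 *
 *	modprobe amdgpu virtual_display=0000:03:00.0,2
 *
 * enables two virtual crtcs on that device, and "all" matches every
 * device probed by the driver; num_crtc is clamped to the 1-6 range.
 */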
e3ecdffa
AD
1298/**
1299 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1300 *
1301 * @adev: amdgpu_device pointer
1302 *
1303 * Parses the asic configuration parameters specified in the gpu info
1304 * firmware and makes them available to the driver for use in configuring
1305 * the asic.
1306 * Returns 0 on success, -EINVAL on failure.
1307 */
e2a75f88
AD
1308static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1309{
e2a75f88
AD
1310 const char *chip_name;
1311 char fw_name[30];
1312 int err;
1313 const struct gpu_info_firmware_header_v1_0 *hdr;
1314
ab4fe3e1
HR
1315 adev->firmware.gpu_info_fw = NULL;
1316
e2a75f88
AD
1317 switch (adev->asic_type) {
1318 case CHIP_TOPAZ:
1319 case CHIP_TONGA:
1320 case CHIP_FIJI:
e2a75f88 1321 case CHIP_POLARIS10:
cc07f18d 1322 case CHIP_POLARIS11:
e2a75f88 1323 case CHIP_POLARIS12:
cc07f18d 1324 case CHIP_VEGAM:
e2a75f88
AD
1325 case CHIP_CARRIZO:
1326 case CHIP_STONEY:
1327#ifdef CONFIG_DRM_AMDGPU_SI
1328 case CHIP_VERDE:
1329 case CHIP_TAHITI:
1330 case CHIP_PITCAIRN:
1331 case CHIP_OLAND:
1332 case CHIP_HAINAN:
1333#endif
1334#ifdef CONFIG_DRM_AMDGPU_CIK
1335 case CHIP_BONAIRE:
1336 case CHIP_HAWAII:
1337 case CHIP_KAVERI:
1338 case CHIP_KABINI:
1339 case CHIP_MULLINS:
1340#endif
27c0bc71 1341 case CHIP_VEGA20:
e2a75f88
AD
1342 default:
1343 return 0;
1344 case CHIP_VEGA10:
1345 chip_name = "vega10";
1346 break;
3f76dced
AD
1347 case CHIP_VEGA12:
1348 chip_name = "vega12";
1349 break;
2d2e5e7e 1350 case CHIP_RAVEN:
54c4d17e
FX
1351 if (adev->rev_id >= 8)
1352 chip_name = "raven2";
741deade
AD
1353 else if (adev->pdev->device == 0x15d8)
1354 chip_name = "picasso";
54c4d17e
FX
1355 else
1356 chip_name = "raven";
2d2e5e7e 1357 break;
e2a75f88
AD
1358 }
1359
1360 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
ab4fe3e1 1361 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
e2a75f88
AD
1362 if (err) {
1363 dev_err(adev->dev,
1364 "Failed to load gpu_info firmware \"%s\"\n",
1365 fw_name);
1366 goto out;
1367 }
ab4fe3e1 1368 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
e2a75f88
AD
1369 if (err) {
1370 dev_err(adev->dev,
1371 "Failed to validate gpu_info firmware \"%s\"\n",
1372 fw_name);
1373 goto out;
1374 }
1375
ab4fe3e1 1376 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
e2a75f88
AD
1377 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1378
1379 switch (hdr->version_major) {
1380 case 1:
1381 {
1382 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
ab4fe3e1 1383 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
e2a75f88
AD
1384 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1385
b5ab16bf
AD
1386 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1387 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1388 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1389 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
e2a75f88 1390 adev->gfx.config.max_texture_channel_caches =
b5ab16bf
AD
1391 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1392 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1393 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1394 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1395 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
e2a75f88 1396 adev->gfx.config.double_offchip_lds_buf =
b5ab16bf
AD
1397 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1398 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
51fd0370
HZ
1399 adev->gfx.cu_info.max_waves_per_simd =
1400 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1401 adev->gfx.cu_info.max_scratch_slots_per_cu =
1402 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1403 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
e2a75f88
AD
1404 break;
1405 }
1406 default:
1407 dev_err(adev->dev,
1408 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1409 err = -EINVAL;
1410 goto out;
1411 }
1412out:
e2a75f88
AD
1413 return err;
1414}
1415
e3ecdffa
AD
1416/**
1417 * amdgpu_device_ip_early_init - run early init for hardware IPs
1418 *
1419 * @adev: amdgpu_device pointer
1420 *
1421 * Early initialization pass for hardware IPs. The hardware IPs that make
1422 * up each asic are discovered and each IP's early_init callback is run. This
1423 * is the first stage in initializing the asic.
1424 * Returns 0 on success, negative error code on failure.
1425 */
06ec9070 1426static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
d38ceaf9 1427{
aaa36a97 1428 int i, r;
d38ceaf9 1429
483ef985 1430 amdgpu_device_enable_virtual_display(adev);
a6be7570 1431
d38ceaf9 1432 switch (adev->asic_type) {
aaa36a97
AD
1433 case CHIP_TOPAZ:
1434 case CHIP_TONGA:
48299f95 1435 case CHIP_FIJI:
2cc0c0b5 1436 case CHIP_POLARIS10:
32cc7e53 1437 case CHIP_POLARIS11:
c4642a47 1438 case CHIP_POLARIS12:
32cc7e53 1439 case CHIP_VEGAM:
aaa36a97 1440 case CHIP_CARRIZO:
39bb0c92
SL
1441 case CHIP_STONEY:
1442 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
aaa36a97
AD
1443 adev->family = AMDGPU_FAMILY_CZ;
1444 else
1445 adev->family = AMDGPU_FAMILY_VI;
1446
1447 r = vi_set_ip_blocks(adev);
1448 if (r)
1449 return r;
1450 break;
33f34802
KW
1451#ifdef CONFIG_DRM_AMDGPU_SI
1452 case CHIP_VERDE:
1453 case CHIP_TAHITI:
1454 case CHIP_PITCAIRN:
1455 case CHIP_OLAND:
1456 case CHIP_HAINAN:
295d0daf 1457 adev->family = AMDGPU_FAMILY_SI;
33f34802
KW
1458 r = si_set_ip_blocks(adev);
1459 if (r)
1460 return r;
1461 break;
1462#endif
a2e73f56
AD
1463#ifdef CONFIG_DRM_AMDGPU_CIK
1464 case CHIP_BONAIRE:
1465 case CHIP_HAWAII:
1466 case CHIP_KAVERI:
1467 case CHIP_KABINI:
1468 case CHIP_MULLINS:
1469 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1470 adev->family = AMDGPU_FAMILY_CI;
1471 else
1472 adev->family = AMDGPU_FAMILY_KV;
1473
1474 r = cik_set_ip_blocks(adev);
1475 if (r)
1476 return r;
1477 break;
1478#endif
e48a3cd9
AD
1479 case CHIP_VEGA10:
1480 case CHIP_VEGA12:
e4bd8170 1481 case CHIP_VEGA20:
e48a3cd9 1482 case CHIP_RAVEN:
741deade 1483 if (adev->asic_type == CHIP_RAVEN)
2ca8a5d2
CZ
1484 adev->family = AMDGPU_FAMILY_RV;
1485 else
1486 adev->family = AMDGPU_FAMILY_AI;
460826e6
KW
1487
1488 r = soc15_set_ip_blocks(adev);
1489 if (r)
1490 return r;
1491 break;
d38ceaf9
AD
1492 default:
1493 /* FIXME: not supported yet */
1494 return -EINVAL;
1495 }
1496
e2a75f88
AD
1497 r = amdgpu_device_parse_gpu_info_fw(adev);
1498 if (r)
1499 return r;
1500
1884734a 1501 amdgpu_amdkfd_device_probe(adev);
1502
3149d9da
XY
1503 if (amdgpu_sriov_vf(adev)) {
1504 r = amdgpu_virt_request_full_gpu(adev, true);
1505 if (r)
5ffa61c1 1506 return -EAGAIN;
3149d9da
XY
1507 }
1508
3b94fb10 1509 adev->pm.pp_feature = amdgpu_pp_feature_mask;
00f54b97 1510
d38ceaf9
AD
1511 for (i = 0; i < adev->num_ip_blocks; i++) {
1512 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
ed8cf00c
HR
1513 DRM_ERROR("disabled ip block: %d <%s>\n",
1514 i, adev->ip_blocks[i].version->funcs->name);
a1255107 1515 adev->ip_blocks[i].status.valid = false;
d38ceaf9 1516 } else {
a1255107
AD
1517 if (adev->ip_blocks[i].version->funcs->early_init) {
1518 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2c1a2784 1519 if (r == -ENOENT) {
a1255107 1520 adev->ip_blocks[i].status.valid = false;
2c1a2784 1521 } else if (r) {
a1255107
AD
1522 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1523 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1524 return r;
2c1a2784 1525 } else {
a1255107 1526 adev->ip_blocks[i].status.valid = true;
2c1a2784 1527 }
974e6b64 1528 } else {
a1255107 1529 adev->ip_blocks[i].status.valid = true;
d38ceaf9 1530 }
d38ceaf9
AD
1531 }
1532 }
1533
395d1fb9
NH
1534 adev->cg_flags &= amdgpu_cg_mask;
1535 adev->pg_flags &= amdgpu_pg_mask;
1536
d38ceaf9
AD
1537 return 0;
1538}
1539
0a4f2520
RZ
1540static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
1541{
1542 int i, r;
1543
1544 for (i = 0; i < adev->num_ip_blocks; i++) {
1545 if (!adev->ip_blocks[i].status.sw)
1546 continue;
1547 if (adev->ip_blocks[i].status.hw)
1548 continue;
1549 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1550 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
1551 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1552 if (r) {
1553 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1554 adev->ip_blocks[i].version->funcs->name, r);
1555 return r;
1556 }
1557 adev->ip_blocks[i].status.hw = true;
1558 }
1559 }
1560
1561 return 0;
1562}
1563
1564static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
1565{
1566 int i, r;
1567
1568 for (i = 0; i < adev->num_ip_blocks; i++) {
1569 if (!adev->ip_blocks[i].status.sw)
1570 continue;
1571 if (adev->ip_blocks[i].status.hw)
1572 continue;
1573 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1574 if (r) {
1575 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1576 adev->ip_blocks[i].version->funcs->name, r);
1577 return r;
1578 }
1579 adev->ip_blocks[i].status.hw = true;
1580 }
1581
1582 return 0;
1583}
1584
7a3e0bb2
RZ
1585static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
1586{
1587 int r = 0;
1588 int i;
1589
1590 if (adev->asic_type >= CHIP_VEGA10) {
1591 for (i = 0; i < adev->num_ip_blocks; i++) {
1592 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
1593 if (adev->in_gpu_reset || adev->in_suspend) {
1594 if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset)
1595 break; /* sriov gpu reset, psp need to do hw_init before IH because of hw limit */
1596 r = adev->ip_blocks[i].version->funcs->resume(adev);
1597 if (r) {
1598 DRM_ERROR("resume of IP block <%s> failed %d\n",
1599 adev->ip_blocks[i].version->funcs->name, r);
1600 return r;
1601 }
1602 } else {
1603 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1604 if (r) {
1605 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1606 adev->ip_blocks[i].version->funcs->name, r);
1607 return r;
1608 }
1609 }
1610 adev->ip_blocks[i].status.hw = true;
1611 }
1612 }
1613 }
1614
91eec27e 1615 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
7a3e0bb2
RZ
1616 r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
1617 if (r) {
1618 pr_err("firmware loading failed\n");
1619 return r;
1620 }
1621 }
1622
1623 return 0;
1624}
1625
e3ecdffa
AD
1626/**
1627 * amdgpu_device_ip_init - run init for hardware IPs
1628 *
1629 * @adev: amdgpu_device pointer
1630 *
1631 * Main initialization pass for hardware IPs. The list of all the hardware
1632 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
1633 * are run. sw_init initializes the software state associated with each IP
1634 * and hw_init initializes the hardware associated with each IP.
1635 * Returns 0 on success, negative error code on failure.
1636 */
06ec9070 1637static int amdgpu_device_ip_init(struct amdgpu_device *adev)
d38ceaf9
AD
1638{
1639 int i, r;
1640
1641 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1642 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1643 continue;
a1255107 1644 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2c1a2784 1645 if (r) {
a1255107
AD
1646 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1647 adev->ip_blocks[i].version->funcs->name, r);
72d3f592 1648 goto init_failed;
2c1a2784 1649 }
a1255107 1650 adev->ip_blocks[i].status.sw = true;
bfca0289 1651
d38ceaf9 1652 /* need to do gmc hw init early so we can allocate gpu mem */
a1255107 1653 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
06ec9070 1654 r = amdgpu_device_vram_scratch_init(adev);
2c1a2784
AD
1655 if (r) {
1656 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
72d3f592 1657 goto init_failed;
2c1a2784 1658 }
a1255107 1659 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784
AD
1660 if (r) {
1661 DRM_ERROR("hw_init %d failed %d\n", i, r);
72d3f592 1662 goto init_failed;
2c1a2784 1663 }
06ec9070 1664 r = amdgpu_device_wb_init(adev);
2c1a2784 1665 if (r) {
06ec9070 1666 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
72d3f592 1667 goto init_failed;
2c1a2784 1668 }
a1255107 1669 adev->ip_blocks[i].status.hw = true;
2493664f
ML
1670
1671 /* right after GMC hw init, we create CSA */
1672 if (amdgpu_sriov_vf(adev)) {
1e256e27
RZ
1673 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
1674 AMDGPU_GEM_DOMAIN_VRAM,
1675 AMDGPU_CSA_SIZE);
2493664f
ML
1676 if (r) {
1677 DRM_ERROR("allocate CSA failed %d\n", r);
72d3f592 1678 goto init_failed;
2493664f
ML
1679 }
1680 }
d38ceaf9
AD
1681 }
1682 }
1683
c8963ea4
RZ
1684 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
1685 if (r)
72d3f592 1686 goto init_failed;
0a4f2520
RZ
1687
1688 r = amdgpu_device_ip_hw_init_phase1(adev);
1689 if (r)
72d3f592 1690 goto init_failed;
0a4f2520 1691
7a3e0bb2
RZ
1692 r = amdgpu_device_fw_loading(adev);
1693 if (r)
72d3f592 1694 goto init_failed;
7a3e0bb2 1695
0a4f2520
RZ
1696 r = amdgpu_device_ip_hw_init_phase2(adev);
1697 if (r)
72d3f592 1698 goto init_failed;
d38ceaf9 1699
3e2e2ab5
HZ
1700 if (adev->gmc.xgmi.num_physical_nodes > 1)
1701 amdgpu_xgmi_add_device(adev);
1884734a 1702 amdgpu_amdkfd_device_init(adev);
c6332b97 1703
72d3f592 1704init_failed:
d3c117e5 1705 if (amdgpu_sriov_vf(adev)) {
72d3f592
ED
1706 if (!r)
1707 amdgpu_virt_init_data_exchange(adev);
c6332b97 1708 amdgpu_virt_release_full_gpu(adev, true);
d3c117e5 1709 }
c6332b97 1710
72d3f592 1711 return r;
d38ceaf9
AD
1712}
1713
e3ecdffa
AD
1714/**
1715 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
1716 *
1717 * @adev: amdgpu_device pointer
1718 *
1719 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
1720 * this function before a GPU reset. If the value is retained after a
1721 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
1722 */
06ec9070 1723static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
0c49e0b8
CZ
1724{
1725 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1726}
1727
e3ecdffa
AD
1728/**
1729 * amdgpu_device_check_vram_lost - check if vram is valid
1730 *
1731 * @adev: amdgpu_device pointer
1732 *
1733 * Checks the reset magic value written to the gart pointer in VRAM.
1734 * The driver calls this after a GPU reset to see if the contents of
1735 * VRAM have been lost or not.
1736 * Returns true if vram is lost, false if not.
1737 */
06ec9070 1738static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
0c49e0b8
CZ
1739{
1740 return !!memcmp(adev->gart.ptr, adev->reset_magic,
1741 AMDGPU_RESET_MAGIC_NUM);
1742}
1743
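/*
 * Hedged usage sketch, mirroring how the recovery path further down pairs
 * the two helpers above; the simplified caller context is an assumption for
 * illustration only:
 *
 *	amdgpu_device_fill_reset_magic(adev);		// e.g. at the end of late init
 *	...
 *	r = amdgpu_asic_reset(adev);
 *	if (!r && amdgpu_device_check_vram_lost(adev))
 *		atomic_inc(&adev->vram_lost_counter);	// VRAM contents need restoring
 */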
e3ecdffa 1744/**
1112a46b 1745 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
e3ecdffa
AD
1746 *
1747 * @adev: amdgpu_device pointer
1748 *
e3ecdffa 1749 * The list of all the hardware IPs that make up the asic is walked and the
1112a46b
RZ
1750 * set_clockgating_state callbacks are run.
 1751 * The late initialization pass enables clockgating for hardware IPs; the
 1752 * fini and suspend passes disable it.
e3ecdffa
AD
1753 * Returns 0 on success, negative error code on failure.
1754 */
fdd34271 1755
1112a46b
RZ
1756static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
1757 enum amd_clockgating_state state)
d38ceaf9 1758{
1112a46b 1759 int i, j, r;
d38ceaf9 1760
4a2ba394
SL
1761 if (amdgpu_emu_mode == 1)
1762 return 0;
1763
1112a46b
RZ
1764 for (j = 0; j < adev->num_ip_blocks; j++) {
1765 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
a2d31dc3 1766 if (!adev->ip_blocks[i].status.late_initialized)
d38ceaf9 1767 continue;
4a446d55 1768 /* skip CG for VCE/UVD, it's handled specially */
a1255107 1769 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
57716327 1770 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
34319b32 1771 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
57716327 1772 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
4a446d55 1773 /* enable clockgating to save power */
a1255107 1774 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1112a46b 1775 state);
4a446d55
AD
1776 if (r) {
1777 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
a1255107 1778 adev->ip_blocks[i].version->funcs->name, r);
4a446d55
AD
1779 return r;
1780 }
b0b00ff1 1781 }
d38ceaf9 1782 }
06b18f61 1783
c9f96fd5
RZ
1784 return 0;
1785}
1786
1112a46b 1787static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
c9f96fd5 1788{
1112a46b 1789 int i, j, r;
06b18f61 1790
c9f96fd5
RZ
1791 if (amdgpu_emu_mode == 1)
1792 return 0;
1793
1112a46b
RZ
1794 for (j = 0; j < adev->num_ip_blocks; j++) {
1795 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
a2d31dc3 1796 if (!adev->ip_blocks[i].status.late_initialized)
c9f96fd5
RZ
1797 continue;
 1798 /* skip PG for VCE/UVD, it's handled specially */
1799 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1800 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1801 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
1802 adev->ip_blocks[i].version->funcs->set_powergating_state) {
1803 /* enable powergating to save power */
1804 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
1112a46b 1805 state);
c9f96fd5
RZ
1806 if (r) {
1807 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
1808 adev->ip_blocks[i].version->funcs->name, r);
1809 return r;
1810 }
1811 }
1812 }
2dc80b00
S
1813 return 0;
1814}
1815
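/*
 * Minimal ordering sketch, taken from how this file drives the two helpers
 * above: the late init path gates, the fini/suspend paths ungate in the
 * reverse order.
 *
 *	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);	// late init
 *	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
 *	...
 *	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);	// fini / suspend
 *	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 */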
e3ecdffa
AD
1816/**
1817 * amdgpu_device_ip_late_init - run late init for hardware IPs
1818 *
1819 * @adev: amdgpu_device pointer
1820 *
1821 * Late initialization pass for hardware IPs. The list of all the hardware
1822 * IPs that make up the asic is walked and the late_init callbacks are run.
1823 * late_init covers any special initialization that an IP requires
 1824 * after all of the IP blocks have been initialized or something that needs to happen
1825 * late in the init process.
1826 * Returns 0 on success, negative error code on failure.
1827 */
06ec9070 1828static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2dc80b00
S
1829{
1830 int i = 0, r;
1831
1832 for (i = 0; i < adev->num_ip_blocks; i++) {
73f847db 1833 if (!adev->ip_blocks[i].status.hw)
2dc80b00
S
1834 continue;
1835 if (adev->ip_blocks[i].version->funcs->late_init) {
1836 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
1837 if (r) {
1838 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1839 adev->ip_blocks[i].version->funcs->name, r);
1840 return r;
1841 }
2dc80b00 1842 }
73f847db 1843 adev->ip_blocks[i].status.late_initialized = true;
2dc80b00
S
1844 }
1845
1112a46b
RZ
1846 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
1847 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
916ac57f 1848
2c773de2
S
1849 queue_delayed_work(system_wq, &adev->late_init_work,
1850 msecs_to_jiffies(AMDGPU_RESUME_MS));
d38ceaf9 1851
06ec9070 1852 amdgpu_device_fill_reset_magic(adev);
d38ceaf9
AD
1853
1854 return 0;
1855}
1856
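/*
 * Note: the delayed work queued above runs
 * amdgpu_device_ip_late_init_func_handler() (defined below) roughly
 * AMDGPU_RESUME_MS later to perform the IB ring tests.  Callers that need
 * those tests completed synchronously flush the work first, as the resume
 * path in this file does:
 *
 *	flush_delayed_work(&adev->late_init_work);
 */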
e3ecdffa
AD
1857/**
1858 * amdgpu_device_ip_fini - run fini for hardware IPs
1859 *
1860 * @adev: amdgpu_device pointer
1861 *
1862 * Main teardown pass for hardware IPs. The list of all the hardware
1863 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
1864 * are run. hw_fini tears down the hardware associated with each IP
1865 * and sw_fini tears down any software state associated with each IP.
1866 * Returns 0 on success, negative error code on failure.
1867 */
06ec9070 1868static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
d38ceaf9
AD
1869{
1870 int i, r;
1871
a82400b5
AG
1872 if (adev->gmc.xgmi.num_physical_nodes > 1)
1873 amdgpu_xgmi_remove_device(adev);
1874
1884734a 1875 amdgpu_amdkfd_device_fini(adev);
05df1f01
RZ
1876
1877 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
fdd34271
RZ
1878 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
1879
3e96dbfd
AD
1880 /* need to disable SMC first */
1881 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1882 if (!adev->ip_blocks[i].status.hw)
3e96dbfd 1883 continue;
fdd34271 1884 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
a1255107 1885 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3e96dbfd
AD
1886 /* XXX handle errors */
1887 if (r) {
1888 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
a1255107 1889 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd 1890 }
a1255107 1891 adev->ip_blocks[i].status.hw = false;
3e96dbfd
AD
1892 break;
1893 }
1894 }
1895
d38ceaf9 1896 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1897 if (!adev->ip_blocks[i].status.hw)
d38ceaf9 1898 continue;
8201a67a 1899
a1255107 1900 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
d38ceaf9 1901 /* XXX handle errors */
2c1a2784 1902 if (r) {
a1255107
AD
1903 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1904 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1905 }
8201a67a 1906
a1255107 1907 adev->ip_blocks[i].status.hw = false;
d38ceaf9
AD
1908 }
1909
9950cda2 1910
d38ceaf9 1911 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1912 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 1913 continue;
c12aba3a
ML
1914
1915 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
c8963ea4 1916 amdgpu_ucode_free_bo(adev);
1e256e27 1917 amdgpu_free_static_csa(&adev->virt.csa_obj);
c12aba3a
ML
1918 amdgpu_device_wb_fini(adev);
1919 amdgpu_device_vram_scratch_fini(adev);
1920 }
1921
a1255107 1922 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
d38ceaf9 1923 /* XXX handle errors */
2c1a2784 1924 if (r) {
a1255107
AD
1925 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1926 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1927 }
a1255107
AD
1928 adev->ip_blocks[i].status.sw = false;
1929 adev->ip_blocks[i].status.valid = false;
d38ceaf9
AD
1930 }
1931
a6dcfd9c 1932 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1933 if (!adev->ip_blocks[i].status.late_initialized)
8a2eef1d 1934 continue;
a1255107
AD
1935 if (adev->ip_blocks[i].version->funcs->late_fini)
1936 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1937 adev->ip_blocks[i].status.late_initialized = false;
a6dcfd9c
ML
1938 }
1939
030308fc 1940 if (amdgpu_sriov_vf(adev))
24136135
ML
1941 if (amdgpu_virt_release_full_gpu(adev, false))
1942 DRM_ERROR("failed to release exclusive mode on fini\n");
2493664f 1943
d38ceaf9
AD
1944 return 0;
1945}
1946
b55c9e7a
EQ
1947static int amdgpu_device_enable_mgpu_fan_boost(void)
1948{
1949 struct amdgpu_gpu_instance *gpu_ins;
1950 struct amdgpu_device *adev;
1951 int i, ret = 0;
1952
1953 mutex_lock(&mgpu_info.mutex);
1954
1955 /*
1956 * MGPU fan boost feature should be enabled
1957 * only when there are two or more dGPUs in
1958 * the system
1959 */
1960 if (mgpu_info.num_dgpu < 2)
1961 goto out;
1962
1963 for (i = 0; i < mgpu_info.num_dgpu; i++) {
1964 gpu_ins = &(mgpu_info.gpu_ins[i]);
1965 adev = gpu_ins->adev;
1966 if (!(adev->flags & AMD_IS_APU) &&
1967 !gpu_ins->mgpu_fan_enabled &&
1968 adev->powerplay.pp_funcs &&
1969 adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
1970 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
1971 if (ret)
1972 break;
1973
1974 gpu_ins->mgpu_fan_enabled = 1;
1975 }
1976 }
1977
1978out:
1979 mutex_unlock(&mgpu_info.mutex);
1980
1981 return ret;
1982}
1983
e3ecdffa 1984/**
1112a46b 1985 * amdgpu_device_ip_late_init_func_handler - work handler for ib test
e3ecdffa 1986 *
1112a46b 1987 * @work: work_struct.
e3ecdffa 1988 */
06ec9070 1989static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
2dc80b00
S
1990{
1991 struct amdgpu_device *adev =
1992 container_of(work, struct amdgpu_device, late_init_work.work);
916ac57f
RZ
1993 int r;
1994
1995 r = amdgpu_ib_ring_tests(adev);
1996 if (r)
1997 DRM_ERROR("ib ring test failed (%d).\n", r);
b55c9e7a
EQ
1998
1999 r = amdgpu_device_enable_mgpu_fan_boost();
2000 if (r)
2001 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
9b638f97 2002
 2003 /* set to low pstate by default */
2004 amdgpu_xgmi_set_pstate(adev, 0);
2dc80b00
S
2005}
2006
1e317b99
RZ
2007static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2008{
2009 struct amdgpu_device *adev =
2010 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2011
2012 mutex_lock(&adev->gfx.gfx_off_mutex);
2013 if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
2014 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2015 adev->gfx.gfx_off_state = true;
2016 }
2017 mutex_unlock(&adev->gfx.gfx_off_mutex);
2018}
2019
e3ecdffa 2020/**
e7854a03 2021 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
e3ecdffa
AD
2022 *
2023 * @adev: amdgpu_device pointer
2024 *
 2025 * Phase 1 of the main suspend path for hardware IPs. Clockgating and
 2026 * powergating are disabled, then the suspend callbacks are run for the
 2027 * display (DCE) hardware IPs only, putting them into a state suitable
 2028 * for suspend; all other IPs are handled in phase 2.
2029 * Returns 0 on success, negative error code on failure.
2030 */
e7854a03
AD
2031static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2032{
2033 int i, r;
2034
05df1f01 2035 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
fdd34271 2036 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
05df1f01 2037
e7854a03
AD
2038 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2039 if (!adev->ip_blocks[i].status.valid)
2040 continue;
2041 /* displays are handled separately */
2042 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
e7854a03
AD
2043 /* XXX handle errors */
2044 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2045 /* XXX handle errors */
2046 if (r) {
2047 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2048 adev->ip_blocks[i].version->funcs->name, r);
2049 }
2050 }
2051 }
2052
e7854a03
AD
2053 return 0;
2054}
2055
2056/**
2057 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2058 *
2059 * @adev: amdgpu_device pointer
2060 *
 2061 * Phase 2 of the main suspend path for hardware IPs. The list of all the
 2062 * hardware IPs that make up the asic is walked and the suspend callbacks
 2063 * are run for every IP except the display (DCE) blocks, which were already
 2064 * suspended in phase 1. suspend puts each IP into a state suitable for suspend.
2065 * Returns 0 on success, negative error code on failure.
2066 */
2067static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
d38ceaf9
AD
2068{
2069 int i, r;
2070
2071 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 2072 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 2073 continue;
e7854a03
AD
2074 /* displays are handled in phase1 */
2075 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2076 continue;
d38ceaf9 2077 /* XXX handle errors */
a1255107 2078 r = adev->ip_blocks[i].version->funcs->suspend(adev);
d38ceaf9 2079 /* XXX handle errors */
2c1a2784 2080 if (r) {
a1255107
AD
2081 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2082 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 2083 }
d38ceaf9
AD
2084 }
2085
2086 return 0;
2087}
2088
e7854a03
AD
2089/**
2090 * amdgpu_device_ip_suspend - run suspend for hardware IPs
2091 *
2092 * @adev: amdgpu_device pointer
2093 *
2094 * Main suspend function for hardware IPs. The list of all the hardware
2095 * IPs that make up the asic is walked, clockgating is disabled and the
2096 * suspend callbacks are run. suspend puts the hardware and software state
2097 * in each IP into a state suitable for suspend.
2098 * Returns 0 on success, negative error code on failure.
2099 */
2100int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2101{
2102 int r;
2103
e7819644
YT
2104 if (amdgpu_sriov_vf(adev))
2105 amdgpu_virt_request_full_gpu(adev, false);
2106
e7854a03
AD
2107 r = amdgpu_device_ip_suspend_phase1(adev);
2108 if (r)
2109 return r;
2110 r = amdgpu_device_ip_suspend_phase2(adev);
2111
e7819644
YT
2112 if (amdgpu_sriov_vf(adev))
2113 amdgpu_virt_release_full_gpu(adev, false);
2114
e7854a03
AD
2115 return r;
2116}
2117
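/*
 * For reference, the system suspend path in this file (amdgpu_device_suspend()
 * below) interleaves the two phases with VRAM eviction; a rough sketch of that
 * ordering, for illustration only:
 *
 *	amdgpu_device_ip_suspend_phase1(adev);	// displays first
 *	amdgpu_bo_evict_vram(adev);
 *	amdgpu_fence_driver_suspend(adev);
 *	amdgpu_device_ip_suspend_phase2(adev);	// everything else
 *	amdgpu_bo_evict_vram(adev);		// also evicts the GART page table
 */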
06ec9070 2118static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
a90ad3c2
ML
2119{
2120 int i, r;
2121
2cb681b6
ML
2122 static enum amd_ip_block_type ip_order[] = {
2123 AMD_IP_BLOCK_TYPE_GMC,
2124 AMD_IP_BLOCK_TYPE_COMMON,
39186aef 2125 AMD_IP_BLOCK_TYPE_PSP,
2cb681b6
ML
2126 AMD_IP_BLOCK_TYPE_IH,
2127 };
a90ad3c2 2128
2cb681b6
ML
2129 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2130 int j;
2131 struct amdgpu_ip_block *block;
a90ad3c2 2132
2cb681b6
ML
2133 for (j = 0; j < adev->num_ip_blocks; j++) {
2134 block = &adev->ip_blocks[j];
2135
2136 if (block->version->type != ip_order[i] ||
2137 !block->status.valid)
2138 continue;
2139
2140 r = block->version->funcs->hw_init(adev);
0aaeefcc 2141 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
c41d1cf6
ML
2142 if (r)
2143 return r;
a90ad3c2
ML
2144 }
2145 }
2146
2147 return 0;
2148}
2149
06ec9070 2150static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
a90ad3c2
ML
2151{
2152 int i, r;
2153
2cb681b6
ML
2154 static enum amd_ip_block_type ip_order[] = {
2155 AMD_IP_BLOCK_TYPE_SMC,
2156 AMD_IP_BLOCK_TYPE_DCE,
2157 AMD_IP_BLOCK_TYPE_GFX,
2158 AMD_IP_BLOCK_TYPE_SDMA,
257deb8c
FM
2159 AMD_IP_BLOCK_TYPE_UVD,
2160 AMD_IP_BLOCK_TYPE_VCE
2cb681b6 2161 };
a90ad3c2 2162
2cb681b6
ML
2163 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2164 int j;
2165 struct amdgpu_ip_block *block;
a90ad3c2 2166
2cb681b6
ML
2167 for (j = 0; j < adev->num_ip_blocks; j++) {
2168 block = &adev->ip_blocks[j];
2169
2170 if (block->version->type != ip_order[i] ||
2171 !block->status.valid)
2172 continue;
2173
2174 r = block->version->funcs->hw_init(adev);
0aaeefcc 2175 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
c41d1cf6
ML
2176 if (r)
2177 return r;
a90ad3c2
ML
2178 }
2179 }
2180
2181 return 0;
2182}
2183
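/*
 * Rough sketch of how the SR-IOV re-init helpers above are sequenced by
 * amdgpu_device_reset_sriov() later in this file (illustration only, error
 * handling omitted):
 *
 *	r = amdgpu_device_ip_reinit_early_sriov(adev);	// GMC/COMMON/PSP/IH
 *	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
 *	r = amdgpu_device_fw_loading(adev);
 *	r = amdgpu_device_ip_reinit_late_sriov(adev);	// SMC/DCE/GFX/SDMA/UVD/VCE
 */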
e3ecdffa
AD
2184/**
2185 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2186 *
2187 * @adev: amdgpu_device pointer
2188 *
2189 * First resume function for hardware IPs. The list of all the hardware
2190 * IPs that make up the asic is walked and the resume callbacks are run for
2191 * COMMON, GMC, and IH. resume puts the hardware into a functional state
2192 * after a suspend and updates the software state as necessary. This
2193 * function is also used for restoring the GPU after a GPU reset.
2194 * Returns 0 on success, negative error code on failure.
2195 */
06ec9070 2196static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
d38ceaf9
AD
2197{
2198 int i, r;
2199
a90ad3c2
ML
2200 for (i = 0; i < adev->num_ip_blocks; i++) {
2201 if (!adev->ip_blocks[i].status.valid)
2202 continue;
a90ad3c2 2203 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
e3ecdffa
AD
2204 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2205 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
fcf0649f
CZ
2206 r = adev->ip_blocks[i].version->funcs->resume(adev);
2207 if (r) {
2208 DRM_ERROR("resume of IP block <%s> failed %d\n",
2209 adev->ip_blocks[i].version->funcs->name, r);
2210 return r;
2211 }
a90ad3c2
ML
2212 }
2213 }
2214
2215 return 0;
2216}
2217
e3ecdffa
AD
2218/**
2219 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2220 *
2221 * @adev: amdgpu_device pointer
2222 *
 2223 * Second resume function for hardware IPs. The list of all the hardware
2224 * IPs that make up the asic is walked and the resume callbacks are run for
2225 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
2226 * functional state after a suspend and updates the software state as
2227 * necessary. This function is also used for restoring the GPU after a GPU
2228 * reset.
2229 * Returns 0 on success, negative error code on failure.
2230 */
06ec9070 2231static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
d38ceaf9
AD
2232{
2233 int i, r;
2234
2235 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2236 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 2237 continue;
fcf0649f 2238 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
e3ecdffa 2239 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
7a3e0bb2
RZ
2240 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
2241 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
fcf0649f 2242 continue;
a1255107 2243 r = adev->ip_blocks[i].version->funcs->resume(adev);
2c1a2784 2244 if (r) {
a1255107
AD
2245 DRM_ERROR("resume of IP block <%s> failed %d\n",
2246 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 2247 return r;
2c1a2784 2248 }
d38ceaf9
AD
2249 }
2250
2251 return 0;
2252}
2253
e3ecdffa
AD
2254/**
2255 * amdgpu_device_ip_resume - run resume for hardware IPs
2256 *
2257 * @adev: amdgpu_device pointer
2258 *
2259 * Main resume function for hardware IPs. The hardware IPs
2260 * are split into two resume functions because they are
 2261 * also used in recovering from a GPU reset and some additional
 2262 * steps need to be taken between them. In this case (S3/S4) they are
2263 * run sequentially.
2264 * Returns 0 on success, negative error code on failure.
2265 */
06ec9070 2266static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
fcf0649f
CZ
2267{
2268 int r;
2269
06ec9070 2270 r = amdgpu_device_ip_resume_phase1(adev);
fcf0649f
CZ
2271 if (r)
2272 return r;
7a3e0bb2
RZ
2273
2274 r = amdgpu_device_fw_loading(adev);
2275 if (r)
2276 return r;
2277
06ec9070 2278 r = amdgpu_device_ip_resume_phase2(adev);
fcf0649f
CZ
2279
2280 return r;
2281}
2282
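/*
 * Illustrative sketch of where this sits in the S3/S4 resume path
 * (amdgpu_device_resume() below); error handling omitted:
 *
 *	r = amdgpu_device_ip_resume(adev);
 *	amdgpu_fence_driver_resume(adev);
 *	r = amdgpu_device_ip_late_init(adev);
 */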
e3ecdffa
AD
2283/**
2284 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2285 *
2286 * @adev: amdgpu_device pointer
2287 *
2288 * Query the VBIOS data tables to determine if the board supports SR-IOV.
2289 */
4e99a44e 2290static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 2291{
6867e1b5
ML
2292 if (amdgpu_sriov_vf(adev)) {
2293 if (adev->is_atom_fw) {
2294 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2295 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2296 } else {
2297 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2298 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2299 }
2300
2301 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2302 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
a5bde2f9 2303 }
048765ad
AR
2304}
2305
e3ecdffa
AD
2306/**
2307 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
2308 *
2309 * @asic_type: AMD asic type
2310 *
 2311 * Check if there is DC (new modesetting infrastructure) support for an asic.
2312 * returns true if DC has support, false if not.
2313 */
4562236b
HW
2314bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2315{
2316 switch (asic_type) {
2317#if defined(CONFIG_DRM_AMD_DC)
2318 case CHIP_BONAIRE:
0d6fbccb 2319 case CHIP_KAVERI:
367e6687
AD
2320 case CHIP_KABINI:
2321 case CHIP_MULLINS:
d9fda248
HW
2322 /*
2323 * We have systems in the wild with these ASICs that require
2324 * LVDS and VGA support which is not supported with DC.
2325 *
2326 * Fallback to the non-DC driver here by default so as not to
2327 * cause regressions.
2328 */
2329 return amdgpu_dc > 0;
2330 case CHIP_HAWAII:
4562236b
HW
2331 case CHIP_CARRIZO:
2332 case CHIP_STONEY:
4562236b 2333 case CHIP_POLARIS10:
675fd32b 2334 case CHIP_POLARIS11:
2c8ad2d5 2335 case CHIP_POLARIS12:
675fd32b 2336 case CHIP_VEGAM:
4562236b
HW
2337 case CHIP_TONGA:
2338 case CHIP_FIJI:
42f8ffa1 2339 case CHIP_VEGA10:
dca7b401 2340 case CHIP_VEGA12:
c6034aa2 2341 case CHIP_VEGA20:
dc37a9a0 2342#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
fd187853 2343 case CHIP_RAVEN:
42f8ffa1 2344#endif
fd187853 2345 return amdgpu_dc != 0;
4562236b
HW
2346#endif
2347 default:
2348 return false;
2349 }
2350}
2351
2352/**
2353 * amdgpu_device_has_dc_support - check if dc is supported
2354 *
 2355 * @adev: amdgpu_device pointer
2356 *
2357 * Returns true for supported, false for not supported
2358 */
2359bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2360{
2555039d
XY
2361 if (amdgpu_sriov_vf(adev))
2362 return false;
2363
4562236b
HW
2364 return amdgpu_device_asic_has_dc_support(adev->asic_type);
2365}
2366
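/*
 * Usage sketch of the predicate above, as used elsewhere in this file to
 * pick between the legacy display helpers and the DC/atomic path:
 *
 *	if (!amdgpu_device_has_dc_support(adev))
 *		drm_helper_hpd_irq_event(dev);		// pre-DC helper path
 *	else
 *		drm_kms_helper_hotplug_event(dev);	// DC / atomic path
 */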
d4535e2c
AG
2367
2368static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
2369{
2370 struct amdgpu_device *adev =
2371 container_of(__work, struct amdgpu_device, xgmi_reset_work);
2372
2373 adev->asic_reset_res = amdgpu_asic_reset(adev);
2374 if (adev->asic_reset_res)
2375 DRM_WARN("ASIC reset failed with err r, %d for drm dev, %s",
2376 adev->asic_reset_res, adev->ddev->unique);
2377}
2378
2379
d38ceaf9
AD
2380/**
2381 * amdgpu_device_init - initialize the driver
2382 *
2383 * @adev: amdgpu_device pointer
87e3f136 2384 * @ddev: drm dev pointer
d38ceaf9
AD
2385 * @pdev: pci dev pointer
2386 * @flags: driver flags
2387 *
2388 * Initializes the driver info and hw (all asics).
2389 * Returns 0 for success or an error on failure.
2390 * Called at driver startup.
2391 */
2392int amdgpu_device_init(struct amdgpu_device *adev,
2393 struct drm_device *ddev,
2394 struct pci_dev *pdev,
2395 uint32_t flags)
2396{
2397 int r, i;
2398 bool runtime = false;
95844d20 2399 u32 max_MBps;
d38ceaf9
AD
2400
2401 adev->shutdown = false;
2402 adev->dev = &pdev->dev;
2403 adev->ddev = ddev;
2404 adev->pdev = pdev;
2405 adev->flags = flags;
2f7d10b3 2406 adev->asic_type = flags & AMD_ASIC_MASK;
d38ceaf9 2407 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
593aa2d2
SL
2408 if (amdgpu_emu_mode == 1)
2409 adev->usec_timeout *= 2;
770d13b1 2410 adev->gmc.gart_size = 512 * 1024 * 1024;
d38ceaf9
AD
2411 adev->accel_working = false;
2412 adev->num_rings = 0;
2413 adev->mman.buffer_funcs = NULL;
2414 adev->mman.buffer_funcs_ring = NULL;
2415 adev->vm_manager.vm_pte_funcs = NULL;
3798e9a6 2416 adev->vm_manager.vm_pte_num_rqs = 0;
132f34e4 2417 adev->gmc.gmc_funcs = NULL;
f54d1867 2418 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
b8866c26 2419 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
d38ceaf9
AD
2420
2421 adev->smc_rreg = &amdgpu_invalid_rreg;
2422 adev->smc_wreg = &amdgpu_invalid_wreg;
2423 adev->pcie_rreg = &amdgpu_invalid_rreg;
2424 adev->pcie_wreg = &amdgpu_invalid_wreg;
36b9a952
HR
2425 adev->pciep_rreg = &amdgpu_invalid_rreg;
2426 adev->pciep_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
2427 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2428 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2429 adev->didt_rreg = &amdgpu_invalid_rreg;
2430 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
2431 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2432 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
2433 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2434 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2435
3e39ab90
AD
2436 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2437 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2438 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
2439
 2440 /* mutex initializations are all done here so we
 2441 * can call these functions later without locking issues */
d38ceaf9 2442 atomic_set(&adev->irq.ih.lock, 0);
0e5ca0d1 2443 mutex_init(&adev->firmware.mutex);
d38ceaf9
AD
2444 mutex_init(&adev->pm.mutex);
2445 mutex_init(&adev->gfx.gpu_clock_mutex);
2446 mutex_init(&adev->srbm_mutex);
b8866c26 2447 mutex_init(&adev->gfx.pipe_reserve_mutex);
d23ee13f 2448 mutex_init(&adev->gfx.gfx_off_mutex);
d38ceaf9 2449 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9 2450 mutex_init(&adev->mn_lock);
e23b74aa 2451 mutex_init(&adev->virt.vf_errors.lock);
d38ceaf9 2452 hash_init(adev->mn_hash);
13a752e3 2453 mutex_init(&adev->lock_reset);
d38ceaf9 2454
06ec9070 2455 amdgpu_device_check_arguments(adev);
d38ceaf9 2456
d38ceaf9
AD
2457 spin_lock_init(&adev->mmio_idx_lock);
2458 spin_lock_init(&adev->smc_idx_lock);
2459 spin_lock_init(&adev->pcie_idx_lock);
2460 spin_lock_init(&adev->uvd_ctx_idx_lock);
2461 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 2462 spin_lock_init(&adev->gc_cac_idx_lock);
16abb5d2 2463 spin_lock_init(&adev->se_cac_idx_lock);
d38ceaf9 2464 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 2465 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 2466
0c4e7fa5
CZ
2467 INIT_LIST_HEAD(&adev->shadow_list);
2468 mutex_init(&adev->shadow_list_lock);
2469
795f2813
AR
2470 INIT_LIST_HEAD(&adev->ring_lru_list);
2471 spin_lock_init(&adev->ring_lru_list_lock);
2472
06ec9070
AD
2473 INIT_DELAYED_WORK(&adev->late_init_work,
2474 amdgpu_device_ip_late_init_func_handler);
1e317b99
RZ
2475 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
2476 amdgpu_device_delay_enable_gfx_off);
2dc80b00 2477
d4535e2c
AG
2478 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
2479
d23ee13f 2480 adev->gfx.gfx_off_req_count = 1;
b1ddf548
RZ
2481 adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
2482
0fa49558
AX
2483 /* Registers mapping */
2484 /* TODO: block userspace mapping of io register */
da69c161
KW
2485 if (adev->asic_type >= CHIP_BONAIRE) {
2486 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2487 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2488 } else {
2489 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2490 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2491 }
d38ceaf9 2492
d38ceaf9
AD
2493 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2494 if (adev->rmmio == NULL) {
2495 return -ENOMEM;
2496 }
2497 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2498 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2499
d38ceaf9
AD
2500 /* io port mapping */
2501 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2502 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2503 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2504 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2505 break;
2506 }
2507 }
2508 if (adev->rio_mem == NULL)
b64a18c5 2509 DRM_INFO("PCI I/O BAR is not found.\n");
d38ceaf9 2510
5494d864
AD
2511 amdgpu_device_get_pcie_info(adev);
2512
d38ceaf9 2513 /* early init functions */
06ec9070 2514 r = amdgpu_device_ip_early_init(adev);
d38ceaf9
AD
2515 if (r)
2516 return r;
2517
6585661d
OZ
2518 /* doorbell bar mapping and doorbell index init*/
2519 amdgpu_device_doorbell_init(adev);
2520
d38ceaf9
AD
2521 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2522 /* this will fail for cards that aren't VGA class devices, just
2523 * ignore it */
06ec9070 2524 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
d38ceaf9 2525
e9bef455 2526 if (amdgpu_device_is_px(ddev))
d38ceaf9 2527 runtime = true;
84c8b22e
LW
2528 if (!pci_is_thunderbolt_attached(adev->pdev))
2529 vga_switcheroo_register_client(adev->pdev,
2530 &amdgpu_switcheroo_ops, runtime);
d38ceaf9
AD
2531 if (runtime)
2532 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2533
9475a943
SL
2534 if (amdgpu_emu_mode == 1) {
2535 /* post the asic on emulation mode */
2536 emu_soc_asic_init(adev);
bfca0289 2537 goto fence_driver_init;
9475a943 2538 }
bfca0289 2539
d38ceaf9 2540 /* Read BIOS */
83ba126a
AD
2541 if (!amdgpu_get_bios(adev)) {
2542 r = -EINVAL;
2543 goto failed;
2544 }
f7e9e9fe 2545
d38ceaf9 2546 r = amdgpu_atombios_init(adev);
2c1a2784
AD
2547 if (r) {
2548 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
e23b74aa 2549 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
83ba126a 2550 goto failed;
2c1a2784 2551 }
d38ceaf9 2552
4e99a44e
ML
2553 /* detect if we are with an SRIOV vbios */
2554 amdgpu_device_detect_sriov_bios(adev);
048765ad 2555
95e8e59e
AD
2556 /* check if we need to reset the asic
2557 * E.g., driver was not cleanly unloaded previously, etc.
2558 */
f14899fd 2559 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
95e8e59e
AD
2560 r = amdgpu_asic_reset(adev);
2561 if (r) {
2562 dev_err(adev->dev, "asic reset on init failed\n");
2563 goto failed;
2564 }
2565 }
2566
d38ceaf9 2567 /* Post card if necessary */
39c640c0 2568 if (amdgpu_device_need_post(adev)) {
d38ceaf9 2569 if (!adev->bios) {
bec86378 2570 dev_err(adev->dev, "no vBIOS found\n");
83ba126a
AD
2571 r = -EINVAL;
2572 goto failed;
d38ceaf9 2573 }
bec86378 2574 DRM_INFO("GPU posting now...\n");
4e99a44e
ML
2575 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2576 if (r) {
2577 dev_err(adev->dev, "gpu post error!\n");
2578 goto failed;
2579 }
d38ceaf9
AD
2580 }
2581
88b64e95
AD
2582 if (adev->is_atom_fw) {
2583 /* Initialize clocks */
2584 r = amdgpu_atomfirmware_get_clock_info(adev);
2585 if (r) {
2586 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
e23b74aa 2587 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
88b64e95
AD
2588 goto failed;
2589 }
2590 } else {
a5bde2f9
AD
2591 /* Initialize clocks */
2592 r = amdgpu_atombios_get_clock_info(adev);
2593 if (r) {
2594 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
e23b74aa 2595 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
89041940 2596 goto failed;
a5bde2f9
AD
2597 }
2598 /* init i2c buses */
4562236b
HW
2599 if (!amdgpu_device_has_dc_support(adev))
2600 amdgpu_atombios_i2c_init(adev);
2c1a2784 2601 }
d38ceaf9 2602
bfca0289 2603fence_driver_init:
d38ceaf9
AD
2604 /* Fence driver */
2605 r = amdgpu_fence_driver_init(adev);
2c1a2784
AD
2606 if (r) {
2607 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
e23b74aa 2608 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
83ba126a 2609 goto failed;
2c1a2784 2610 }
d38ceaf9
AD
2611
2612 /* init the mode config */
2613 drm_mode_config_init(adev->ddev);
2614
06ec9070 2615 r = amdgpu_device_ip_init(adev);
d38ceaf9 2616 if (r) {
8840a387 2617 /* failed in exclusive mode due to timeout */
2618 if (amdgpu_sriov_vf(adev) &&
2619 !amdgpu_sriov_runtime(adev) &&
2620 amdgpu_virt_mmio_blocked(adev) &&
2621 !amdgpu_virt_wait_reset(adev)) {
2622 dev_err(adev->dev, "VF exclusive mode timeout\n");
1daee8b4
PD
2623 /* Don't send request since VF is inactive. */
2624 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
2625 adev->virt.ops = NULL;
8840a387 2626 r = -EAGAIN;
2627 goto failed;
2628 }
06ec9070 2629 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
e23b74aa 2630 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
72d3f592
ED
2631 if (amdgpu_virt_request_full_gpu(adev, false))
2632 amdgpu_virt_release_full_gpu(adev, false);
83ba126a 2633 goto failed;
d38ceaf9
AD
2634 }
2635
2636 adev->accel_working = true;
2637
e59c0205
AX
2638 amdgpu_vm_check_compute_bug(adev);
2639
95844d20
MO
2640 /* Initialize the buffer migration limit. */
2641 if (amdgpu_moverate >= 0)
2642 max_MBps = amdgpu_moverate;
2643 else
2644 max_MBps = 8; /* Allow 8 MB/s. */
2645 /* Get a log2 for easy divisions. */
2646 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2647
d38ceaf9
AD
2648 r = amdgpu_ib_pool_init(adev);
2649 if (r) {
2650 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
e23b74aa 2651 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
83ba126a 2652 goto failed;
d38ceaf9
AD
2653 }
2654
9bc92b9c
ML
2655 amdgpu_fbdev_init(adev);
2656
d2f52ac8
RZ
2657 r = amdgpu_pm_sysfs_init(adev);
2658 if (r)
2659 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2660
75758255 2661 r = amdgpu_debugfs_gem_init(adev);
3f14e623 2662 if (r)
d38ceaf9 2663 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
d38ceaf9
AD
2664
2665 r = amdgpu_debugfs_regs_init(adev);
3f14e623 2666 if (r)
d38ceaf9 2667 DRM_ERROR("registering register debugfs failed (%d).\n", r);
d38ceaf9 2668
50ab2533 2669 r = amdgpu_debugfs_firmware_init(adev);
3f14e623 2670 if (r)
50ab2533 2671 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
50ab2533 2672
763efb6c 2673 r = amdgpu_debugfs_init(adev);
db95e218 2674 if (r)
763efb6c 2675 DRM_ERROR("Creating debugfs files failed (%d).\n", r);
db95e218 2676
d38ceaf9
AD
2677 if ((amdgpu_testing & 1)) {
2678 if (adev->accel_working)
2679 amdgpu_test_moves(adev);
2680 else
2681 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2682 }
d38ceaf9
AD
2683 if (amdgpu_benchmarking) {
2684 if (adev->accel_working)
2685 amdgpu_benchmark(adev, amdgpu_benchmarking);
2686 else
2687 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2688 }
2689
2690 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2691 * explicit gating rather than handling it automatically.
2692 */
06ec9070 2693 r = amdgpu_device_ip_late_init(adev);
2c1a2784 2694 if (r) {
06ec9070 2695 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
e23b74aa 2696 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
83ba126a 2697 goto failed;
2c1a2784 2698 }
d38ceaf9
AD
2699
2700 return 0;
83ba126a
AD
2701
2702failed:
89041940 2703 amdgpu_vf_error_trans_all(adev);
83ba126a
AD
2704 if (runtime)
2705 vga_switcheroo_fini_domain_pm_ops(adev->dev);
8840a387 2706
83ba126a 2707 return r;
d38ceaf9
AD
2708}
2709
d38ceaf9
AD
2710/**
2711 * amdgpu_device_fini - tear down the driver
2712 *
2713 * @adev: amdgpu_device pointer
2714 *
2715 * Tear down the driver info (all asics).
2716 * Called at driver shutdown.
2717 */
2718void amdgpu_device_fini(struct amdgpu_device *adev)
2719{
2720 int r;
2721
2722 DRM_INFO("amdgpu: finishing device.\n");
2723 adev->shutdown = true;
e5b03032
ML
2724 /* disable all interrupts */
2725 amdgpu_irq_disable_all(adev);
ff97cba8
ML
2726 if (adev->mode_info.mode_config_initialized){
2727 if (!amdgpu_device_has_dc_support(adev))
c2d88e06 2728 drm_helper_force_disable_all(adev->ddev);
ff97cba8
ML
2729 else
2730 drm_atomic_helper_shutdown(adev->ddev);
2731 }
d38ceaf9
AD
2732 amdgpu_ib_pool_fini(adev);
2733 amdgpu_fence_driver_fini(adev);
58e955d9 2734 amdgpu_pm_sysfs_fini(adev);
d38ceaf9 2735 amdgpu_fbdev_fini(adev);
06ec9070 2736 r = amdgpu_device_ip_fini(adev);
ab4fe3e1
HR
2737 if (adev->firmware.gpu_info_fw) {
2738 release_firmware(adev->firmware.gpu_info_fw);
2739 adev->firmware.gpu_info_fw = NULL;
2740 }
d38ceaf9 2741 adev->accel_working = false;
2dc80b00 2742 cancel_delayed_work_sync(&adev->late_init_work);
d38ceaf9 2743 /* free i2c buses */
4562236b
HW
2744 if (!amdgpu_device_has_dc_support(adev))
2745 amdgpu_i2c_fini(adev);
bfca0289
SL
2746
2747 if (amdgpu_emu_mode != 1)
2748 amdgpu_atombios_fini(adev);
2749
d38ceaf9
AD
2750 kfree(adev->bios);
2751 adev->bios = NULL;
84c8b22e
LW
2752 if (!pci_is_thunderbolt_attached(adev->pdev))
2753 vga_switcheroo_unregister_client(adev->pdev);
83ba126a
AD
2754 if (adev->flags & AMD_IS_PX)
2755 vga_switcheroo_fini_domain_pm_ops(adev->dev);
d38ceaf9
AD
2756 vga_client_register(adev->pdev, NULL, NULL, NULL);
2757 if (adev->rio_mem)
2758 pci_iounmap(adev->pdev, adev->rio_mem);
2759 adev->rio_mem = NULL;
2760 iounmap(adev->rmmio);
2761 adev->rmmio = NULL;
06ec9070 2762 amdgpu_device_doorbell_fini(adev);
d38ceaf9 2763 amdgpu_debugfs_regs_cleanup(adev);
d38ceaf9
AD
2764}
2765
2766
2767/*
2768 * Suspend & resume.
2769 */
2770/**
810ddc3a 2771 * amdgpu_device_suspend - initiate device suspend
d38ceaf9 2772 *
87e3f136
DP
2773 * @dev: drm dev pointer
2774 * @suspend: suspend state
2775 * @fbcon : notify the fbdev of suspend
d38ceaf9
AD
2776 *
2777 * Puts the hw in the suspend state (all asics).
2778 * Returns 0 for success or an error on failure.
2779 * Called at driver suspend.
2780 */
810ddc3a 2781int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
d38ceaf9
AD
2782{
2783 struct amdgpu_device *adev;
2784 struct drm_crtc *crtc;
2785 struct drm_connector *connector;
5ceb54c6 2786 int r;
d38ceaf9
AD
2787
2788 if (dev == NULL || dev->dev_private == NULL) {
2789 return -ENODEV;
2790 }
2791
2792 adev = dev->dev_private;
2793
2794 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2795 return 0;
2796
44779b43 2797 adev->in_suspend = true;
d38ceaf9
AD
2798 drm_kms_helper_poll_disable(dev);
2799
5f818173
S
2800 if (fbcon)
2801 amdgpu_fbdev_set_suspend(adev, 1);
2802
a5459475
RZ
2803 cancel_delayed_work_sync(&adev->late_init_work);
2804
4562236b
HW
2805 if (!amdgpu_device_has_dc_support(adev)) {
2806 /* turn off display hw */
2807 drm_modeset_lock_all(dev);
2808 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2809 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2810 }
2811 drm_modeset_unlock_all(dev);
fe1053b7
AD
2812 /* unpin the front buffers and cursors */
2813 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2814 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2815 struct drm_framebuffer *fb = crtc->primary->fb;
2816 struct amdgpu_bo *robj;
2817
91334223 2818 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
fe1053b7
AD
2819 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2820 r = amdgpu_bo_reserve(aobj, true);
2821 if (r == 0) {
2822 amdgpu_bo_unpin(aobj);
2823 amdgpu_bo_unreserve(aobj);
2824 }
756e6880 2825 }
756e6880 2826
fe1053b7
AD
2827 if (fb == NULL || fb->obj[0] == NULL) {
2828 continue;
2829 }
2830 robj = gem_to_amdgpu_bo(fb->obj[0]);
2831 /* don't unpin kernel fb objects */
2832 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
2833 r = amdgpu_bo_reserve(robj, true);
2834 if (r == 0) {
2835 amdgpu_bo_unpin(robj);
2836 amdgpu_bo_unreserve(robj);
2837 }
d38ceaf9
AD
2838 }
2839 }
2840 }
fe1053b7
AD
2841
2842 amdgpu_amdkfd_suspend(adev);
2843
2844 r = amdgpu_device_ip_suspend_phase1(adev);
2845
d38ceaf9
AD
2846 /* evict vram memory */
2847 amdgpu_bo_evict_vram(adev);
2848
5ceb54c6 2849 amdgpu_fence_driver_suspend(adev);
d38ceaf9 2850
fe1053b7 2851 r = amdgpu_device_ip_suspend_phase2(adev);
d38ceaf9 2852
a0a71e49
AD
2853 /* evict remaining vram memory
2854 * This second call to evict vram is to evict the gart page table
2855 * using the CPU.
2856 */
d38ceaf9
AD
2857 amdgpu_bo_evict_vram(adev);
2858
2859 pci_save_state(dev->pdev);
2860 if (suspend) {
2861 /* Shut down the device */
2862 pci_disable_device(dev->pdev);
2863 pci_set_power_state(dev->pdev, PCI_D3hot);
74b0b157 2864 } else {
2865 r = amdgpu_asic_reset(adev);
2866 if (r)
2867 DRM_ERROR("amdgpu asic reset failed\n");
d38ceaf9
AD
2868 }
2869
d38ceaf9
AD
2870 return 0;
2871}
2872
2873/**
810ddc3a 2874 * amdgpu_device_resume - initiate device resume
d38ceaf9 2875 *
87e3f136
DP
2876 * @dev: drm dev pointer
2877 * @resume: resume state
2878 * @fbcon : notify the fbdev of resume
d38ceaf9
AD
2879 *
2880 * Bring the hw back to operating state (all asics).
2881 * Returns 0 for success or an error on failure.
2882 * Called at driver resume.
2883 */
810ddc3a 2884int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
d38ceaf9
AD
2885{
2886 struct drm_connector *connector;
2887 struct amdgpu_device *adev = dev->dev_private;
756e6880 2888 struct drm_crtc *crtc;
03161a6e 2889 int r = 0;
d38ceaf9
AD
2890
2891 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2892 return 0;
2893
d38ceaf9
AD
2894 if (resume) {
2895 pci_set_power_state(dev->pdev, PCI_D0);
2896 pci_restore_state(dev->pdev);
74b0b157 2897 r = pci_enable_device(dev->pdev);
03161a6e 2898 if (r)
4d3b9ae5 2899 return r;
d38ceaf9
AD
2900 }
2901
2902 /* post card */
39c640c0 2903 if (amdgpu_device_need_post(adev)) {
74b0b157 2904 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2905 if (r)
2906 DRM_ERROR("amdgpu asic init failed\n");
2907 }
d38ceaf9 2908
06ec9070 2909 r = amdgpu_device_ip_resume(adev);
e6707218 2910 if (r) {
06ec9070 2911 DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
4d3b9ae5 2912 return r;
e6707218 2913 }
5ceb54c6
AD
2914 amdgpu_fence_driver_resume(adev);
2915
d38ceaf9 2916
06ec9070 2917 r = amdgpu_device_ip_late_init(adev);
03161a6e 2918 if (r)
4d3b9ae5 2919 return r;
d38ceaf9 2920
fe1053b7
AD
2921 if (!amdgpu_device_has_dc_support(adev)) {
2922 /* pin cursors */
2923 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2924 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2925
91334223 2926 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
fe1053b7
AD
2927 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2928 r = amdgpu_bo_reserve(aobj, true);
2929 if (r == 0) {
2930 r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2931 if (r != 0)
2932 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2933 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2934 amdgpu_bo_unreserve(aobj);
2935 }
756e6880
AD
2936 }
2937 }
2938 }
ba997709
YZ
2939 r = amdgpu_amdkfd_resume(adev);
2940 if (r)
2941 return r;
756e6880 2942
96a5d8d4
LL
2943 /* Make sure IB tests flushed */
2944 flush_delayed_work(&adev->late_init_work);
2945
d38ceaf9
AD
2946 /* blat the mode back in */
2947 if (fbcon) {
4562236b
HW
2948 if (!amdgpu_device_has_dc_support(adev)) {
2949 /* pre DCE11 */
2950 drm_helper_resume_force_mode(dev);
2951
2952 /* turn on display hw */
2953 drm_modeset_lock_all(dev);
2954 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2955 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2956 }
2957 drm_modeset_unlock_all(dev);
d38ceaf9 2958 }
4d3b9ae5 2959 amdgpu_fbdev_set_suspend(adev, 0);
d38ceaf9
AD
2960 }
2961
2962 drm_kms_helper_poll_enable(dev);
23a1a9e5
L
2963
2964 /*
2965 * Most of the connector probing functions try to acquire runtime pm
2966 * refs to ensure that the GPU is powered on when connector polling is
2967 * performed. Since we're calling this from a runtime PM callback,
2968 * trying to acquire rpm refs will cause us to deadlock.
2969 *
2970 * Since we're guaranteed to be holding the rpm lock, it's safe to
2971 * temporarily disable the rpm helpers so this doesn't deadlock us.
2972 */
2973#ifdef CONFIG_PM
2974 dev->dev->power.disable_depth++;
2975#endif
4562236b
HW
2976 if (!amdgpu_device_has_dc_support(adev))
2977 drm_helper_hpd_irq_event(dev);
2978 else
2979 drm_kms_helper_hotplug_event(dev);
23a1a9e5
L
2980#ifdef CONFIG_PM
2981 dev->dev->power.disable_depth--;
2982#endif
44779b43
RZ
2983 adev->in_suspend = false;
2984
4d3b9ae5 2985 return 0;
d38ceaf9
AD
2986}
2987
e3ecdffa
AD
2988/**
2989 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
2990 *
2991 * @adev: amdgpu_device pointer
2992 *
2993 * The list of all the hardware IPs that make up the asic is walked and
2994 * the check_soft_reset callbacks are run. check_soft_reset determines
2995 * if the asic is still hung or not.
2996 * Returns true if any of the IPs are still in a hung state, false if not.
2997 */
06ec9070 2998static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
63fbf42f
CZ
2999{
3000 int i;
3001 bool asic_hang = false;
3002
f993d628
ML
3003 if (amdgpu_sriov_vf(adev))
3004 return true;
3005
8bc04c29
AD
3006 if (amdgpu_asic_need_full_reset(adev))
3007 return true;
3008
63fbf42f 3009 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 3010 if (!adev->ip_blocks[i].status.valid)
63fbf42f 3011 continue;
a1255107
AD
3012 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
3013 adev->ip_blocks[i].status.hang =
3014 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
3015 if (adev->ip_blocks[i].status.hang) {
3016 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
63fbf42f
CZ
3017 asic_hang = true;
3018 }
3019 }
3020 return asic_hang;
3021}
3022
e3ecdffa
AD
3023/**
3024 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
3025 *
3026 * @adev: amdgpu_device pointer
3027 *
3028 * The list of all the hardware IPs that make up the asic is walked and the
3029 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
3030 * handles any IP specific hardware or software state changes that are
3031 * necessary for a soft reset to succeed.
3032 * Returns 0 on success, negative error code on failure.
3033 */
06ec9070 3034static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
d31a501e
CZ
3035{
3036 int i, r = 0;
3037
3038 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 3039 if (!adev->ip_blocks[i].status.valid)
d31a501e 3040 continue;
a1255107
AD
3041 if (adev->ip_blocks[i].status.hang &&
3042 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
3043 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
d31a501e
CZ
3044 if (r)
3045 return r;
3046 }
3047 }
3048
3049 return 0;
3050}
3051
e3ecdffa
AD
3052/**
3053 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
3054 *
3055 * @adev: amdgpu_device pointer
3056 *
3057 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
3058 * reset is necessary to recover.
3059 * Returns true if a full asic reset is required, false if not.
3060 */
06ec9070 3061static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
35d782fe 3062{
da146d3b
AD
3063 int i;
3064
8bc04c29
AD
3065 if (amdgpu_asic_need_full_reset(adev))
3066 return true;
3067
da146d3b 3068 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 3069 if (!adev->ip_blocks[i].status.valid)
da146d3b 3070 continue;
a1255107
AD
3071 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
3072 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
3073 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
98512bb8
KW
3074 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
3075 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
a1255107 3076 if (adev->ip_blocks[i].status.hang) {
da146d3b
AD
3077 DRM_INFO("Some block need full reset!\n");
3078 return true;
3079 }
3080 }
35d782fe
CZ
3081 }
3082 return false;
3083}
3084
e3ecdffa
AD
3085/**
3086 * amdgpu_device_ip_soft_reset - do a soft reset
3087 *
3088 * @adev: amdgpu_device pointer
3089 *
3090 * The list of all the hardware IPs that make up the asic is walked and the
3091 * soft_reset callbacks are run if the block is hung. soft_reset handles any
3092 * IP specific hardware or software state changes that are necessary to soft
3093 * reset the IP.
3094 * Returns 0 on success, negative error code on failure.
3095 */
06ec9070 3096static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
35d782fe
CZ
3097{
3098 int i, r = 0;
3099
3100 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 3101 if (!adev->ip_blocks[i].status.valid)
35d782fe 3102 continue;
a1255107
AD
3103 if (adev->ip_blocks[i].status.hang &&
3104 adev->ip_blocks[i].version->funcs->soft_reset) {
3105 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
35d782fe
CZ
3106 if (r)
3107 return r;
3108 }
3109 }
3110
3111 return 0;
3112}
3113
e3ecdffa
AD
3114/**
3115 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
3116 *
3117 * @adev: amdgpu_device pointer
3118 *
3119 * The list of all the hardware IPs that make up the asic is walked and the
3120 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
3121 * handles any IP specific hardware or software state changes that are
3122 * necessary after the IP has been soft reset.
3123 * Returns 0 on success, negative error code on failure.
3124 */
06ec9070 3125static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
35d782fe
CZ
3126{
3127 int i, r = 0;
3128
3129 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 3130 if (!adev->ip_blocks[i].status.valid)
35d782fe 3131 continue;
a1255107
AD
3132 if (adev->ip_blocks[i].status.hang &&
3133 adev->ip_blocks[i].version->funcs->post_soft_reset)
3134 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
35d782fe
CZ
3135 if (r)
3136 return r;
3137 }
3138
3139 return 0;
3140}
3141
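/*
 * Sketch of how the soft-reset helpers above are sequenced by
 * amdgpu_device_pre_asic_reset() further down (illustration only):
 *
 *	if (!need_full_reset) {
 *		amdgpu_device_ip_pre_soft_reset(adev);
 *		r = amdgpu_device_ip_soft_reset(adev);
 *		amdgpu_device_ip_post_soft_reset(adev);
 *		if (r || amdgpu_device_ip_check_soft_reset(adev))
 *			need_full_reset = true;		// fall back to a full reset
 *	}
 */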
e3ecdffa 3142/**
c33adbc7 3143 * amdgpu_device_recover_vram - Recover some VRAM contents
e3ecdffa
AD
3144 *
3145 * @adev: amdgpu_device pointer
3146 *
3147 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
3148 * restore things like GPUVM page tables after a GPU reset where
3149 * the contents of VRAM might be lost.
403009bf
CK
3150 *
3151 * Returns:
3152 * 0 on success, negative error code on failure.
e3ecdffa 3153 */
c33adbc7 3154static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
c41d1cf6 3155{
c41d1cf6 3156 struct dma_fence *fence = NULL, *next = NULL;
403009bf
CK
3157 struct amdgpu_bo *shadow;
3158 long r = 1, tmo;
c41d1cf6
ML
3159
3160 if (amdgpu_sriov_runtime(adev))
b045d3af 3161 tmo = msecs_to_jiffies(8000);
c41d1cf6
ML
3162 else
3163 tmo = msecs_to_jiffies(100);
3164
3165 DRM_INFO("recover vram bo from shadow start\n");
3166 mutex_lock(&adev->shadow_list_lock);
403009bf
CK
3167 list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
3168
3169 /* No need to recover an evicted BO */
3170 if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
3171 shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
3172 continue;
3173
3174 r = amdgpu_bo_restore_shadow(shadow, &next);
3175 if (r)
3176 break;
3177
c41d1cf6
ML
3178 if (fence) {
3179 r = dma_fence_wait_timeout(fence, false, tmo);
403009bf
CK
3180 dma_fence_put(fence);
3181 fence = next;
3182 if (r <= 0)
c41d1cf6 3183 break;
403009bf
CK
3184 } else {
3185 fence = next;
c41d1cf6 3186 }
c41d1cf6
ML
3187 }
3188 mutex_unlock(&adev->shadow_list_lock);
3189
403009bf
CK
3190 if (fence)
3191 tmo = dma_fence_wait_timeout(fence, false, tmo);
c41d1cf6
ML
3192 dma_fence_put(fence);
3193
403009bf 3194 if (r <= 0 || tmo <= 0) {
c41d1cf6 3195 DRM_ERROR("recover vram bo from shadow failed\n");
403009bf
CK
3196 return -EIO;
3197 }
c41d1cf6 3198
403009bf
CK
3199 DRM_INFO("recover vram bo from shadow done\n");
3200 return 0;
c41d1cf6
ML
3201}
3202
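/*
 * Usage sketch, mirroring the SR-IOV reset path just below: VRAM recovery is
 * only attempted when the reset reports that VRAM contents were lost.
 *
 *	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
 *		atomic_inc(&adev->vram_lost_counter);
 *		r = amdgpu_device_recover_vram(adev);
 *	}
 */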
a90ad3c2 3203
e3ecdffa 3204/**
06ec9070 3205 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
5740682e
ML
3206 *
3207 * @adev: amdgpu device pointer
87e3f136 3208 * @from_hypervisor: request from hypervisor
5740682e
ML
3209 *
 3210 * Do a VF FLR and reinitialize the ASIC.
3f48c681 3211 * Returns 0 on success, negative error code on failure.
e3ecdffa
AD
3212 */
3213static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3214 bool from_hypervisor)
5740682e
ML
3215{
3216 int r;
3217
3218 if (from_hypervisor)
3219 r = amdgpu_virt_request_full_gpu(adev, true);
3220 else
3221 r = amdgpu_virt_reset_gpu(adev);
3222 if (r)
3223 return r;
a90ad3c2
ML
3224
3225 /* Resume IP prior to SMC */
06ec9070 3226 r = amdgpu_device_ip_reinit_early_sriov(adev);
5740682e
ML
3227 if (r)
3228 goto error;
a90ad3c2
ML
3229
3230 /* we need recover gart prior to run SMC/CP/SDMA resume */
c1c7ce8f 3231 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
a90ad3c2 3232
7a3e0bb2
RZ
3233 r = amdgpu_device_fw_loading(adev);
3234 if (r)
3235 return r;
3236
a90ad3c2 3237 /* now we are okay to resume SMC/CP/SDMA */
06ec9070 3238 r = amdgpu_device_ip_reinit_late_sriov(adev);
5740682e
ML
3239 if (r)
3240 goto error;
a90ad3c2
ML
3241
3242 amdgpu_irq_gpu_reset_resume_helper(adev);
5740682e 3243 r = amdgpu_ib_ring_tests(adev);
a90ad3c2 3244
abc34253 3245error:
d3c117e5 3246 amdgpu_virt_init_data_exchange(adev);
abc34253 3247 amdgpu_virt_release_full_gpu(adev, true);
c41d1cf6
ML
3248 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
3249 atomic_inc(&adev->vram_lost_counter);
c33adbc7 3250 r = amdgpu_device_recover_vram(adev);
a90ad3c2
ML
3251 }
3252
3253 return r;
3254}
3255
12938fad
CK
3256/**
3257 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
3258 *
3259 * @adev: amdgpu device pointer
3260 *
3261 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
3262 * a hung GPU.
3263 */
3264bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
3265{
3266 if (!amdgpu_device_ip_check_soft_reset(adev)) {
3267 DRM_INFO("Timeout, but no hardware hang detected.\n");
3268 return false;
3269 }
3270
3ba7b418
AG
3271 if (amdgpu_gpu_recovery == 0)
3272 goto disabled;
3273
3274 if (amdgpu_sriov_vf(adev))
3275 return true;
3276
3277 if (amdgpu_gpu_recovery == -1) {
3278 switch (adev->asic_type) {
fc42d47c
AG
3279 case CHIP_BONAIRE:
3280 case CHIP_HAWAII:
3ba7b418
AG
3281 case CHIP_TOPAZ:
3282 case CHIP_TONGA:
3283 case CHIP_FIJI:
3284 case CHIP_POLARIS10:
3285 case CHIP_POLARIS11:
3286 case CHIP_POLARIS12:
3287 case CHIP_VEGAM:
3288 case CHIP_VEGA20:
3289 case CHIP_VEGA10:
3290 case CHIP_VEGA12:
3291 break;
3292 default:
3293 goto disabled;
3294 }
12938fad
CK
3295 }
3296
3297 return true;
3ba7b418
AG
3298
3299disabled:
3300 DRM_INFO("GPU recovery disabled.\n");
3301 return false;
12938fad
CK
3302}
3303
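/*
 * Hedged usage sketch: the job timeout handler lives outside this file, so
 * the caller below is an assumption for illustration; it shows the intended
 * pairing with amdgpu_device_gpu_recover() defined further down.
 *
 *	if (amdgpu_device_should_recover_gpu(ring->adev))
 *		amdgpu_device_gpu_recover(ring->adev, job);
 */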
5c6dd71e 3304
26bc5340
AG
3305static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
3306 struct amdgpu_job *job,
3307 bool *need_full_reset_arg)
3308{
3309 int i, r = 0;
3310 bool need_full_reset = *need_full_reset_arg;
71182665 3311
71182665 3312 /* block all schedulers and reset given job's ring */
0875dc9e
CZ
3313 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3314 struct amdgpu_ring *ring = adev->rings[i];
3315
51687759 3316 if (!ring || !ring->sched.thread)
0875dc9e 3317 continue;
5740682e 3318
222b5f04 3319 drm_sched_stop(&ring->sched);
5740682e 3320
2f9d4084
ML
3321 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3322 amdgpu_fence_driver_force_completion(ring);
0875dc9e 3323 }
d38ceaf9 3324
222b5f04
AG
3325 if(job)
3326 drm_sched_increase_karma(&job->base);
3327
26bc5340
AG
3328
3329
3330 if (!amdgpu_sriov_vf(adev)) {
3331
3332 if (!need_full_reset)
3333 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
3334
3335 if (!need_full_reset) {
3336 amdgpu_device_ip_pre_soft_reset(adev);
3337 r = amdgpu_device_ip_soft_reset(adev);
3338 amdgpu_device_ip_post_soft_reset(adev);
3339 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
3340 DRM_INFO("soft reset failed, will fallback to full reset!\n");
3341 need_full_reset = true;
3342 }
3343 }
3344
3345 if (need_full_reset)
3346 r = amdgpu_device_ip_suspend(adev);
3347
3348 *need_full_reset_arg = need_full_reset;
3349 }
3350
3351 return r;
3352}
3353
3354static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
3355 struct list_head *device_list_handle,
3356 bool *need_full_reset_arg)
3357{
3358 struct amdgpu_device *tmp_adev = NULL;
3359 bool need_full_reset = *need_full_reset_arg, vram_lost = false;
3360 int r = 0;
3361
3362 /*
 3363 * ASIC reset has to be done on all XGMI hive nodes ASAP
 3364 * to allow proper link negotiation in FW (within 1 sec)
3365 */
3366 if (need_full_reset) {
3367 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
d4535e2c
AG
3368 /* For XGMI run all resets in parallel to speed up the process */
3369 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
3370 if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
3371 r = -EALREADY;
3372 } else
3373 r = amdgpu_asic_reset(tmp_adev);
3374
3375 if (r) {
3376 DRM_ERROR("ASIC reset failed with err r, %d for drm dev, %s",
26bc5340 3377 r, tmp_adev->ddev->unique);
d4535e2c
AG
3378 break;
3379 }
3380 }
3381
3382 /* For XGMI wait for all PSP resets to complete before proceed */
3383 if (!r) {
3384 list_for_each_entry(tmp_adev, device_list_handle,
3385 gmc.xgmi.head) {
3386 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
3387 flush_work(&tmp_adev->xgmi_reset_work);
3388 r = tmp_adev->asic_reset_res;
3389 if (r)
3390 break;
3391 }
3392 }
26bc5340
AG
3393 }
3394 }
3395
3396
3397 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3398 if (need_full_reset) {
3399 /* post card */
3400 if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
3401 DRM_WARN("asic atom init failed!");
3402
3403 if (!r) {
3404 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
3405 r = amdgpu_device_ip_resume_phase1(tmp_adev);
3406 if (r)
3407 goto out;
3408
3409 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
3410 if (vram_lost) {
3411 DRM_ERROR("VRAM is lost!\n");
3412 atomic_inc(&tmp_adev->vram_lost_counter);
3413 }
3414
3415 r = amdgpu_gtt_mgr_recover(
3416 &tmp_adev->mman.bdev.man[TTM_PL_TT]);
3417 if (r)
3418 goto out;
3419
3420 r = amdgpu_device_fw_loading(tmp_adev);
3421 if (r)
3422 return r;
3423
3424 r = amdgpu_device_ip_resume_phase2(tmp_adev);
3425 if (r)
3426 goto out;
3427
3428 if (vram_lost)
3429 amdgpu_device_fill_reset_magic(tmp_adev);
3430
3431 /* Update PSP FW topology after reset */
3432 if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
3433 r = amdgpu_xgmi_update_topology(hive, tmp_adev);
3434 }
3435 }
3436
3437
3438out:
3439 if (!r) {
3440 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
3441 r = amdgpu_ib_ring_tests(tmp_adev);
3442 if (r) {
3443 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
3444 r = amdgpu_device_ip_suspend(tmp_adev);
3445 need_full_reset = true;
3446 r = -EAGAIN;
3447 goto end;
3448 }
3449 }
3450
3451 if (!r)
3452 r = amdgpu_device_recover_vram(tmp_adev);
3453 else
3454 tmp_adev->asic_reset_res = r;
3455 }
3456
3457end:
3458 *need_full_reset_arg = need_full_reset;
3459 return r;
3460}
3461
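/*
 * amdgpu_device_post_asic_reset - restart the world after the reset.
 *
 * Re-submits the jobs that were still pending (unless the reset itself
 * failed), restarts every ring's scheduler, forces a modeset on non-DC
 * display paths and clears asic_reset_res for the next attempt.
 */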
static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
					  struct amdgpu_job *job)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		if (!adev->asic_reset_res)
			drm_sched_resubmit_jobs(&ring->sched);

		drm_sched_start(&ring->sched, !adev->asic_reset_res);
	}

	if (!amdgpu_device_has_dc_support(adev)) {
		drm_helper_resume_force_mode(adev->ddev);
	}

	adev->asic_reset_res = 0;
}

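/*
 * amdgpu_device_lock_adev - serialize GPU recovery on a single device.
 *
 * Takes the per-device reset mutex, bumps the reset counter, marks the
 * device as being in reset and, on bare metal, notifies amdkfd before
 * the reset starts (SRIOV handles kfd blocking separately).
 */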
static void amdgpu_device_lock_adev(struct amdgpu_device *adev)
{
	mutex_lock(&adev->lock_reset);
	atomic_inc(&adev->gpu_reset_counter);
	adev->in_gpu_reset = 1;
	/* Block kfd: SRIOV would do it separately */
	if (!amdgpu_sriov_vf(adev))
		amdgpu_amdkfd_pre_reset(adev);
}

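/*
 * amdgpu_device_unlock_adev - counterpart of amdgpu_device_lock_adev().
 *
 * Resumes amdkfd on bare metal, pushes any pending VF error records to
 * the host, clears the in-reset flag and drops the reset mutex.
 */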
static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
{
	/* unlock kfd: SRIOV would do it separately */
	if (!amdgpu_sriov_vf(adev))
		amdgpu_amdkfd_post_reset(adev);
	amdgpu_vf_error_trans_all(adev);
	adev->in_gpu_reset = 0;
	mutex_unlock(&adev->lock_reset);
}

/**
 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
 *
 * @adev: amdgpu device pointer
 * @job: the job which triggered the hang
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Attempt to do soft-reset or full-reset and reinitialize the ASIC.
 * Returns 0 for success or an error on failure.
 */

int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job)
{
	int r;
	struct amdgpu_hive_info *hive = NULL;
	bool need_full_reset = false;
	struct amdgpu_device *tmp_adev = NULL;
	struct list_head device_list, *device_list_handle = NULL;

	INIT_LIST_HEAD(&device_list);

	dev_info(adev->dev, "GPU reset begin!\n");

	/*
	 * In case of an XGMI hive, disallow concurrent resets from being
	 * triggered by different nodes. There is also no point in doing so,
	 * since the node already executing the reset will reset all the
	 * other nodes in the hive as well.
	 */
	hive = amdgpu_get_xgmi_hive(adev, 0);
	if (hive && adev->gmc.xgmi.num_physical_nodes > 1 &&
	    !mutex_trylock(&hive->reset_lock))
		return 0;

	/* Start with adev pre asic reset first for soft reset check. */
	amdgpu_device_lock_adev(adev);
	r = amdgpu_device_pre_asic_reset(adev,
					 job,
					 &need_full_reset);
	if (r) {
		/*TODO Should we stop ?*/
		DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
			  r, adev->ddev->unique);
		adev->asic_reset_res = r;
	}

	/* Build list of devices to reset */
	if (need_full_reset && adev->gmc.xgmi.num_physical_nodes > 1) {
		if (!hive) {
			amdgpu_device_unlock_adev(adev);
			return -ENODEV;
		}

		/*
		 * In case we are in XGMI hive mode, device reset is done for
		 * all the nodes in the hive to retrain all XGMI links and
		 * hence the reset sequence is executed in a loop on all nodes.
		 */
		device_list_handle = &hive->device_list;
	} else {
		list_add_tail(&adev->gmc.xgmi.head, &device_list);
		device_list_handle = &device_list;
	}

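	/*
	 * amdgpu_do_asic_reset() returns -EAGAIN when the post-reset IB ring
	 * tests fail; in that case need_full_reset has been forced to true
	 * and the pre-reset/reset sequence below is retried from this label.
	 */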
retry:	/* Rest of adevs pre asic reset from XGMI hive. */
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {

		if (tmp_adev == adev)
			continue;

		amdgpu_device_lock_adev(tmp_adev);
		r = amdgpu_device_pre_asic_reset(tmp_adev,
						 NULL,
						 &need_full_reset);
		/*TODO Should we stop ?*/
		if (r) {
			DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
				  r, tmp_adev->ddev->unique);
			tmp_adev->asic_reset_res = r;
		}
	}

	/* Actual ASIC resets if needed. */
	/* TODO Implement XGMI hive reset logic for SRIOV */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_device_reset_sriov(adev, job ? false : true);
		if (r)
			adev->asic_reset_res = r;
	} else {
		r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
		if (r && r == -EAGAIN)
			goto retry;
	}

	/* Post ASIC reset for all devs. */
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
		amdgpu_device_post_asic_reset(tmp_adev, tmp_adev == adev ? job : NULL);

		if (r) {
			/* bad news, how to tell it to userspace? */
			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
		} else {
			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
		}

		amdgpu_device_unlock_adev(tmp_adev);
	}

	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
		mutex_unlock(&hive->reset_lock);

	if (r)
		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
	return r;
}

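/*
 * amdgpu_device_get_min_pci_speed_width - narrowest link to the root port.
 *
 * Walks up the PCI bus hierarchy from the GPU and records the lowest
 * link speed and link width capability found along the path; the result
 * is used as the platform capability when building the PCIe masks.
 */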
static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
						  enum pci_bus_speed *speed,
						  enum pcie_link_width *width)
{
	struct pci_dev *pdev = adev->pdev;
	enum pci_bus_speed cur_speed;
	enum pcie_link_width cur_width;

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	while (pdev) {
		cur_speed = pcie_get_speed_cap(pdev);
		cur_width = pcie_get_width_cap(pdev);

		if (cur_speed != PCI_SPEED_UNKNOWN) {
			if (*speed == PCI_SPEED_UNKNOWN)
				*speed = cur_speed;
			else if (cur_speed < *speed)
				*speed = cur_speed;
		}

		if (cur_width != PCIE_LNK_WIDTH_UNKNOWN) {
			if (*width == PCIE_LNK_WIDTH_UNKNOWN)
				*width = cur_width;
			else if (cur_width < *width)
				*width = cur_width;
		}
		pdev = pci_upstream_bridge(pdev);
	}
}

/**
 * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIE slot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches and stores in the driver the PCIE capabilities (gen speed
 * and lanes) of the slot the device is in. Handles APUs and
 * virtualized environments where PCIE config space may not be available.
 */
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
	struct pci_dev *pdev;
	enum pci_bus_speed speed_cap, platform_speed_cap;
	enum pcie_link_width platform_link_width;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
		return;

	amdgpu_device_get_min_pci_speed_width(adev, &platform_speed_cap,
					      &platform_link_width);

	if (adev->pm.pcie_gen_mask == 0) {
		/* asic caps */
		pdev = adev->pdev;
		speed_cap = pcie_get_speed_cap(pdev);
		if (speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
		} else {
			if (speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
		/* platform caps */
		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
		} else {
			if (platform_speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;

		}
	}
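	/*
	 * The lane-width mask is limited by the narrowest link between the
	 * GPU and the root port, as reported by
	 * amdgpu_device_get_min_pci_speed_width() above.
	 */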
	if (adev->pm.pcie_mlw_mask == 0) {
		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
		} else {
			switch (platform_link_width) {
			case PCIE_LNK_X32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		}
	}
}