drm/amdgpu: add a amdgpu_device_supports_baco helper
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c (linux-2.6-block.git)
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
b1ddf548 28#include <linux/power_supply.h>
0875dc9e 29#include <linux/kthread.h>
fdf2f6c5 30#include <linux/module.h>
31#include <linux/console.h>
32#include <linux/slab.h>
fdf2f6c5 33
4562236b 34#include <drm/drm_atomic_helper.h>
fcd70cd3 35#include <drm/drm_probe_helper.h>
36#include <drm/amdgpu_drm.h>
37#include <linux/vgaarb.h>
38#include <linux/vga_switcheroo.h>
39#include <linux/efi.h>
40#include "amdgpu.h"
f4b373f4 41#include "amdgpu_trace.h"
42#include "amdgpu_i2c.h"
43#include "atom.h"
44#include "amdgpu_atombios.h"
a5bde2f9 45#include "amdgpu_atomfirmware.h"
d0dd7f0c 46#include "amd_pcie.h"
47#ifdef CONFIG_DRM_AMDGPU_SI
48#include "si.h"
49#endif
50#ifdef CONFIG_DRM_AMDGPU_CIK
51#include "cik.h"
52#endif
aaa36a97 53#include "vi.h"
460826e6 54#include "soc15.h"
0a5b8c7b 55#include "nv.h"
d38ceaf9 56#include "bif/bif_4_1_d.h"
9accf2fd 57#include <linux/pci.h>
bec86378 58#include <linux/firmware.h>
89041940 59#include "amdgpu_vf_error.h"
d38ceaf9 60
ba997709 61#include "amdgpu_amdkfd.h"
d2f52ac8 62#include "amdgpu_pm.h"
d38ceaf9 63
5183411b 64#include "amdgpu_xgmi.h"
c030f2e4 65#include "amdgpu_ras.h"
9c7c85f7 66#include "amdgpu_pmu.h"
5183411b 67
68#include <linux/suspend.h>
69
e2a75f88 70MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
3f76dced 71MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
2d2e5e7e 72MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
ad5a67a7 73MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
54c4d17e 74MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
65e60f6e 75MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
b51a26a0 76MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
23c6268e 77MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
ed42cfe1 78MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
42b325e5 79MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
e2a75f88 80
81#define AMDGPU_RESUME_MS 2000
82
050091ab 83const char *amdgpu_asic_name[] = {
84 "TAHITI",
85 "PITCAIRN",
86 "VERDE",
87 "OLAND",
88 "HAINAN",
89 "BONAIRE",
90 "KAVERI",
91 "KABINI",
92 "HAWAII",
93 "MULLINS",
94 "TOPAZ",
95 "TONGA",
48299f95 96 "FIJI",
d38ceaf9 97 "CARRIZO",
139f4917 98 "STONEY",
99 "POLARIS10",
100 "POLARIS11",
c4642a47 101 "POLARIS12",
48ff108d 102 "VEGAM",
d4196f01 103 "VEGA10",
8fab806a 104 "VEGA12",
956fcddc 105 "VEGA20",
2ca8a5d2 106 "RAVEN",
d6c3b24e 107 "ARCTURUS",
1eee4228 108 "RENOIR",
852a6626 109 "NAVI10",
87dbad02 110 "NAVI14",
9802f5d7 111 "NAVI12",
112 "LAST",
113};
114
115/**
116 * DOC: pcie_replay_count
117 *
118 * The amdgpu driver provides a sysfs API for reporting the total number
119 * of PCIe replays (NAKs)
120 * The file pcie_replay_count is used for this and returns the total
121 * number of replays as a sum of the NAKs generated and NAKs received
122 */
123
124static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
125 struct device_attribute *attr, char *buf)
126{
127 struct drm_device *ddev = dev_get_drvdata(dev);
128 struct amdgpu_device *adev = ddev->dev_private;
129 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
130
131 return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
132}
133
134static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
135 amdgpu_device_get_pcie_replay_count, NULL);
136
137static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
138
139/**
140 * amdgpu_device_is_px - Is the device a dGPU with HG/PX power control
141 *
142 * @dev: drm_device pointer
143 *
144 * Returns true if the device is a dGPU with HG/PX power control,
145 * otherwise return false.
146 */
147bool amdgpu_device_is_px(struct drm_device *dev)
148{
149 struct amdgpu_device *adev = dev->dev_private;
150
2f7d10b3 151 if (adev->flags & AMD_IS_PX)
152 return true;
153 return false;
154}
155
156/**
157 * amdgpu_device_supports_baco - Does the device support BACO
158 *
159 * @dev: drm_device pointer
160 *
161 * Returns true if the device supports BACO,
162 * otherwise return false.
163 */
164bool amdgpu_device_supports_baco(struct drm_device *dev)
165{
166 struct amdgpu_device *adev = dev->dev_private;
167
168 return amdgpu_asic_supports_baco(adev);
169}
170
171/**
172 * VRAM access helper functions.
173 *
174 * amdgpu_device_vram_access - read/write a buffer in vram
175 *
176 * @adev: amdgpu_device pointer
177 * @pos: offset of the buffer in vram
178 * @buf: virtual address of the buffer in system memory
179 * @size: read/write size in bytes; the buffer at @buf must be at least @size bytes
180 * @write: true - write to vram, otherwise - read from vram
181 */
182void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
183 uint32_t *buf, size_t size, bool write)
184{
185 uint64_t last;
186 unsigned long flags;
187
188 last = size - 4;
189 for (last += pos; pos <= last; pos += 4) {
190 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
191 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
192 WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
193 if (write)
194 WREG32_NO_KIQ(mmMM_DATA, *buf++);
195 else
196 *buf++ = RREG32_NO_KIQ(mmMM_DATA);
197 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
198 }
199}
200
201/*
202 * MMIO register access helper functions.
203 */
204/**
205 * amdgpu_mm_rreg - read a memory mapped IO register
206 *
207 * @adev: amdgpu_device pointer
208 * @reg: dword aligned register offset
209 * @acc_flags: access flags which require special behavior
210 *
211 * Returns the 32 bit value from the offset specified.
212 */
d38ceaf9 213uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
15d72fd7 214 uint32_t acc_flags)
d38ceaf9 215{
216 uint32_t ret;
217
43ca8efa 218 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
bc992ba5 219 return amdgpu_virt_kiq_rreg(adev, reg);
bc992ba5 220
15d72fd7 221 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
f4b373f4 222 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
223 else {
224 unsigned long flags;
225
226 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
227 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
228 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
229 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
d38ceaf9 230 }
231 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
232 return ret;
233}
234
235/*
236 * MMIO register byte read helper function
237 * @offset: byte offset from MMIO start
238 *
239*/
240
241/**
242 * amdgpu_mm_rreg8 - read a memory mapped IO register
243 *
244 * @adev: amdgpu_device pointer
245 * @offset: byte aligned register offset
246 *
247 * Returns the 8 bit value from the offset specified.
248 */
249uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
250 if (offset < adev->rmmio_size)
251 return (readb(adev->rmmio + offset));
252 BUG();
253}
254
255/*
256 * MMIO register byte write helper function
257 * @offset: byte offset from MMIO start
258 * @value: the value to be written to the register
259 *
260*/
261/**
262 * amdgpu_mm_wreg8 - write to a memory mapped IO register
263 *
264 * @adev: amdgpu_device pointer
265 * @offset: byte aligned register offset
266 * @value: 8 bit value to write
267 *
268 * Writes the value specified to the offset specified.
269 */
270void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
271 if (offset < adev->rmmio_size)
272 writeb(value, adev->rmmio + offset);
273 else
274 BUG();
275}
276
277/**
278 * amdgpu_mm_wreg - write to a memory mapped IO register
279 *
280 * @adev: amdgpu_device pointer
281 * @reg: dword aligned register offset
282 * @v: 32 bit value to write to the register
283 * @acc_flags: access flags which require special behavior
284 *
285 * Writes the value specified to the offset specified.
286 */
d38ceaf9 287void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
15d72fd7 288 uint32_t acc_flags)
d38ceaf9 289{
f4b373f4 290 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
4e99a44e 291
292 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
293 adev->last_mm_index = v;
294 }
295
43ca8efa 296 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
bc992ba5 297 return amdgpu_virt_kiq_wreg(adev, reg, v);
bc992ba5 298
15d72fd7 299 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
300 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
301 else {
302 unsigned long flags;
303
304 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
305 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
306 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
307 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
308 }
309
310 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
311 udelay(500);
312 }
313}
314
315/**
316 * amdgpu_io_rreg - read an IO register
317 *
318 * @adev: amdgpu_device pointer
319 * @reg: dword aligned register offset
320 *
321 * Returns the 32 bit value from the offset specified.
322 */
323u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
324{
325 if ((reg * 4) < adev->rio_mem_size)
326 return ioread32(adev->rio_mem + (reg * 4));
327 else {
328 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
329 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
330 }
331}
332
333/**
334 * amdgpu_io_wreg - write to an IO register
335 *
336 * @adev: amdgpu_device pointer
337 * @reg: dword aligned register offset
338 * @v: 32 bit value to write to the register
339 *
340 * Writes the value specified to the offset specified.
341 */
342void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
343{
344 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
345 adev->last_mm_index = v;
346 }
347
348 if ((reg * 4) < adev->rio_mem_size)
349 iowrite32(v, adev->rio_mem + (reg * 4));
350 else {
351 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
352 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
353 }
354
355 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
356 udelay(500);
357 }
358}
359
360/**
361 * amdgpu_mm_rdoorbell - read a doorbell dword
362 *
363 * @adev: amdgpu_device pointer
364 * @index: doorbell index
365 *
366 * Returns the value in the doorbell aperture at the
367 * requested doorbell index (CIK).
368 */
369u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
370{
371 if (index < adev->doorbell.num_doorbells) {
372 return readl(adev->doorbell.ptr + index);
373 } else {
374 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
375 return 0;
376 }
377}
378
379/**
380 * amdgpu_mm_wdoorbell - write a doorbell dword
381 *
382 * @adev: amdgpu_device pointer
383 * @index: doorbell index
384 * @v: value to write
385 *
386 * Writes @v to the doorbell aperture at the
387 * requested doorbell index (CIK).
388 */
389void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
390{
391 if (index < adev->doorbell.num_doorbells) {
392 writel(v, adev->doorbell.ptr + index);
393 } else {
394 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
395 }
396}
397
398/**
399 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
400 *
401 * @adev: amdgpu_device pointer
402 * @index: doorbell index
403 *
404 * Returns the value in the doorbell aperture at the
405 * requested doorbell index (VEGA10+).
406 */
407u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
408{
409 if (index < adev->doorbell.num_doorbells) {
410 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
411 } else {
412 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
413 return 0;
414 }
415}
416
417/**
418 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
419 *
420 * @adev: amdgpu_device pointer
421 * @index: doorbell index
422 * @v: value to write
423 *
424 * Writes @v to the doorbell aperture at the
425 * requested doorbell index (VEGA10+).
426 */
427void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
428{
429 if (index < adev->doorbell.num_doorbells) {
430 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
431 } else {
432 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
433 }
434}
435
436/**
437 * amdgpu_invalid_rreg - dummy reg read function
438 *
439 * @adev: amdgpu device pointer
440 * @reg: offset of register
441 *
442 * Dummy register read function. Used for register blocks
443 * that certain asics don't have (all asics).
444 * Returns the value in the register.
445 */
446static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
447{
448 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
449 BUG();
450 return 0;
451}
452
453/**
454 * amdgpu_invalid_wreg - dummy reg write function
455 *
456 * @adev: amdgpu device pointer
457 * @reg: offset of register
458 * @v: value to write to the register
459 *
460 * Dummy register write function. Used for register blocks
461 * that certain asics don't have (all asics).
462 */
463static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
464{
465 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
466 reg, v);
467 BUG();
468}
469
470/**
471 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
472 *
473 * @adev: amdgpu device pointer
474 * @reg: offset of register
475 *
476 * Dummy register read function. Used for register blocks
477 * that certain asics don't have (all asics).
478 * Returns the value in the register.
479 */
480static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
481{
482 DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
483 BUG();
484 return 0;
485}
486
487/**
488 * amdgpu_invalid_wreg64 - dummy reg write function
489 *
490 * @adev: amdgpu device pointer
491 * @reg: offset of register
492 * @v: value to write to the register
493 *
494 * Dummy register write function. Used for register blocks
495 * that certain asics don't have (all asics).
496 */
497static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
498{
499 DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
500 reg, v);
501 BUG();
502}
503
504/**
505 * amdgpu_block_invalid_rreg - dummy reg read function
506 *
507 * @adev: amdgpu device pointer
508 * @block: offset of instance
509 * @reg: offset of register
510 *
511 * Dummy register read function. Used for register blocks
512 * that certain asics don't have (all asics).
513 * Returns the value in the register.
514 */
515static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
516 uint32_t block, uint32_t reg)
517{
518 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
519 reg, block);
520 BUG();
521 return 0;
522}
523
524/**
525 * amdgpu_block_invalid_wreg - dummy reg write function
526 *
527 * @adev: amdgpu device pointer
528 * @block: offset of instance
529 * @reg: offset of register
530 * @v: value to write to the register
531 *
532 * Dummy register write function. Used for register blocks
533 * that certain asics don't have (all asics).
534 */
535static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
536 uint32_t block,
537 uint32_t reg, uint32_t v)
538{
539 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
540 reg, block, v);
541 BUG();
542}
543
544/**
545 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
546 *
547 * @adev: amdgpu device pointer
548 *
549 * Allocates a scratch page of VRAM for use by various things in the
550 * driver.
551 */
06ec9070 552static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
d38ceaf9 553{
554 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
555 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
556 &adev->vram_scratch.robj,
557 &adev->vram_scratch.gpu_addr,
558 (void **)&adev->vram_scratch.ptr);
559}
560
561/**
562 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
563 *
564 * @adev: amdgpu device pointer
565 *
566 * Frees the VRAM scratch page.
567 */
06ec9070 568static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
d38ceaf9 569{
078af1a3 570 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
571}
572
573/**
9c3f2b54 574 * amdgpu_device_program_register_sequence - program an array of registers.
575 *
576 * @adev: amdgpu_device pointer
577 * @registers: pointer to the register array
578 * @array_size: size of the register array
579 *
580 * Programs an array of registers with AND and OR masks.
581 * This is a helper for setting golden registers.
582 */
583void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
584 const u32 *registers,
585 const u32 array_size)
586{
587 u32 tmp, reg, and_mask, or_mask;
588 int i;
589
590 if (array_size % 3)
591 return;
592
593 for (i = 0; i < array_size; i +=3) {
594 reg = registers[i + 0];
595 and_mask = registers[i + 1];
596 or_mask = registers[i + 2];
597
598 if (and_mask == 0xffffffff) {
599 tmp = or_mask;
600 } else {
601 tmp = RREG32(reg);
602 tmp &= ~and_mask;
603 if (adev->family >= AMDGPU_FAMILY_AI)
604 tmp |= (or_mask & and_mask);
605 else
606 tmp |= or_mask;
607 }
608 WREG32(reg, tmp);
609 }
610}
611
612/**
613 * amdgpu_device_pci_config_reset - reset the GPU
614 *
615 * @adev: amdgpu_device pointer
616 *
617 * Resets the GPU using the pci config reset sequence.
618 * Only applicable to asics prior to vega10.
619 */
8111c387 620void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
621{
622 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
623}
624
625/*
626 * GPU doorbell aperture helpers function.
627 */
628/**
06ec9070 629 * amdgpu_device_doorbell_init - Init doorbell driver information.
630 *
631 * @adev: amdgpu_device pointer
632 *
633 * Init doorbell driver information (CIK)
634 * Returns 0 on success, error on failure.
635 */
06ec9070 636static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
d38ceaf9 637{
6585661d 638
639 /* No doorbell on SI hardware generation */
640 if (adev->asic_type < CHIP_BONAIRE) {
641 adev->doorbell.base = 0;
642 adev->doorbell.size = 0;
643 adev->doorbell.num_doorbells = 0;
644 adev->doorbell.ptr = NULL;
645 return 0;
646 }
647
648 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
649 return -EINVAL;
650
651 amdgpu_asic_init_doorbell_index(adev);
652
653 /* doorbell bar mapping */
654 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
655 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
656
edf600da 657 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
9564f192 658 adev->doorbell_index.max_assignment+1);
659 if (adev->doorbell.num_doorbells == 0)
660 return -EINVAL;
661
ec3db8a6 662 /* For Vega, reserve and map two pages on doorbell BAR since SDMA
663 * paging queue doorbell uses the second page. The
664 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
665 * doorbells are in the first page. So with paging queue enabled,
666 * the max num_doorbells is increased by one page (0x400 in dwords).
667 */
668 if (adev->asic_type >= CHIP_VEGA10)
88dc26e4 669 adev->doorbell.num_doorbells += 0x400;
ec3db8a6 670
671 adev->doorbell.ptr = ioremap(adev->doorbell.base,
672 adev->doorbell.num_doorbells *
673 sizeof(u32));
674 if (adev->doorbell.ptr == NULL)
d38ceaf9 675 return -ENOMEM;
676
677 return 0;
678}
679
680/**
06ec9070 681 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
682 *
683 * @adev: amdgpu_device pointer
684 *
685 * Tear down doorbell driver information (CIK)
686 */
06ec9070 687static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
688{
689 iounmap(adev->doorbell.ptr);
690 adev->doorbell.ptr = NULL;
691}
692
22cb0164 693
694
695/*
06ec9070 696 * amdgpu_device_wb_*()
455a7bc2 697 * Writeback is the method by which the GPU updates special pages in memory
ea81a173 698 * with the status of certain GPU events (fences, ring pointers,etc.).
699 */
700
701/**
06ec9070 702 * amdgpu_device_wb_fini - Disable Writeback and free memory
703 *
704 * @adev: amdgpu_device pointer
705 *
706 * Disables Writeback and frees the Writeback memory (all asics).
707 * Used at driver shutdown.
708 */
06ec9070 709static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
710{
711 if (adev->wb.wb_obj) {
712 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
713 &adev->wb.gpu_addr,
714 (void **)&adev->wb.wb);
715 adev->wb.wb_obj = NULL;
716 }
717}
718
719/**
06ec9070 720 * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
721 *
722 * @adev: amdgpu_device pointer
723 *
455a7bc2 724 * Initializes writeback and allocates writeback memory (all asics).
725 * Used at driver startup.
726 * Returns 0 on success or an -error on failure.
727 */
06ec9070 728static int amdgpu_device_wb_init(struct amdgpu_device *adev)
729{
730 int r;
731
732 if (adev->wb.wb_obj == NULL) {
733 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
734 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
735 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
736 &adev->wb.wb_obj, &adev->wb.gpu_addr,
737 (void **)&adev->wb.wb);
738 if (r) {
739 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
740 return r;
741 }
742
743 adev->wb.num_wb = AMDGPU_MAX_WB;
744 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
745
746 /* clear wb memory */
73469585 747 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
748 }
749
750 return 0;
751}
752
753/**
131b4b36 754 * amdgpu_device_wb_get - Allocate a wb entry
755 *
756 * @adev: amdgpu_device pointer
757 * @wb: wb index
758 *
759 * Allocate a wb slot for use by the driver (all asics).
760 * Returns 0 on success or -EINVAL on failure.
761 */
131b4b36 762int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
763{
764 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
d38ceaf9 765
97407b63 766 if (offset < adev->wb.num_wb) {
7014285a 767 __set_bit(offset, adev->wb.used);
63ae07ca 768 *wb = offset << 3; /* convert to dw offset */
769 return 0;
770 } else {
771 return -EINVAL;
772 }
773}
774
d38ceaf9 775/**
131b4b36 776 * amdgpu_device_wb_free - Free a wb entry
777 *
778 * @adev: amdgpu_device pointer
779 * @wb: wb index
780 *
781 * Free a wb slot allocated for use by the driver (all asics)
782 */
131b4b36 783void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
d38ceaf9 784{
73469585 785 wb >>= 3;
d38ceaf9 786 if (wb < adev->wb.num_wb)
73469585 787 __clear_bit(wb, adev->wb.used);
788}
789
790/**
791 * amdgpu_device_resize_fb_bar - try to resize FB BAR
792 *
793 * @adev: amdgpu_device pointer
794 *
795 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
796 * to fail, but if any of the BARs is not accessible after the size we abort
797 * driver loading by returning -ENODEV.
798 */
799int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
800{
770d13b1 801 u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
d6895ad3 802 u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
803 struct pci_bus *root;
804 struct resource *res;
805 unsigned i;
806 u16 cmd;
807 int r;
808
0c03b912 809 /* Bypass for VF */
810 if (amdgpu_sriov_vf(adev))
811 return 0;
812
813 /* Check if the root BUS has 64bit memory resources */
814 root = adev->pdev->bus;
815 while (root->parent)
816 root = root->parent;
817
818 pci_bus_for_each_resource(root, res, i) {
0ebb7c54 819 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
820 res->start > 0x100000000ull)
821 break;
822 }
823
824 /* Trying to resize is pointless without a root hub window above 4GB */
825 if (!res)
826 return 0;
827
828 /* Disable memory decoding while we change the BAR addresses and size */
829 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
830 pci_write_config_word(adev->pdev, PCI_COMMAND,
831 cmd & ~PCI_COMMAND_MEMORY);
832
833 /* Free the VRAM and doorbell BAR, we most likely need to move both. */
06ec9070 834 amdgpu_device_doorbell_fini(adev);
835 if (adev->asic_type >= CHIP_BONAIRE)
836 pci_release_resource(adev->pdev, 2);
837
838 pci_release_resource(adev->pdev, 0);
839
840 r = pci_resize_resource(adev->pdev, 0, rbar_size);
841 if (r == -ENOSPC)
842 DRM_INFO("Not enough PCI address space for a large BAR.");
843 else if (r && r != -ENOTSUPP)
844 DRM_ERROR("Problem resizing BAR0 (%d).", r);
845
846 pci_assign_unassigned_bus_resources(adev->pdev->bus);
847
848 /* When the doorbell or fb BAR isn't available we have no chance of
849 * using the device.
850 */
06ec9070 851 r = amdgpu_device_doorbell_init(adev);
852 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
853 return -ENODEV;
854
855 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
856
857 return 0;
858}
a05502e5 859
860/*
861 * GPU helpers function.
862 */
863/**
39c640c0 864 * amdgpu_device_need_post - check if the hw need post or not
865 *
866 * @adev: amdgpu_device pointer
867 *
868 * Check if the asic has been initialized (all asics) at driver startup
869 * or post is needed if hw reset is performed.
870 * Returns true if post is needed, false if not.
d38ceaf9 871 */
39c640c0 872bool amdgpu_device_need_post(struct amdgpu_device *adev)
873{
874 uint32_t reg;
875
876 if (amdgpu_sriov_vf(adev))
877 return false;
878
879 if (amdgpu_passthrough(adev)) {
880 /* for FIJI: in the whole-GPU pass-through virtualization case, after a VM reboot
881 * some old SMC firmware still needs the driver to do a vPost, otherwise the GPU
882 * hangs; SMC firmware versions above 22.15 don't have this flaw, so force
883 * vPost to be executed for SMC versions below 22.15
884 */
885 if (adev->asic_type == CHIP_FIJI) {
886 int err;
887 uint32_t fw_ver;
888 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
889 /* force vPost if an error occurred */
890 if (err)
891 return true;
892
893 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
894 if (fw_ver < 0x00160e00)
895 return true;
bec86378 896 }
bec86378 897 }
91fe77eb 898
899 if (adev->has_hw_reset) {
900 adev->has_hw_reset = false;
901 return true;
902 }
903
904 /* bios scratch used on CIK+ */
905 if (adev->asic_type >= CHIP_BONAIRE)
906 return amdgpu_atombios_scratch_need_asic_init(adev);
907
908 /* check MEM_SIZE for older asics */
909 reg = amdgpu_asic_get_config_memsize(adev);
910
911 if ((reg != 0) && (reg != 0xffffffff))
912 return false;
913
914 return true;
915}
916
917/* if we get transitioned to only one device, take VGA back */
918/**
06ec9070 919 * amdgpu_device_vga_set_decode - enable/disable vga decode
920 *
921 * @cookie: amdgpu_device pointer
922 * @state: enable/disable vga decode
923 *
924 * Enable/disable vga decode (all asics).
925 * Returns VGA resource flags.
926 */
06ec9070 927static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
928{
929 struct amdgpu_device *adev = cookie;
930 amdgpu_asic_set_vga_state(adev, state);
931 if (state)
932 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
933 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
934 else
935 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
936}
937
938/**
939 * amdgpu_device_check_block_size - validate the vm block size
940 *
941 * @adev: amdgpu_device pointer
942 *
943 * Validates the vm block size specified via module parameter.
944 * The vm block size defines number of bits in page table versus page directory,
945 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
946 * page table and the remaining bits are in the page directory.
947 */
06ec9070 948static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
949{
950 /* defines number of bits in page table versus page directory,
951 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
952 * page table and the remaining bits are in the page directory */
953 if (amdgpu_vm_block_size == -1)
954 return;
a1adf8be 955
bab4fee7 956 if (amdgpu_vm_block_size < 9) {
957 dev_warn(adev->dev, "VM page table size (%d) too small\n",
958 amdgpu_vm_block_size);
97489129 959 amdgpu_vm_block_size = -1;
a1adf8be 960 }
961}
962
963/**
964 * amdgpu_device_check_vm_size - validate the vm size
965 *
966 * @adev: amdgpu_device pointer
967 *
968 * Validates the vm size in GB specified via module parameter.
969 * The VM size is the size of the GPU virtual memory space in GB.
970 */
06ec9070 971static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
83ca145d 972{
973 /* no need to check the default value */
974 if (amdgpu_vm_size == -1)
975 return;
976
977 if (amdgpu_vm_size < 1) {
978 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
979 amdgpu_vm_size);
f3368128 980 amdgpu_vm_size = -1;
83ca145d 981 }
982}
983
984static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
985{
986 struct sysinfo si;
987 bool is_os_64 = (sizeof(void *) == 8) ? true : false;
988 uint64_t total_memory;
989 uint64_t dram_size_seven_GB = 0x1B8000000;
990 uint64_t dram_size_three_GB = 0xB8000000;
991
992 if (amdgpu_smu_memory_pool_size == 0)
993 return;
994
995 if (!is_os_64) {
996 DRM_WARN("Not 64-bit OS, feature not supported\n");
997 goto def_value;
998 }
999 si_meminfo(&si);
1000 total_memory = (uint64_t)si.totalram * si.mem_unit;
1001
1002 if ((amdgpu_smu_memory_pool_size == 1) ||
1003 (amdgpu_smu_memory_pool_size == 2)) {
1004 if (total_memory < dram_size_three_GB)
1005 goto def_value1;
1006 } else if ((amdgpu_smu_memory_pool_size == 4) ||
1007 (amdgpu_smu_memory_pool_size == 8)) {
1008 if (total_memory < dram_size_seven_GB)
1009 goto def_value1;
1010 } else {
1011 DRM_WARN("Smu memory pool size not supported\n");
1012 goto def_value;
1013 }
1014 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1015
1016 return;
1017
1018def_value1:
1019 DRM_WARN("Not enough system memory\n");
1020def_value:
1021 adev->pm.smu_prv_buffer_size = 0;
1022}
1023
d38ceaf9 1024/**
06ec9070 1025 * amdgpu_device_check_arguments - validate module params
1026 *
1027 * @adev: amdgpu_device pointer
1028 *
1029 * Validates certain module parameters and updates
1030 * the associated values used by the driver (all asics).
1031 */
912dfc84 1032static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
d38ceaf9 1033{
1034 int ret = 0;
1035
1036 if (amdgpu_sched_jobs < 4) {
1037 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1038 amdgpu_sched_jobs);
1039 amdgpu_sched_jobs = 4;
76117507 1040 } else if (!is_power_of_2(amdgpu_sched_jobs)){
1041 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1042 amdgpu_sched_jobs);
1043 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1044 }
d38ceaf9 1045
83e74db6 1046 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1047 /* gart size must be greater or equal to 32M */
1048 dev_warn(adev->dev, "gart size (%d) too small\n",
1049 amdgpu_gart_size);
83e74db6 1050 amdgpu_gart_size = -1;
1051 }
1052
36d38372 1053 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
c4e1a13a 1054 /* gtt size must be greater or equal to 32M */
1055 dev_warn(adev->dev, "gtt size (%d) too small\n",
1056 amdgpu_gtt_size);
1057 amdgpu_gtt_size = -1;
1058 }
1059
1060 /* valid range is between 4 and 9 inclusive */
1061 if (amdgpu_vm_fragment_size != -1 &&
1062 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1063 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1064 amdgpu_vm_fragment_size = -1;
1065 }
1066
1067 amdgpu_device_check_smu_prv_buffer_size(adev);
1068
06ec9070 1069 amdgpu_device_check_vm_size(adev);
d38ceaf9 1070
06ec9070 1071 amdgpu_device_check_block_size(adev);
6a7f76e7 1072
19aede77 1073 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1074
1075 return ret;
1076}
1077
1078/**
1079 * amdgpu_switcheroo_set_state - set switcheroo state
1080 *
1081 * @pdev: pci dev pointer
1694467b 1082 * @state: vga_switcheroo state
1083 *
1084 * Callback for the switcheroo driver. Suspends or resumes
1085 * the asics before or after they are powered up using ACPI methods.
1086 */
1087static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1088{
1089 struct drm_device *dev = pci_get_drvdata(pdev);
1090
1091 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1092 return;
1093
1094 if (state == VGA_SWITCHEROO_ON) {
7ca85295 1095 pr_info("amdgpu: switched on\n");
1096 /* don't suspend or resume card normally */
1097 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1098
810ddc3a 1099 amdgpu_device_resume(dev, true, true);
d38ceaf9 1100
1101 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1102 drm_kms_helper_poll_enable(dev);
1103 } else {
7ca85295 1104 pr_info("amdgpu: switched off\n");
1105 drm_kms_helper_poll_disable(dev);
1106 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
810ddc3a 1107 amdgpu_device_suspend(dev, true, true);
1108 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1109 }
1110}
1111
1112/**
1113 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1114 *
1115 * @pdev: pci dev pointer
1116 *
1117 * Callback for the switcheroo driver. Check if the switcheroo
1118 * state can be changed.
1119 * Returns true if the state can be changed, false if not.
1120 */
1121static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1122{
1123 struct drm_device *dev = pci_get_drvdata(pdev);
1124
1125 /*
1126 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1127 * locking inversion with the driver load path. And the access here is
1128 * completely racy anyway. So don't bother with locking for now.
1129 */
1130 return dev->open_count == 0;
1131}
1132
1133static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1134 .set_gpu_state = amdgpu_switcheroo_set_state,
1135 .reprobe = NULL,
1136 .can_switch = amdgpu_switcheroo_can_switch,
1137};
1138
1139/**
1140 * amdgpu_device_ip_set_clockgating_state - set the CG state
1141 *
87e3f136 1142 * @dev: amdgpu_device pointer
1143 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1144 * @state: clockgating state (gate or ungate)
1145 *
1146 * Sets the requested clockgating state for all instances of
1147 * the hardware IP specified.
1148 * Returns the error code from the last instance.
1149 */
43fa561f 1150int amdgpu_device_ip_set_clockgating_state(void *dev,
1151 enum amd_ip_block_type block_type,
1152 enum amd_clockgating_state state)
d38ceaf9 1153{
43fa561f 1154 struct amdgpu_device *adev = dev;
1155 int i, r = 0;
1156
1157 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1158 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1159 continue;
1160 if (adev->ip_blocks[i].version->type != block_type)
1161 continue;
1162 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1163 continue;
1164 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1165 (void *)adev, state);
1166 if (r)
1167 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1168 adev->ip_blocks[i].version->funcs->name, r);
1169 }
1170 return r;
1171}
1172
1173/**
1174 * amdgpu_device_ip_set_powergating_state - set the PG state
1175 *
87e3f136 1176 * @dev: amdgpu_device pointer
1177 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1178 * @state: powergating state (gate or ungate)
1179 *
1180 * Sets the requested powergating state for all instances of
1181 * the hardware IP specified.
1182 * Returns the error code from the last instance.
1183 */
43fa561f 1184int amdgpu_device_ip_set_powergating_state(void *dev,
1185 enum amd_ip_block_type block_type,
1186 enum amd_powergating_state state)
d38ceaf9 1187{
43fa561f 1188 struct amdgpu_device *adev = dev;
1189 int i, r = 0;
1190
1191 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1192 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1193 continue;
1194 if (adev->ip_blocks[i].version->type != block_type)
1195 continue;
1196 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1197 continue;
1198 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1199 (void *)adev, state);
1200 if (r)
1201 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1202 adev->ip_blocks[i].version->funcs->name, r);
1203 }
1204 return r;
1205}
1206
1207/**
1208 * amdgpu_device_ip_get_clockgating_state - get the CG state
1209 *
1210 * @adev: amdgpu_device pointer
1211 * @flags: clockgating feature flags
1212 *
1213 * Walks the list of IPs on the device and updates the clockgating
1214 * flags for each IP.
1215 * Updates @flags with the feature flags for each hardware IP where
1216 * clockgating is enabled.
1217 */
1218void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1219 u32 *flags)
1220{
1221 int i;
1222
1223 for (i = 0; i < adev->num_ip_blocks; i++) {
1224 if (!adev->ip_blocks[i].status.valid)
1225 continue;
1226 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1227 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1228 }
1229}
1230
1231/**
1232 * amdgpu_device_ip_wait_for_idle - wait for idle
1233 *
1234 * @adev: amdgpu_device pointer
1235 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1236 *
1237 * Waits for the request hardware IP to be idle.
1238 * Returns 0 for success or a negative error code on failure.
1239 */
1240int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1241 enum amd_ip_block_type block_type)
1242{
1243 int i, r;
1244
1245 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1246 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1247 continue;
1248 if (adev->ip_blocks[i].version->type == block_type) {
1249 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1250 if (r)
1251 return r;
1252 break;
1253 }
1254 }
1255 return 0;
1256
1257}
1258
1259/**
1260 * amdgpu_device_ip_is_idle - is the hardware IP idle
1261 *
1262 * @adev: amdgpu_device pointer
1263 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1264 *
1265 * Check if the hardware IP is idle or not.
1266 * Returns true if the IP is idle, false if not.
1267 */
1268bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1269 enum amd_ip_block_type block_type)
1270{
1271 int i;
1272
1273 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1274 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1275 continue;
1276 if (adev->ip_blocks[i].version->type == block_type)
1277 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1278 }
1279 return true;
1280
1281}
1282
1283/**
1284 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1285 *
1286 * @adev: amdgpu_device pointer
87e3f136 1287 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1288 *
1289 * Returns a pointer to the hardware IP block structure
1290 * if it exists for the asic, otherwise NULL.
1291 */
1292struct amdgpu_ip_block *
1293amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1294 enum amd_ip_block_type type)
1295{
1296 int i;
1297
1298 for (i = 0; i < adev->num_ip_blocks; i++)
a1255107 1299 if (adev->ip_blocks[i].version->type == type)
1300 return &adev->ip_blocks[i];
1301
1302 return NULL;
1303}
1304
1305/**
2990a1fc 1306 * amdgpu_device_ip_block_version_cmp
1307 *
1308 * @adev: amdgpu_device pointer
5fc3aeeb 1309 * @type: enum amd_ip_block_type
1310 * @major: major version
1311 * @minor: minor version
1312 *
1313 * return 0 if equal or greater
1314 * return 1 if smaller or the ip_block doesn't exist
1315 */
1316int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1317 enum amd_ip_block_type type,
1318 u32 major, u32 minor)
d38ceaf9 1319{
2990a1fc 1320 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
d38ceaf9 1321
1322 if (ip_block && ((ip_block->version->major > major) ||
1323 ((ip_block->version->major == major) &&
1324 (ip_block->version->minor >= minor))))
1325 return 0;
1326
1327 return 1;
1328}
1329
a1255107 1330/**
2990a1fc 1331 * amdgpu_device_ip_block_add
1332 *
1333 * @adev: amdgpu_device pointer
1334 * @ip_block_version: pointer to the IP to add
1335 *
1336 * Adds the IP block driver information to the collection of IPs
1337 * on the asic.
1338 */
1339int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1340 const struct amdgpu_ip_block_version *ip_block_version)
1341{
1342 if (!ip_block_version)
1343 return -EINVAL;
1344
e966a725 1345 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1346 ip_block_version->funcs->name);
1347
1348 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1349
1350 return 0;
1351}
1352
1353/**
1354 * amdgpu_device_enable_virtual_display - enable virtual display feature
1355 *
1356 * @adev: amdgpu_device pointer
1357 *
1358 * Enables the virtual display feature if the user has enabled it via
1359 * the module parameter virtual_display. This feature provides a virtual
1360 * display hardware on headless boards or in virtualized environments.
1361 * This function parses and validates the configuration string specified by
1362 * the user and configures the virtual display configuration (number of
1363 * virtual connectors, crtcs, etc.) specified.
1364 */
483ef985 1365static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1366{
1367 adev->enable_virtual_display = false;
1368
1369 if (amdgpu_virtual_display) {
1370 struct drm_device *ddev = adev->ddev;
1371 const char *pci_address_name = pci_name(ddev->pdev);
0f66356d 1372 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1373
1374 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1375 pciaddstr_tmp = pciaddstr;
1376 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1377 pciaddname = strsep(&pciaddname_tmp, ",");
1378 if (!strcmp("all", pciaddname)
1379 || !strcmp(pci_address_name, pciaddname)) {
1380 long num_crtc;
1381 int res = -1;
1382
9accf2fd 1383 adev->enable_virtual_display = true;
1384
1385 if (pciaddname_tmp)
1386 res = kstrtol(pciaddname_tmp, 10,
1387 &num_crtc);
1388
1389 if (!res) {
1390 if (num_crtc < 1)
1391 num_crtc = 1;
1392 if (num_crtc > 6)
1393 num_crtc = 6;
1394 adev->mode_info.num_crtc = num_crtc;
1395 } else {
1396 adev->mode_info.num_crtc = 1;
1397 }
1398 break;
1399 }
1400 }
1401
1402 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1403 amdgpu_virtual_display, pci_address_name,
1404 adev->enable_virtual_display, adev->mode_info.num_crtc);
1405
1406 kfree(pciaddstr);
1407 }
1408}
1409
1410/**
1411 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1412 *
1413 * @adev: amdgpu_device pointer
1414 *
1415 * Parses the asic configuration parameters specified in the gpu info
1416 * firmware and makes them available to the driver for use in configuring
1417 * the asic.
1418 * Returns 0 on success, -EINVAL on failure.
1419 */
1420static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1421{
1422 const char *chip_name;
1423 char fw_name[30];
1424 int err;
1425 const struct gpu_info_firmware_header_v1_0 *hdr;
1426
1427 adev->firmware.gpu_info_fw = NULL;
1428
1429 switch (adev->asic_type) {
1430 case CHIP_TOPAZ:
1431 case CHIP_TONGA:
1432 case CHIP_FIJI:
e2a75f88 1433 case CHIP_POLARIS10:
cc07f18d 1434 case CHIP_POLARIS11:
e2a75f88 1435 case CHIP_POLARIS12:
cc07f18d 1436 case CHIP_VEGAM:
1437 case CHIP_CARRIZO:
1438 case CHIP_STONEY:
1439#ifdef CONFIG_DRM_AMDGPU_SI
1440 case CHIP_VERDE:
1441 case CHIP_TAHITI:
1442 case CHIP_PITCAIRN:
1443 case CHIP_OLAND:
1444 case CHIP_HAINAN:
1445#endif
1446#ifdef CONFIG_DRM_AMDGPU_CIK
1447 case CHIP_BONAIRE:
1448 case CHIP_HAWAII:
1449 case CHIP_KAVERI:
1450 case CHIP_KABINI:
1451 case CHIP_MULLINS:
1452#endif
27c0bc71 1453 case CHIP_VEGA20:
1454 default:
1455 return 0;
1456 case CHIP_VEGA10:
1457 chip_name = "vega10";
1458 break;
1459 case CHIP_VEGA12:
1460 chip_name = "vega12";
1461 break;
2d2e5e7e 1462 case CHIP_RAVEN:
1463 if (adev->rev_id >= 8)
1464 chip_name = "raven2";
1465 else if (adev->pdev->device == 0x15d8)
1466 chip_name = "picasso";
1467 else
1468 chip_name = "raven";
2d2e5e7e 1469 break;
1470 case CHIP_ARCTURUS:
1471 chip_name = "arcturus";
1472 break;
1473 case CHIP_RENOIR:
1474 chip_name = "renoir";
1475 break;
1476 case CHIP_NAVI10:
1477 chip_name = "navi10";
1478 break;
1479 case CHIP_NAVI14:
1480 chip_name = "navi14";
1481 break;
1482 case CHIP_NAVI12:
1483 chip_name = "navi12";
1484 break;
1485 }
1486
1487 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
ab4fe3e1 1488 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1489 if (err) {
1490 dev_err(adev->dev,
1491 "Failed to load gpu_info firmware \"%s\"\n",
1492 fw_name);
1493 goto out;
1494 }
ab4fe3e1 1495 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1496 if (err) {
1497 dev_err(adev->dev,
1498 "Failed to validate gpu_info firmware \"%s\"\n",
1499 fw_name);
1500 goto out;
1501 }
1502
ab4fe3e1 1503 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1504 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1505
1506 switch (hdr->version_major) {
1507 case 1:
1508 {
1509 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
ab4fe3e1 1510 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1511 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1512
1513 if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
1514 goto parse_soc_bounding_box;
1515
1516 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1517 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1518 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1519 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
e2a75f88 1520 adev->gfx.config.max_texture_channel_caches =
1521 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1522 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1523 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1524 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1525 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
e2a75f88 1526 adev->gfx.config.double_offchip_lds_buf =
1527 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1528 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1529 adev->gfx.cu_info.max_waves_per_simd =
1530 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1531 adev->gfx.cu_info.max_scratch_slots_per_cu =
1532 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1533 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
48321c3d 1534 if (hdr->version_minor >= 1) {
1535 const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1536 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1537 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1538 adev->gfx.config.num_sc_per_sh =
1539 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1540 adev->gfx.config.num_packer_per_sc =
1541 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1542 }
1543
1544parse_soc_bounding_box:
1545 /*
1546 * soc bounding box info is not integrated in discovery table,
1547 * we always need to parse it from gpu info firmware.
1548 */
1549 if (hdr->version_minor == 2) {
1550 const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1551 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1552 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1553 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1554 }
1555 break;
1556 }
1557 default:
1558 dev_err(adev->dev,
1559 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1560 err = -EINVAL;
1561 goto out;
1562 }
1563out:
1564 return err;
1565}
1566
1567/**
1568 * amdgpu_device_ip_early_init - run early init for hardware IPs
1569 *
1570 * @adev: amdgpu_device pointer
1571 *
1572 * Early initialization pass for hardware IPs. The hardware IPs that make
1573 * up each asic are discovered and each IP's early_init callback is run. This
1574 * is the first stage in initializing the asic.
1575 * Returns 0 on success, negative error code on failure.
1576 */
06ec9070 1577static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
d38ceaf9 1578{
aaa36a97 1579 int i, r;
d38ceaf9 1580
483ef985 1581 amdgpu_device_enable_virtual_display(adev);
a6be7570 1582
d38ceaf9 1583 switch (adev->asic_type) {
1584 case CHIP_TOPAZ:
1585 case CHIP_TONGA:
48299f95 1586 case CHIP_FIJI:
2cc0c0b5 1587 case CHIP_POLARIS10:
32cc7e53 1588 case CHIP_POLARIS11:
c4642a47 1589 case CHIP_POLARIS12:
32cc7e53 1590 case CHIP_VEGAM:
aaa36a97 1591 case CHIP_CARRIZO:
1592 case CHIP_STONEY:
1593 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
1594 adev->family = AMDGPU_FAMILY_CZ;
1595 else
1596 adev->family = AMDGPU_FAMILY_VI;
1597
1598 r = vi_set_ip_blocks(adev);
1599 if (r)
1600 return r;
1601 break;
1602#ifdef CONFIG_DRM_AMDGPU_SI
1603 case CHIP_VERDE:
1604 case CHIP_TAHITI:
1605 case CHIP_PITCAIRN:
1606 case CHIP_OLAND:
1607 case CHIP_HAINAN:
295d0daf 1608 adev->family = AMDGPU_FAMILY_SI;
1609 r = si_set_ip_blocks(adev);
1610 if (r)
1611 return r;
1612 break;
1613#endif
1614#ifdef CONFIG_DRM_AMDGPU_CIK
1615 case CHIP_BONAIRE:
1616 case CHIP_HAWAII:
1617 case CHIP_KAVERI:
1618 case CHIP_KABINI:
1619 case CHIP_MULLINS:
1620 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1621 adev->family = AMDGPU_FAMILY_CI;
1622 else
1623 adev->family = AMDGPU_FAMILY_KV;
1624
1625 r = cik_set_ip_blocks(adev);
1626 if (r)
1627 return r;
1628 break;
1629#endif
1630 case CHIP_VEGA10:
1631 case CHIP_VEGA12:
e4bd8170 1632 case CHIP_VEGA20:
e48a3cd9 1633 case CHIP_RAVEN:
61cf44c1 1634 case CHIP_ARCTURUS:
1635 case CHIP_RENOIR:
1636 if (adev->asic_type == CHIP_RAVEN ||
1637 adev->asic_type == CHIP_RENOIR)
1638 adev->family = AMDGPU_FAMILY_RV;
1639 else
1640 adev->family = AMDGPU_FAMILY_AI;
1641
1642 r = soc15_set_ip_blocks(adev);
1643 if (r)
1644 return r;
1645 break;
0a5b8c7b 1646 case CHIP_NAVI10:
7ecb5cd4 1647 case CHIP_NAVI14:
4808cf9c 1648 case CHIP_NAVI12:
1649 adev->family = AMDGPU_FAMILY_NV;
1650
1651 r = nv_set_ip_blocks(adev);
1652 if (r)
1653 return r;
1654 break;
1655 default:
1656 /* FIXME: not supported yet */
1657 return -EINVAL;
1658 }
1659
1660 r = amdgpu_device_parse_gpu_info_fw(adev);
1661 if (r)
1662 return r;
1663
1664 if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
1665 amdgpu_discovery_get_gfx_info(adev);
1666
1884734a 1667 amdgpu_amdkfd_device_probe(adev);
1668
1669 if (amdgpu_sriov_vf(adev)) {
1670 r = amdgpu_virt_request_full_gpu(adev, true);
1671 if (r)
5ffa61c1 1672 return -EAGAIN;
1673 }
1674
3b94fb10 1675 adev->pm.pp_feature = amdgpu_pp_feature_mask;
a35ad98b 1676 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
00544006 1677 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
00f54b97 1678
1679 for (i = 0; i < adev->num_ip_blocks; i++) {
1680 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1681 DRM_ERROR("disabled ip block: %d <%s>\n",
1682 i, adev->ip_blocks[i].version->funcs->name);
a1255107 1683 adev->ip_blocks[i].status.valid = false;
d38ceaf9 1684 } else {
1685 if (adev->ip_blocks[i].version->funcs->early_init) {
1686 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2c1a2784 1687 if (r == -ENOENT) {
a1255107 1688 adev->ip_blocks[i].status.valid = false;
2c1a2784 1689 } else if (r) {
1690 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1691 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1692 return r;
2c1a2784 1693 } else {
a1255107 1694 adev->ip_blocks[i].status.valid = true;
2c1a2784 1695 }
974e6b64 1696 } else {
a1255107 1697 adev->ip_blocks[i].status.valid = true;
d38ceaf9 1698 }
d38ceaf9 1699 }
1700 /* get the vbios after the asic_funcs are set up */
1701 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
1702 /* Read BIOS */
1703 if (!amdgpu_get_bios(adev))
1704 return -EINVAL;
1705
1706 r = amdgpu_atombios_init(adev);
1707 if (r) {
1708 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
1709 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
1710 return r;
1711 }
1712 }
1713 }
1714
1715 adev->cg_flags &= amdgpu_cg_mask;
1716 adev->pg_flags &= amdgpu_pg_mask;
1717
1718 return 0;
1719}
1720
1721static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
1722{
1723 int i, r;
1724
1725 for (i = 0; i < adev->num_ip_blocks; i++) {
1726 if (!adev->ip_blocks[i].status.sw)
1727 continue;
1728 if (adev->ip_blocks[i].status.hw)
1729 continue;
1730 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2d11fd3f 1731 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
1732 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
1733 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1734 if (r) {
1735 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1736 adev->ip_blocks[i].version->funcs->name, r);
1737 return r;
1738 }
1739 adev->ip_blocks[i].status.hw = true;
1740 }
1741 }
1742
1743 return 0;
1744}
1745
1746static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
1747{
1748 int i, r;
1749
1750 for (i = 0; i < adev->num_ip_blocks; i++) {
1751 if (!adev->ip_blocks[i].status.sw)
1752 continue;
1753 if (adev->ip_blocks[i].status.hw)
1754 continue;
1755 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1756 if (r) {
1757 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1758 adev->ip_blocks[i].version->funcs->name, r);
1759 return r;
1760 }
1761 adev->ip_blocks[i].status.hw = true;
1762 }
1763
1764 return 0;
1765}
1766
7a3e0bb2
RZ
1767static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
1768{
1769 int r = 0;
1770 int i;
80f41f84 1771 uint32_t smu_version;
7a3e0bb2
RZ
1772
1773 if (adev->asic_type >= CHIP_VEGA10) {
1774 for (i = 0; i < adev->num_ip_blocks; i++) {
482f0e53
ML
1775 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
1776 continue;
1777
 1778 /* no need to do the fw loading again if already done */
1779 if (adev->ip_blocks[i].status.hw == true)
1780 break;
1781
1782 if (adev->in_gpu_reset || adev->in_suspend) {
1783 r = adev->ip_blocks[i].version->funcs->resume(adev);
1784 if (r) {
1785 DRM_ERROR("resume of IP block <%s> failed %d\n",
7a3e0bb2 1786 adev->ip_blocks[i].version->funcs->name, r);
482f0e53
ML
1787 return r;
1788 }
1789 } else {
1790 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1791 if (r) {
1792 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1793 adev->ip_blocks[i].version->funcs->name, r);
1794 return r;
7a3e0bb2 1795 }
7a3e0bb2 1796 }
482f0e53
ML
1797
1798 adev->ip_blocks[i].status.hw = true;
1799 break;
7a3e0bb2
RZ
1800 }
1801 }
482f0e53 1802
80f41f84 1803 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
7a3e0bb2 1804
80f41f84 1805 return r;
7a3e0bb2
RZ
1806}
1807
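/*
 * Illustrative note: hardware bring-up is split into three steps, which
 * amdgpu_device_ip_init() below runs in this order:
 *
 *	amdgpu_device_ip_hw_init_phase1(adev);	// COMMON, IH (and PSP on SR-IOV)
 *	amdgpu_device_fw_loading(adev);		// PSP hw_init/resume + SMU firmware
 *	amdgpu_device_ip_hw_init_phase2(adev);	// every remaining block
 *
 * so that blocks initialized in phase 2 can assume firmware has already
 * been loaded.
 */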
e3ecdffa
AD
1808/**
1809 * amdgpu_device_ip_init - run init for hardware IPs
1810 *
1811 * @adev: amdgpu_device pointer
1812 *
1813 * Main initialization pass for hardware IPs. The list of all the hardware
1814 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
1815 * are run. sw_init initializes the software state associated with each IP
1816 * and hw_init initializes the hardware associated with each IP.
1817 * Returns 0 on success, negative error code on failure.
1818 */
06ec9070 1819static int amdgpu_device_ip_init(struct amdgpu_device *adev)
d38ceaf9
AD
1820{
1821 int i, r;
1822
c030f2e4 1823 r = amdgpu_ras_init(adev);
1824 if (r)
1825 return r;
1826
d38ceaf9 1827 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1828 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1829 continue;
a1255107 1830 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2c1a2784 1831 if (r) {
a1255107
AD
1832 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1833 adev->ip_blocks[i].version->funcs->name, r);
72d3f592 1834 goto init_failed;
2c1a2784 1835 }
a1255107 1836 adev->ip_blocks[i].status.sw = true;
bfca0289 1837
d38ceaf9 1838 /* need to do gmc hw init early so we can allocate gpu mem */
a1255107 1839 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
06ec9070 1840 r = amdgpu_device_vram_scratch_init(adev);
2c1a2784
AD
1841 if (r) {
1842 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
72d3f592 1843 goto init_failed;
2c1a2784 1844 }
a1255107 1845 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784
AD
1846 if (r) {
1847 DRM_ERROR("hw_init %d failed %d\n", i, r);
72d3f592 1848 goto init_failed;
2c1a2784 1849 }
06ec9070 1850 r = amdgpu_device_wb_init(adev);
2c1a2784 1851 if (r) {
06ec9070 1852 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
72d3f592 1853 goto init_failed;
2c1a2784 1854 }
a1255107 1855 adev->ip_blocks[i].status.hw = true;
2493664f
ML
1856
1857 /* right after GMC hw init, we create CSA */
f92d5c61 1858 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
1e256e27
RZ
1859 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
1860 AMDGPU_GEM_DOMAIN_VRAM,
1861 AMDGPU_CSA_SIZE);
2493664f
ML
1862 if (r) {
1863 DRM_ERROR("allocate CSA failed %d\n", r);
72d3f592 1864 goto init_failed;
2493664f
ML
1865 }
1866 }
d38ceaf9
AD
1867 }
1868 }
1869
533aed27
AG
1870 r = amdgpu_ib_pool_init(adev);
1871 if (r) {
1872 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
1873 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
1874 goto init_failed;
1875 }
1876
c8963ea4
RZ
1877 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
1878 if (r)
72d3f592 1879 goto init_failed;
0a4f2520
RZ
1880
1881 r = amdgpu_device_ip_hw_init_phase1(adev);
1882 if (r)
72d3f592 1883 goto init_failed;
0a4f2520 1884
7a3e0bb2
RZ
1885 r = amdgpu_device_fw_loading(adev);
1886 if (r)
72d3f592 1887 goto init_failed;
7a3e0bb2 1888
0a4f2520
RZ
1889 r = amdgpu_device_ip_hw_init_phase2(adev);
1890 if (r)
72d3f592 1891 goto init_failed;
d38ceaf9 1892
121a2bc6
AG
1893 /*
 1894 * retired pages will be loaded from eeprom and reserved here;
 1895 * this should be called after amdgpu_device_ip_hw_init_phase2, since
 1896 * for some ASICs the RAS EEPROM code relies on the SMU being fully
 1897 * functional for I2C communication, which is only true at this point.
 1898 * recovery_init may fail, but it can free all resources allocated by
 1899 * itself and its failure should not stop the amdgpu init process.
 1900 *
 1901 * Note: theoretically, this should be called before all vram allocations
 1902 * to protect retired pages from being reused.
1903 */
1904 amdgpu_ras_recovery_init(adev);
1905
3e2e2ab5
HZ
1906 if (adev->gmc.xgmi.num_physical_nodes > 1)
1907 amdgpu_xgmi_add_device(adev);
1884734a 1908 amdgpu_amdkfd_device_init(adev);
c6332b97 1909
72d3f592 1910init_failed:
d3c117e5 1911 if (amdgpu_sriov_vf(adev)) {
72d3f592
ED
1912 if (!r)
1913 amdgpu_virt_init_data_exchange(adev);
c6332b97 1914 amdgpu_virt_release_full_gpu(adev, true);
d3c117e5 1915 }
c6332b97 1916
72d3f592 1917 return r;
d38ceaf9
AD
1918}
1919
e3ecdffa
AD
1920/**
1921 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
1922 *
1923 * @adev: amdgpu_device pointer
1924 *
1925 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
1926 * this function before a GPU reset. If the value is retained after a
 1927 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
1928 */
06ec9070 1929static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
0c49e0b8
CZ
1930{
1931 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1932}
1933
e3ecdffa
AD
1934/**
1935 * amdgpu_device_check_vram_lost - check if vram is valid
1936 *
1937 * @adev: amdgpu_device pointer
1938 *
1939 * Checks the reset magic value written to the gart pointer in VRAM.
1940 * The driver calls this after a GPU reset to see if the contents of
 1941 * VRAM have been lost or not.
 1942 * Returns true if VRAM is lost, false if not.
1943 */
06ec9070 1944static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
0c49e0b8
CZ
1945{
1946 return !!memcmp(adev->gart.ptr, adev->reset_magic,
1947 AMDGPU_RESET_MAGIC_NUM);
1948}
1949
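/*
 * A minimal sketch (not from this file) of how the two helpers above are
 * meant to pair up around a GPU reset:
 *
 *	amdgpu_device_fill_reset_magic(adev);		// before the reset
 *	...reset the ASIC...
 *	vram_lost = amdgpu_device_check_vram_lost(adev);
 *
 * If the magic no longer matches, VRAM contents were lost across the
 * reset and any buffers that matter have to be restored by other means.
 */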
e3ecdffa 1950/**
1112a46b 1951 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
e3ecdffa
AD
1952 *
1953 * @adev: amdgpu_device pointer
b8b72130 1954 * @state: clockgating state (gate or ungate)
e3ecdffa 1955 *
e3ecdffa 1956 * The list of all the hardware IPs that make up the asic is walked and the
1112a46b
RZ
1957 * set_clockgating_state callbacks are run.
 1958 * During late init this pass enables clockgating for the hardware IPs;
 1959 * during fini or suspend it disables clockgating.
e3ecdffa
AD
1960 * Returns 0 on success, negative error code on failure.
1961 */
fdd34271 1962
1112a46b
RZ
1963static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
1964 enum amd_clockgating_state state)
d38ceaf9 1965{
1112a46b 1966 int i, j, r;
d38ceaf9 1967
4a2ba394
SL
1968 if (amdgpu_emu_mode == 1)
1969 return 0;
1970
1112a46b
RZ
1971 for (j = 0; j < adev->num_ip_blocks; j++) {
1972 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
a2d31dc3 1973 if (!adev->ip_blocks[i].status.late_initialized)
d38ceaf9 1974 continue;
4a446d55 1975 /* skip CG for UVD/VCE/VCN/JPEG, they're handled specially */
a1255107 1976 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
57716327 1977 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
34319b32 1978 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
52f2e779 1979 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
57716327 1980 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
4a446d55 1981 /* enable clockgating to save power */
a1255107 1982 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1112a46b 1983 state);
4a446d55
AD
1984 if (r) {
1985 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
a1255107 1986 adev->ip_blocks[i].version->funcs->name, r);
4a446d55
AD
1987 return r;
1988 }
b0b00ff1 1989 }
d38ceaf9 1990 }
06b18f61 1991
c9f96fd5
RZ
1992 return 0;
1993}
1994
1112a46b 1995static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
c9f96fd5 1996{
1112a46b 1997 int i, j, r;
06b18f61 1998
c9f96fd5
RZ
1999 if (amdgpu_emu_mode == 1)
2000 return 0;
2001
1112a46b
RZ
2002 for (j = 0; j < adev->num_ip_blocks; j++) {
2003 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
a2d31dc3 2004 if (!adev->ip_blocks[i].status.late_initialized)
c9f96fd5
RZ
2005 continue;
 2006 /* skip PG for UVD/VCE/VCN/JPEG, they're handled specially */
2007 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2008 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2009 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
52f2e779 2010 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
c9f96fd5
RZ
2011 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2012 /* enable powergating to save power */
2013 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
1112a46b 2014 state);
c9f96fd5
RZ
2015 if (r) {
2016 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2017 adev->ip_blocks[i].version->funcs->name, r);
2018 return r;
2019 }
2020 }
2021 }
2dc80b00
S
2022 return 0;
2023}
2024
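/*
 * Ordering note for the two helpers above: when gating (AMD_CG_STATE_GATE /
 * AMD_PG_STATE_GATE) the IP list is walked front to back; when ungating it
 * is walked back to front, so blocks are ungated in the reverse of the
 * order in which they were gated.
 */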
beff74bc
AD
2025static int amdgpu_device_enable_mgpu_fan_boost(void)
2026{
2027 struct amdgpu_gpu_instance *gpu_ins;
2028 struct amdgpu_device *adev;
2029 int i, ret = 0;
2030
2031 mutex_lock(&mgpu_info.mutex);
2032
2033 /*
2034 * MGPU fan boost feature should be enabled
2035 * only when there are two or more dGPUs in
2036 * the system
2037 */
2038 if (mgpu_info.num_dgpu < 2)
2039 goto out;
2040
2041 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2042 gpu_ins = &(mgpu_info.gpu_ins[i]);
2043 adev = gpu_ins->adev;
2044 if (!(adev->flags & AMD_IS_APU) &&
2045 !gpu_ins->mgpu_fan_enabled &&
2046 adev->powerplay.pp_funcs &&
2047 adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
2048 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2049 if (ret)
2050 break;
2051
2052 gpu_ins->mgpu_fan_enabled = 1;
2053 }
2054 }
2055
2056out:
2057 mutex_unlock(&mgpu_info.mutex);
2058
2059 return ret;
2060}
2061
e3ecdffa
AD
2062/**
2063 * amdgpu_device_ip_late_init - run late init for hardware IPs
2064 *
2065 * @adev: amdgpu_device pointer
2066 *
2067 * Late initialization pass for hardware IPs. The list of all the hardware
2068 * IPs that make up the asic is walked and the late_init callbacks are run.
2069 * late_init covers any special initialization that an IP requires
 2070 * after all of the IPs have been initialized or something that needs to happen
2071 * late in the init process.
2072 * Returns 0 on success, negative error code on failure.
2073 */
06ec9070 2074static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2dc80b00 2075{
60599a03 2076 struct amdgpu_gpu_instance *gpu_instance;
2dc80b00
S
2077 int i = 0, r;
2078
2079 for (i = 0; i < adev->num_ip_blocks; i++) {
73f847db 2080 if (!adev->ip_blocks[i].status.hw)
2dc80b00
S
2081 continue;
2082 if (adev->ip_blocks[i].version->funcs->late_init) {
2083 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2084 if (r) {
2085 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2086 adev->ip_blocks[i].version->funcs->name, r);
2087 return r;
2088 }
2dc80b00 2089 }
73f847db 2090 adev->ip_blocks[i].status.late_initialized = true;
2dc80b00
S
2091 }
2092
1112a46b
RZ
2093 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2094 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
916ac57f 2095
06ec9070 2096 amdgpu_device_fill_reset_magic(adev);
d38ceaf9 2097
beff74bc
AD
2098 r = amdgpu_device_enable_mgpu_fan_boost();
2099 if (r)
2100 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2101
60599a03
EQ
2102
2103 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2104 mutex_lock(&mgpu_info.mutex);
2105
2106 /*
 2107 * Reset the device p-state to low, as it was booted with a high p-state.
 2108 *
 2109 * This should be performed only after all devices from the same
 2110 * hive have been initialized.
 2111 *
 2112 * However, the number of devices in a hive is not known in advance;
 2113 * it is counted one by one as the devices initialize.
 2114 *
 2115 * So we wait until all XGMI interlinked devices are initialized.
 2116 * This may introduce some delay, since those devices may come from
 2117 * different hives, but that should be OK.
2118 */
2119 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2120 for (i = 0; i < mgpu_info.num_gpu; i++) {
2121 gpu_instance = &(mgpu_info.gpu_ins[i]);
2122 if (gpu_instance->adev->flags & AMD_IS_APU)
2123 continue;
2124
2125 r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 0);
2126 if (r) {
2127 DRM_ERROR("pstate setting failed (%d).\n", r);
2128 break;
2129 }
2130 }
2131 }
2132
2133 mutex_unlock(&mgpu_info.mutex);
2134 }
2135
d38ceaf9
AD
2136 return 0;
2137}
2138
e3ecdffa
AD
2139/**
2140 * amdgpu_device_ip_fini - run fini for hardware IPs
2141 *
2142 * @adev: amdgpu_device pointer
2143 *
2144 * Main teardown pass for hardware IPs. The list of all the hardware
2145 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2146 * are run. hw_fini tears down the hardware associated with each IP
2147 * and sw_fini tears down any software state associated with each IP.
2148 * Returns 0 on success, negative error code on failure.
2149 */
06ec9070 2150static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
d38ceaf9
AD
2151{
2152 int i, r;
2153
c030f2e4 2154 amdgpu_ras_pre_fini(adev);
2155
a82400b5
AG
2156 if (adev->gmc.xgmi.num_physical_nodes > 1)
2157 amdgpu_xgmi_remove_device(adev);
2158
1884734a 2159 amdgpu_amdkfd_device_fini(adev);
05df1f01
RZ
2160
2161 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
fdd34271
RZ
2162 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2163
3e96dbfd
AD
2164 /* need to disable SMC first */
2165 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2166 if (!adev->ip_blocks[i].status.hw)
3e96dbfd 2167 continue;
fdd34271 2168 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
a1255107 2169 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3e96dbfd
AD
2170 /* XXX handle errors */
2171 if (r) {
2172 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
a1255107 2173 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd 2174 }
a1255107 2175 adev->ip_blocks[i].status.hw = false;
3e96dbfd
AD
2176 break;
2177 }
2178 }
2179
d38ceaf9 2180 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 2181 if (!adev->ip_blocks[i].status.hw)
d38ceaf9 2182 continue;
8201a67a 2183
a1255107 2184 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
d38ceaf9 2185 /* XXX handle errors */
2c1a2784 2186 if (r) {
a1255107
AD
2187 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2188 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 2189 }
8201a67a 2190
a1255107 2191 adev->ip_blocks[i].status.hw = false;
d38ceaf9
AD
2192 }
2193
9950cda2 2194
d38ceaf9 2195 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 2196 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 2197 continue;
c12aba3a
ML
2198
2199 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
c8963ea4 2200 amdgpu_ucode_free_bo(adev);
1e256e27 2201 amdgpu_free_static_csa(&adev->virt.csa_obj);
c12aba3a
ML
2202 amdgpu_device_wb_fini(adev);
2203 amdgpu_device_vram_scratch_fini(adev);
533aed27 2204 amdgpu_ib_pool_fini(adev);
c12aba3a
ML
2205 }
2206
a1255107 2207 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
d38ceaf9 2208 /* XXX handle errors */
2c1a2784 2209 if (r) {
a1255107
AD
2210 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2211 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 2212 }
a1255107
AD
2213 adev->ip_blocks[i].status.sw = false;
2214 adev->ip_blocks[i].status.valid = false;
d38ceaf9
AD
2215 }
2216
a6dcfd9c 2217 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 2218 if (!adev->ip_blocks[i].status.late_initialized)
8a2eef1d 2219 continue;
a1255107
AD
2220 if (adev->ip_blocks[i].version->funcs->late_fini)
2221 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2222 adev->ip_blocks[i].status.late_initialized = false;
a6dcfd9c
ML
2223 }
2224
c030f2e4 2225 amdgpu_ras_fini(adev);
2226
030308fc 2227 if (amdgpu_sriov_vf(adev))
24136135
ML
2228 if (amdgpu_virt_release_full_gpu(adev, false))
2229 DRM_ERROR("failed to release exclusive mode on fini\n");
2493664f 2230
d38ceaf9
AD
2231 return 0;
2232}
2233
e3ecdffa 2234/**
beff74bc 2235 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
e3ecdffa 2236 *
1112a46b 2237 * @work: work_struct.
e3ecdffa 2238 */
beff74bc 2239static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2dc80b00
S
2240{
2241 struct amdgpu_device *adev =
beff74bc 2242 container_of(work, struct amdgpu_device, delayed_init_work.work);
916ac57f
RZ
2243 int r;
2244
2245 r = amdgpu_ib_ring_tests(adev);
2246 if (r)
2247 DRM_ERROR("ib ring test failed (%d).\n", r);
2dc80b00
S
2248}
2249
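/*
 * The IB ring tests run from this delayed work, which is queued with an
 * AMDGPU_RESUME_MS delay at init and resume time, so device bring-up is
 * not blocked waiting for them; paths that need the results flush the
 * work explicitly (see amdgpu_device_fini() and amdgpu_device_resume()).
 */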
1e317b99
RZ
2250static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2251{
2252 struct amdgpu_device *adev =
2253 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2254
2255 mutex_lock(&adev->gfx.gfx_off_mutex);
2256 if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
2257 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2258 adev->gfx.gfx_off_state = true;
2259 }
2260 mutex_unlock(&adev->gfx.gfx_off_mutex);
2261}
2262
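/*
 * GFXOFF is enabled from this delayed work rather than immediately: the
 * handler only requests GFX powergating from the SMU when nothing still
 * holds a request (gfx_off_req_count == 0) and GFXOFF is not already in
 * effect.
 */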
e3ecdffa 2263/**
e7854a03 2264 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
e3ecdffa
AD
2265 *
2266 * @adev: amdgpu_device pointer
2267 *
 2268 * First suspend pass for hardware IPs. Clockgating and powergating are
 2269 * disabled and the suspend callbacks are run for the display (DCE) blocks,
 2270 * which are handled before the other IPs. suspend puts the hardware and
 2271 * software state in each IP into a state suitable for suspend.
2272 * Returns 0 on success, negative error code on failure.
2273 */
e7854a03
AD
2274static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2275{
2276 int i, r;
2277
05df1f01 2278 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
fdd34271 2279 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
05df1f01 2280
e7854a03
AD
2281 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2282 if (!adev->ip_blocks[i].status.valid)
2283 continue;
2284 /* displays are handled separately */
2285 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
e7854a03
AD
2286 /* XXX handle errors */
2287 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2288 /* XXX handle errors */
2289 if (r) {
2290 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2291 adev->ip_blocks[i].version->funcs->name, r);
482f0e53 2292 return r;
e7854a03 2293 }
482f0e53 2294 adev->ip_blocks[i].status.hw = false;
e7854a03
AD
2295 }
2296 }
2297
e7854a03
AD
2298 return 0;
2299}
2300
2301/**
2302 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2303 *
2304 * @adev: amdgpu_device pointer
2305 *
 2306 * Second suspend pass for hardware IPs. The list of all the hardware
 2307 * IPs that make up the asic is walked and the suspend callbacks are run
 2308 * for all blocks except the display (DCE) blocks handled in phase 1.
 2309 * suspend puts each IP into a state suitable for suspend.
2310 * Returns 0 on success, negative error code on failure.
2311 */
2312static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
d38ceaf9
AD
2313{
2314 int i, r;
2315
2316 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 2317 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 2318 continue;
e7854a03
AD
2319 /* displays are handled in phase1 */
2320 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2321 continue;
bff77e86
LM
2322 /* PSP lost connection when err_event_athub occurs */
2323 if (amdgpu_ras_intr_triggered() &&
2324 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2325 adev->ip_blocks[i].status.hw = false;
2326 continue;
2327 }
d38ceaf9 2328 /* XXX handle errors */
a1255107 2329 r = adev->ip_blocks[i].version->funcs->suspend(adev);
d38ceaf9 2330 /* XXX handle errors */
2c1a2784 2331 if (r) {
a1255107
AD
2332 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2333 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 2334 }
876923fb 2335 adev->ip_blocks[i].status.hw = false;
a3a09142
AD
2336 /* handle putting the SMC in the appropriate state */
2337 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2338 if (is_support_sw_smu(adev)) {
0e0b89c0 2339 r = smu_set_mp1_state(&adev->smu, adev->mp1_state);
a3a09142 2340 } else if (adev->powerplay.pp_funcs &&
482f0e53 2341 adev->powerplay.pp_funcs->set_mp1_state) {
a3a09142
AD
2342 r = adev->powerplay.pp_funcs->set_mp1_state(
2343 adev->powerplay.pp_handle,
2344 adev->mp1_state);
0e0b89c0
EQ
2345 }
2346 if (r) {
2347 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2348 adev->mp1_state, r);
2349 return r;
a3a09142
AD
2350 }
2351 }
b5507c7e
AG
2352
2353 adev->ip_blocks[i].status.hw = false;
d38ceaf9
AD
2354 }
2355
2356 return 0;
2357}
2358
e7854a03
AD
2359/**
2360 * amdgpu_device_ip_suspend - run suspend for hardware IPs
2361 *
2362 * @adev: amdgpu_device pointer
2363 *
2364 * Main suspend function for hardware IPs. The list of all the hardware
2365 * IPs that make up the asic is walked, clockgating is disabled and the
2366 * suspend callbacks are run. suspend puts the hardware and software state
2367 * in each IP into a state suitable for suspend.
2368 * Returns 0 on success, negative error code on failure.
2369 */
2370int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2371{
2372 int r;
2373
e7819644
YT
2374 if (amdgpu_sriov_vf(adev))
2375 amdgpu_virt_request_full_gpu(adev, false);
2376
e7854a03
AD
2377 r = amdgpu_device_ip_suspend_phase1(adev);
2378 if (r)
2379 return r;
2380 r = amdgpu_device_ip_suspend_phase2(adev);
2381
e7819644
YT
2382 if (amdgpu_sriov_vf(adev))
2383 amdgpu_virt_release_full_gpu(adev, false);
2384
e7854a03
AD
2385 return r;
2386}
2387
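/*
 * For SR-IOV virtual functions the two suspend phases above are bracketed
 * by amdgpu_virt_request_full_gpu()/amdgpu_virt_release_full_gpu(), so the
 * VF holds full GPU access while the IP blocks are suspended.
 */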
06ec9070 2388static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
a90ad3c2
ML
2389{
2390 int i, r;
2391
2cb681b6
ML
2392 static enum amd_ip_block_type ip_order[] = {
2393 AMD_IP_BLOCK_TYPE_GMC,
2394 AMD_IP_BLOCK_TYPE_COMMON,
39186aef 2395 AMD_IP_BLOCK_TYPE_PSP,
2cb681b6
ML
2396 AMD_IP_BLOCK_TYPE_IH,
2397 };
a90ad3c2 2398
2cb681b6
ML
2399 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2400 int j;
2401 struct amdgpu_ip_block *block;
a90ad3c2 2402
2cb681b6
ML
2403 for (j = 0; j < adev->num_ip_blocks; j++) {
2404 block = &adev->ip_blocks[j];
2405
482f0e53 2406 block->status.hw = false;
2cb681b6
ML
2407 if (block->version->type != ip_order[i] ||
2408 !block->status.valid)
2409 continue;
2410
2411 r = block->version->funcs->hw_init(adev);
0aaeefcc 2412 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
c41d1cf6
ML
2413 if (r)
2414 return r;
482f0e53 2415 block->status.hw = true;
a90ad3c2
ML
2416 }
2417 }
2418
2419 return 0;
2420}
2421
06ec9070 2422static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
a90ad3c2
ML
2423{
2424 int i, r;
2425
2cb681b6
ML
2426 static enum amd_ip_block_type ip_order[] = {
2427 AMD_IP_BLOCK_TYPE_SMC,
2428 AMD_IP_BLOCK_TYPE_DCE,
2429 AMD_IP_BLOCK_TYPE_GFX,
2430 AMD_IP_BLOCK_TYPE_SDMA,
257deb8c
FM
2431 AMD_IP_BLOCK_TYPE_UVD,
2432 AMD_IP_BLOCK_TYPE_VCE
2cb681b6 2433 };
a90ad3c2 2434
2cb681b6
ML
2435 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2436 int j;
2437 struct amdgpu_ip_block *block;
a90ad3c2 2438
2cb681b6
ML
2439 for (j = 0; j < adev->num_ip_blocks; j++) {
2440 block = &adev->ip_blocks[j];
2441
2442 if (block->version->type != ip_order[i] ||
482f0e53
ML
2443 !block->status.valid ||
2444 block->status.hw)
2cb681b6
ML
2445 continue;
2446
2447 r = block->version->funcs->hw_init(adev);
0aaeefcc 2448 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
c41d1cf6
ML
2449 if (r)
2450 return r;
482f0e53 2451 block->status.hw = true;
a90ad3c2
ML
2452 }
2453 }
2454
2455 return 0;
2456}
2457
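/*
 * The two SR-IOV re-init helpers above restore hardware state in a fixed
 * order after an SR-IOV GPU reset: first GMC, COMMON, PSP and IH ("early"),
 * then SMC, DCE, GFX, SDMA, UVD and VCE ("late"); the late pass skips
 * blocks whose hardware is already up.
 */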
e3ecdffa
AD
2458/**
2459 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2460 *
2461 * @adev: amdgpu_device pointer
2462 *
2463 * First resume function for hardware IPs. The list of all the hardware
2464 * IPs that make up the asic is walked and the resume callbacks are run for
2465 * COMMON, GMC, and IH. resume puts the hardware into a functional state
2466 * after a suspend and updates the software state as necessary. This
2467 * function is also used for restoring the GPU after a GPU reset.
2468 * Returns 0 on success, negative error code on failure.
2469 */
06ec9070 2470static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
d38ceaf9
AD
2471{
2472 int i, r;
2473
a90ad3c2 2474 for (i = 0; i < adev->num_ip_blocks; i++) {
482f0e53 2475 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
a90ad3c2 2476 continue;
a90ad3c2 2477 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
e3ecdffa
AD
2478 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2479 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
482f0e53 2480
fcf0649f
CZ
2481 r = adev->ip_blocks[i].version->funcs->resume(adev);
2482 if (r) {
2483 DRM_ERROR("resume of IP block <%s> failed %d\n",
2484 adev->ip_blocks[i].version->funcs->name, r);
2485 return r;
2486 }
482f0e53 2487 adev->ip_blocks[i].status.hw = true;
a90ad3c2
ML
2488 }
2489 }
2490
2491 return 0;
2492}
2493
e3ecdffa
AD
2494/**
2495 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2496 *
2497 * @adev: amdgpu_device pointer
2498 *
 2499 * Second resume function for hardware IPs. The list of all the hardware
2500 * IPs that make up the asic is walked and the resume callbacks are run for
2501 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
2502 * functional state after a suspend and updates the software state as
2503 * necessary. This function is also used for restoring the GPU after a GPU
2504 * reset.
2505 * Returns 0 on success, negative error code on failure.
2506 */
06ec9070 2507static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
d38ceaf9
AD
2508{
2509 int i, r;
2510
2511 for (i = 0; i < adev->num_ip_blocks; i++) {
482f0e53 2512 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
d38ceaf9 2513 continue;
fcf0649f 2514 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
e3ecdffa 2515 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
7a3e0bb2
RZ
2516 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
2517 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
fcf0649f 2518 continue;
a1255107 2519 r = adev->ip_blocks[i].version->funcs->resume(adev);
2c1a2784 2520 if (r) {
a1255107
AD
2521 DRM_ERROR("resume of IP block <%s> failed %d\n",
2522 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 2523 return r;
2c1a2784 2524 }
482f0e53 2525 adev->ip_blocks[i].status.hw = true;
d38ceaf9
AD
2526 }
2527
2528 return 0;
2529}
2530
e3ecdffa
AD
2531/**
2532 * amdgpu_device_ip_resume - run resume for hardware IPs
2533 *
2534 * @adev: amdgpu_device pointer
2535 *
2536 * Main resume function for hardware IPs. The hardware IPs
 2537 * are split into two resume functions because they are
 2538 * also used in recovering from a GPU reset, and some additional
 2539 * steps need to be taken between them. In this case (S3/S4) they are
2540 * run sequentially.
2541 * Returns 0 on success, negative error code on failure.
2542 */
06ec9070 2543static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
fcf0649f
CZ
2544{
2545 int r;
2546
06ec9070 2547 r = amdgpu_device_ip_resume_phase1(adev);
fcf0649f
CZ
2548 if (r)
2549 return r;
7a3e0bb2
RZ
2550
2551 r = amdgpu_device_fw_loading(adev);
2552 if (r)
2553 return r;
2554
06ec9070 2555 r = amdgpu_device_ip_resume_phase2(adev);
fcf0649f
CZ
2556
2557 return r;
2558}
2559
e3ecdffa
AD
2560/**
2561 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2562 *
2563 * @adev: amdgpu_device pointer
2564 *
2565 * Query the VBIOS data tables to determine if the board supports SR-IOV.
2566 */
4e99a44e 2567static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 2568{
6867e1b5
ML
2569 if (amdgpu_sriov_vf(adev)) {
2570 if (adev->is_atom_fw) {
2571 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2572 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2573 } else {
2574 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2575 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2576 }
2577
2578 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2579 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
a5bde2f9 2580 }
048765ad
AR
2581}
2582
e3ecdffa
AD
2583/**
2584 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
2585 *
2586 * @asic_type: AMD asic type
2587 *
 2588 * Check if there is DC (new modesetting infrastructure) support for an asic.
 2589 * Returns true if DC has support, false if not.
2590 */
4562236b
HW
2591bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2592{
2593 switch (asic_type) {
2594#if defined(CONFIG_DRM_AMD_DC)
2595 case CHIP_BONAIRE:
0d6fbccb 2596 case CHIP_KAVERI:
367e6687
AD
2597 case CHIP_KABINI:
2598 case CHIP_MULLINS:
d9fda248
HW
2599 /*
2600 * We have systems in the wild with these ASICs that require
2601 * LVDS and VGA support which is not supported with DC.
2602 *
2603 * Fallback to the non-DC driver here by default so as not to
2604 * cause regressions.
2605 */
2606 return amdgpu_dc > 0;
2607 case CHIP_HAWAII:
4562236b
HW
2608 case CHIP_CARRIZO:
2609 case CHIP_STONEY:
4562236b 2610 case CHIP_POLARIS10:
675fd32b 2611 case CHIP_POLARIS11:
2c8ad2d5 2612 case CHIP_POLARIS12:
675fd32b 2613 case CHIP_VEGAM:
4562236b
HW
2614 case CHIP_TONGA:
2615 case CHIP_FIJI:
42f8ffa1 2616 case CHIP_VEGA10:
dca7b401 2617 case CHIP_VEGA12:
c6034aa2 2618 case CHIP_VEGA20:
b86a1aa3 2619#if defined(CONFIG_DRM_AMD_DC_DCN)
fd187853 2620 case CHIP_RAVEN:
b4f199c7 2621 case CHIP_NAVI10:
8fceceb6 2622 case CHIP_NAVI14:
078655d9 2623 case CHIP_NAVI12:
e1c14c43 2624 case CHIP_RENOIR:
42f8ffa1 2625#endif
fd187853 2626 return amdgpu_dc != 0;
4562236b
HW
2627#endif
2628 default:
2629 return false;
2630 }
2631}
2632
2633/**
2634 * amdgpu_device_has_dc_support - check if dc is supported
2635 *
 2636 * @adev: amdgpu_device pointer
2637 *
2638 * Returns true for supported, false for not supported
2639 */
2640bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2641{
2555039d
XY
2642 if (amdgpu_sriov_vf(adev))
2643 return false;
2644
4562236b
HW
2645 return amdgpu_device_asic_has_dc_support(adev->asic_type);
2646}
2647
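/*
 * The amdgpu_dc module parameter is interpreted two ways above: ASICs that
 * default to the legacy display path (BONAIRE, KAVERI, KABINI, MULLINS)
 * only use DC when it is explicitly enabled (amdgpu_dc > 0), while the
 * remaining ASICs use DC unless it is explicitly disabled (amdgpu_dc=0).
 */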
d4535e2c
AG
2648
2649static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
2650{
2651 struct amdgpu_device *adev =
2652 container_of(__work, struct amdgpu_device, xgmi_reset_work);
2653
2654 adev->asic_reset_res = amdgpu_asic_reset(adev);
2655 if (adev->asic_reset_res)
fed184e9 2656 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
d4535e2c
AG
2657 adev->asic_reset_res, adev->ddev->unique);
2658}
2659
71f98027
AD
2660static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
2661{
2662 char *input = amdgpu_lockup_timeout;
2663 char *timeout_setting = NULL;
2664 int index = 0;
2665 long timeout;
2666 int ret = 0;
2667
2668 /*
 2669 * By default the timeout for non-compute jobs is 10000 ms,
 2670 * and there is no timeout enforced on compute jobs.
 2671 * In SR-IOV or passthrough mode, the default timeout for
 2672 * compute jobs is also 10000 ms.
2673 */
2674 adev->gfx_timeout = msecs_to_jiffies(10000);
2675 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
2676 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
2677 adev->compute_timeout = adev->gfx_timeout;
2678 else
2679 adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
2680
f440ff44 2681 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
71f98027 2682 while ((timeout_setting = strsep(&input, ",")) &&
f440ff44 2683 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
71f98027
AD
2684 ret = kstrtol(timeout_setting, 0, &timeout);
2685 if (ret)
2686 return ret;
2687
2688 if (timeout == 0) {
2689 index++;
2690 continue;
2691 } else if (timeout < 0) {
2692 timeout = MAX_SCHEDULE_TIMEOUT;
2693 } else {
2694 timeout = msecs_to_jiffies(timeout);
2695 }
2696
2697 switch (index++) {
2698 case 0:
2699 adev->gfx_timeout = timeout;
2700 break;
2701 case 1:
2702 adev->compute_timeout = timeout;
2703 break;
2704 case 2:
2705 adev->sdma_timeout = timeout;
2706 break;
2707 case 3:
2708 adev->video_timeout = timeout;
2709 break;
2710 default:
2711 break;
2712 }
2713 }
2714 /*
 2715 * Only one value was specified, so it
 2716 * applies to all non-compute jobs.
2717 */
bcccee89 2718 if (index == 1) {
71f98027 2719 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
bcccee89
ED
2720 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
2721 adev->compute_timeout = adev->gfx_timeout;
2722 }
71f98027
AD
2723 }
2724
2725 return ret;
2726}
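/*
 * Illustrative example (values made up): booting with
 *
 *	amdgpu.lockup_timeout=10000,50000,10000,5000
 *
 * sets the gfx, compute, sdma and video timeouts, in milliseconds and in
 * that order. A value of 0 keeps the default, a negative value disables
 * the timeout (MAX_SCHEDULE_TIMEOUT), and a single value applies to all
 * non-compute jobs.
 */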
d4535e2c 2727
d38ceaf9
AD
2728/**
2729 * amdgpu_device_init - initialize the driver
2730 *
2731 * @adev: amdgpu_device pointer
87e3f136 2732 * @ddev: drm dev pointer
d38ceaf9
AD
2733 * @pdev: pci dev pointer
2734 * @flags: driver flags
2735 *
2736 * Initializes the driver info and hw (all asics).
2737 * Returns 0 for success or an error on failure.
2738 * Called at driver startup.
2739 */
2740int amdgpu_device_init(struct amdgpu_device *adev,
2741 struct drm_device *ddev,
2742 struct pci_dev *pdev,
2743 uint32_t flags)
2744{
2745 int r, i;
2746 bool runtime = false;
95844d20 2747 u32 max_MBps;
d38ceaf9
AD
2748
2749 adev->shutdown = false;
2750 adev->dev = &pdev->dev;
2751 adev->ddev = ddev;
2752 adev->pdev = pdev;
2753 adev->flags = flags;
4e66d7d2
YZ
2754
2755 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
2756 adev->asic_type = amdgpu_force_asic_type;
2757 else
2758 adev->asic_type = flags & AMD_ASIC_MASK;
2759
d38ceaf9 2760 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
593aa2d2
SL
2761 if (amdgpu_emu_mode == 1)
2762 adev->usec_timeout *= 2;
770d13b1 2763 adev->gmc.gart_size = 512 * 1024 * 1024;
d38ceaf9
AD
2764 adev->accel_working = false;
2765 adev->num_rings = 0;
2766 adev->mman.buffer_funcs = NULL;
2767 adev->mman.buffer_funcs_ring = NULL;
2768 adev->vm_manager.vm_pte_funcs = NULL;
3798e9a6 2769 adev->vm_manager.vm_pte_num_rqs = 0;
132f34e4 2770 adev->gmc.gmc_funcs = NULL;
f54d1867 2771 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
b8866c26 2772 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
d38ceaf9
AD
2773
2774 adev->smc_rreg = &amdgpu_invalid_rreg;
2775 adev->smc_wreg = &amdgpu_invalid_wreg;
2776 adev->pcie_rreg = &amdgpu_invalid_rreg;
2777 adev->pcie_wreg = &amdgpu_invalid_wreg;
36b9a952
HR
2778 adev->pciep_rreg = &amdgpu_invalid_rreg;
2779 adev->pciep_wreg = &amdgpu_invalid_wreg;
4fa1c6a6
TZ
2780 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
2781 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
d38ceaf9
AD
2782 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2783 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2784 adev->didt_rreg = &amdgpu_invalid_rreg;
2785 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
2786 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2787 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
2788 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2789 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2790
3e39ab90
AD
2791 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2792 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2793 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
2794
 2795 /* mutex initializations are all done here so we
 2796 * can call these functions again without locking issues */
d38ceaf9 2797 atomic_set(&adev->irq.ih.lock, 0);
0e5ca0d1 2798 mutex_init(&adev->firmware.mutex);
d38ceaf9
AD
2799 mutex_init(&adev->pm.mutex);
2800 mutex_init(&adev->gfx.gpu_clock_mutex);
2801 mutex_init(&adev->srbm_mutex);
b8866c26 2802 mutex_init(&adev->gfx.pipe_reserve_mutex);
d23ee13f 2803 mutex_init(&adev->gfx.gfx_off_mutex);
d38ceaf9 2804 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9 2805 mutex_init(&adev->mn_lock);
e23b74aa 2806 mutex_init(&adev->virt.vf_errors.lock);
d38ceaf9 2807 hash_init(adev->mn_hash);
13a752e3 2808 mutex_init(&adev->lock_reset);
bb5a2bdf 2809 mutex_init(&adev->virt.dpm_mutex);
32eaeae0 2810 mutex_init(&adev->psp.mutex);
d38ceaf9 2811
912dfc84
EQ
2812 r = amdgpu_device_check_arguments(adev);
2813 if (r)
2814 return r;
d38ceaf9 2815
d38ceaf9
AD
2816 spin_lock_init(&adev->mmio_idx_lock);
2817 spin_lock_init(&adev->smc_idx_lock);
2818 spin_lock_init(&adev->pcie_idx_lock);
2819 spin_lock_init(&adev->uvd_ctx_idx_lock);
2820 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 2821 spin_lock_init(&adev->gc_cac_idx_lock);
16abb5d2 2822 spin_lock_init(&adev->se_cac_idx_lock);
d38ceaf9 2823 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 2824 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 2825
0c4e7fa5
CZ
2826 INIT_LIST_HEAD(&adev->shadow_list);
2827 mutex_init(&adev->shadow_list_lock);
2828
795f2813
AR
2829 INIT_LIST_HEAD(&adev->ring_lru_list);
2830 spin_lock_init(&adev->ring_lru_list_lock);
2831
beff74bc
AD
2832 INIT_DELAYED_WORK(&adev->delayed_init_work,
2833 amdgpu_device_delayed_init_work_handler);
1e317b99
RZ
2834 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
2835 amdgpu_device_delay_enable_gfx_off);
2dc80b00 2836
d4535e2c
AG
2837 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
2838
d23ee13f 2839 adev->gfx.gfx_off_req_count = 1;
b1ddf548
RZ
2840 adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
2841
0fa49558
AX
2842 /* Registers mapping */
2843 /* TODO: block userspace mapping of io register */
da69c161
KW
2844 if (adev->asic_type >= CHIP_BONAIRE) {
2845 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2846 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2847 } else {
2848 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2849 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2850 }
d38ceaf9 2851
d38ceaf9
AD
2852 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2853 if (adev->rmmio == NULL) {
2854 return -ENOMEM;
2855 }
2856 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2857 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2858
d38ceaf9
AD
2859 /* io port mapping */
2860 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2861 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2862 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2863 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2864 break;
2865 }
2866 }
2867 if (adev->rio_mem == NULL)
b64a18c5 2868 DRM_INFO("PCI I/O BAR is not found.\n");
d38ceaf9 2869
b2109d8e
JX
2870 /* enable PCIE atomic ops */
2871 r = pci_enable_atomic_ops_to_root(adev->pdev,
2872 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
2873 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
2874 if (r) {
2875 adev->have_atomics_support = false;
2876 DRM_INFO("PCIE atomic ops is not supported\n");
2877 } else {
2878 adev->have_atomics_support = true;
2879 }
2880
5494d864
AD
2881 amdgpu_device_get_pcie_info(adev);
2882
b239c017
JX
2883 if (amdgpu_mcbp)
2884 DRM_INFO("MCBP is enabled\n");
2885
5f84cc63
JX
2886 if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
2887 adev->enable_mes = true;
2888
f54eeab4 2889 if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
a190d1c7
XY
2890 r = amdgpu_discovery_init(adev);
2891 if (r) {
2892 dev_err(adev->dev, "amdgpu_discovery_init failed\n");
2893 return r;
2894 }
2895 }
2896
d38ceaf9 2897 /* early init functions */
06ec9070 2898 r = amdgpu_device_ip_early_init(adev);
d38ceaf9
AD
2899 if (r)
2900 return r;
2901
df99ac0f
JZ
2902 r = amdgpu_device_get_job_timeout_settings(adev);
2903 if (r) {
2904 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
2905 return r;
2906 }
2907
6585661d
OZ
2908 /* doorbell bar mapping and doorbell index init*/
2909 amdgpu_device_doorbell_init(adev);
2910
d38ceaf9
AD
2911 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2912 /* this will fail for cards that aren't VGA class devices, just
2913 * ignore it */
06ec9070 2914 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
d38ceaf9 2915
e9bef455 2916 if (amdgpu_device_is_px(ddev))
d38ceaf9 2917 runtime = true;
84c8b22e
LW
2918 if (!pci_is_thunderbolt_attached(adev->pdev))
2919 vga_switcheroo_register_client(adev->pdev,
2920 &amdgpu_switcheroo_ops, runtime);
d38ceaf9
AD
2921 if (runtime)
2922 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2923
9475a943
SL
2924 if (amdgpu_emu_mode == 1) {
2925 /* post the asic on emulation mode */
2926 emu_soc_asic_init(adev);
bfca0289 2927 goto fence_driver_init;
9475a943 2928 }
bfca0289 2929
4e99a44e
ML
2930 /* detect if we are with an SRIOV vbios */
2931 amdgpu_device_detect_sriov_bios(adev);
048765ad 2932
95e8e59e
AD
2933 /* check if we need to reset the asic
2934 * E.g., driver was not cleanly unloaded previously, etc.
2935 */
f14899fd 2936 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
95e8e59e
AD
2937 r = amdgpu_asic_reset(adev);
2938 if (r) {
2939 dev_err(adev->dev, "asic reset on init failed\n");
2940 goto failed;
2941 }
2942 }
2943
d38ceaf9 2944 /* Post card if necessary */
39c640c0 2945 if (amdgpu_device_need_post(adev)) {
d38ceaf9 2946 if (!adev->bios) {
bec86378 2947 dev_err(adev->dev, "no vBIOS found\n");
83ba126a
AD
2948 r = -EINVAL;
2949 goto failed;
d38ceaf9 2950 }
bec86378 2951 DRM_INFO("GPU posting now...\n");
4e99a44e
ML
2952 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2953 if (r) {
2954 dev_err(adev->dev, "gpu post error!\n");
2955 goto failed;
2956 }
d38ceaf9
AD
2957 }
2958
88b64e95
AD
2959 if (adev->is_atom_fw) {
2960 /* Initialize clocks */
2961 r = amdgpu_atomfirmware_get_clock_info(adev);
2962 if (r) {
2963 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
e23b74aa 2964 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
88b64e95
AD
2965 goto failed;
2966 }
2967 } else {
a5bde2f9
AD
2968 /* Initialize clocks */
2969 r = amdgpu_atombios_get_clock_info(adev);
2970 if (r) {
2971 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
e23b74aa 2972 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
89041940 2973 goto failed;
a5bde2f9
AD
2974 }
2975 /* init i2c buses */
4562236b
HW
2976 if (!amdgpu_device_has_dc_support(adev))
2977 amdgpu_atombios_i2c_init(adev);
2c1a2784 2978 }
d38ceaf9 2979
bfca0289 2980fence_driver_init:
d38ceaf9
AD
2981 /* Fence driver */
2982 r = amdgpu_fence_driver_init(adev);
2c1a2784
AD
2983 if (r) {
2984 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
e23b74aa 2985 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
83ba126a 2986 goto failed;
2c1a2784 2987 }
d38ceaf9
AD
2988
2989 /* init the mode config */
2990 drm_mode_config_init(adev->ddev);
2991
06ec9070 2992 r = amdgpu_device_ip_init(adev);
d38ceaf9 2993 if (r) {
8840a387 2994 /* failed in exclusive mode due to timeout */
2995 if (amdgpu_sriov_vf(adev) &&
2996 !amdgpu_sriov_runtime(adev) &&
2997 amdgpu_virt_mmio_blocked(adev) &&
2998 !amdgpu_virt_wait_reset(adev)) {
2999 dev_err(adev->dev, "VF exclusive mode timeout\n");
1daee8b4
PD
3000 /* Don't send request since VF is inactive. */
3001 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3002 adev->virt.ops = NULL;
8840a387 3003 r = -EAGAIN;
3004 goto failed;
3005 }
06ec9070 3006 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
e23b74aa 3007 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
72d3f592
ED
3008 if (amdgpu_virt_request_full_gpu(adev, false))
3009 amdgpu_virt_release_full_gpu(adev, false);
83ba126a 3010 goto failed;
d38ceaf9
AD
3011 }
3012
3013 adev->accel_working = true;
3014
e59c0205
AX
3015 amdgpu_vm_check_compute_bug(adev);
3016
95844d20
MO
3017 /* Initialize the buffer migration limit. */
3018 if (amdgpu_moverate >= 0)
3019 max_MBps = amdgpu_moverate;
3020 else
3021 max_MBps = 8; /* Allow 8 MB/s. */
3022 /* Get a log2 for easy divisions. */
3023 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3024
9bc92b9c
ML
3025 amdgpu_fbdev_init(adev);
3026
e9bc1bf7
YT
3027 if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
3028 amdgpu_pm_virt_sysfs_init(adev);
3029
d2f52ac8
RZ
3030 r = amdgpu_pm_sysfs_init(adev);
3031 if (r)
3032 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3033
5bb23532
OM
3034 r = amdgpu_ucode_sysfs_init(adev);
3035 if (r)
3036 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3037
75758255 3038 r = amdgpu_debugfs_gem_init(adev);
3f14e623 3039 if (r)
d38ceaf9 3040 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
d38ceaf9
AD
3041
3042 r = amdgpu_debugfs_regs_init(adev);
3f14e623 3043 if (r)
d38ceaf9 3044 DRM_ERROR("registering register debugfs failed (%d).\n", r);
d38ceaf9 3045
50ab2533 3046 r = amdgpu_debugfs_firmware_init(adev);
3f14e623 3047 if (r)
50ab2533 3048 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
50ab2533 3049
763efb6c 3050 r = amdgpu_debugfs_init(adev);
db95e218 3051 if (r)
763efb6c 3052 DRM_ERROR("Creating debugfs files failed (%d).\n", r);
db95e218 3053
d38ceaf9
AD
3054 if ((amdgpu_testing & 1)) {
3055 if (adev->accel_working)
3056 amdgpu_test_moves(adev);
3057 else
3058 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3059 }
d38ceaf9
AD
3060 if (amdgpu_benchmarking) {
3061 if (adev->accel_working)
3062 amdgpu_benchmark(adev, amdgpu_benchmarking);
3063 else
3064 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3065 }
3066
b0adca4d
EQ
3067 /*
3068 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
 3069 * Otherwise the mgpu fan boost feature will be skipped because the
 3070 * gpu instance count would be too low.
3071 */
3072 amdgpu_register_gpu_instance(adev);
3073
d38ceaf9
AD
 3074 /* enable clockgating, etc. after the ib tests, since some blocks require
 3075 * explicit gating rather than handling it automatically.
3076 */
06ec9070 3077 r = amdgpu_device_ip_late_init(adev);
2c1a2784 3078 if (r) {
06ec9070 3079 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
e23b74aa 3080 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
83ba126a 3081 goto failed;
2c1a2784 3082 }
d38ceaf9 3083
108c6a63 3084 /* must succeed. */
511fdbc3 3085 amdgpu_ras_resume(adev);
108c6a63 3086
beff74bc
AD
3087 queue_delayed_work(system_wq, &adev->delayed_init_work,
3088 msecs_to_jiffies(AMDGPU_RESUME_MS));
3089
dcea6e65
KR
3090 r = device_create_file(adev->dev, &dev_attr_pcie_replay_count);
3091 if (r) {
3092 dev_err(adev->dev, "Could not create pcie_replay_count");
3093 return r;
3094 }
108c6a63 3095
d155bef0
AB
3096 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3097 r = amdgpu_pmu_init(adev);
9c7c85f7
JK
3098 if (r)
3099 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3100
d38ceaf9 3101 return 0;
83ba126a
AD
3102
3103failed:
89041940 3104 amdgpu_vf_error_trans_all(adev);
83ba126a
AD
3105 if (runtime)
3106 vga_switcheroo_fini_domain_pm_ops(adev->dev);
8840a387 3107
83ba126a 3108 return r;
d38ceaf9
AD
3109}
3110
d38ceaf9
AD
3111/**
3112 * amdgpu_device_fini - tear down the driver
3113 *
3114 * @adev: amdgpu_device pointer
3115 *
3116 * Tear down the driver info (all asics).
3117 * Called at driver shutdown.
3118 */
3119void amdgpu_device_fini(struct amdgpu_device *adev)
3120{
3121 int r;
3122
3123 DRM_INFO("amdgpu: finishing device.\n");
9f875167 3124 flush_delayed_work(&adev->delayed_init_work);
d0d13fe8 3125 adev->shutdown = true;
9f875167 3126
e5b03032
ML
3127 /* disable all interrupts */
3128 amdgpu_irq_disable_all(adev);
ff97cba8
ML
3129 if (adev->mode_info.mode_config_initialized){
3130 if (!amdgpu_device_has_dc_support(adev))
c2d88e06 3131 drm_helper_force_disable_all(adev->ddev);
ff97cba8
ML
3132 else
3133 drm_atomic_helper_shutdown(adev->ddev);
3134 }
d38ceaf9 3135 amdgpu_fence_driver_fini(adev);
58e955d9 3136 amdgpu_pm_sysfs_fini(adev);
d38ceaf9 3137 amdgpu_fbdev_fini(adev);
06ec9070 3138 r = amdgpu_device_ip_fini(adev);
ab4fe3e1
HR
3139 if (adev->firmware.gpu_info_fw) {
3140 release_firmware(adev->firmware.gpu_info_fw);
3141 adev->firmware.gpu_info_fw = NULL;
3142 }
d38ceaf9
AD
3143 adev->accel_working = false;
3144 /* free i2c buses */
4562236b
HW
3145 if (!amdgpu_device_has_dc_support(adev))
3146 amdgpu_i2c_fini(adev);
bfca0289
SL
3147
3148 if (amdgpu_emu_mode != 1)
3149 amdgpu_atombios_fini(adev);
3150
d38ceaf9
AD
3151 kfree(adev->bios);
3152 adev->bios = NULL;
84c8b22e
LW
3153 if (!pci_is_thunderbolt_attached(adev->pdev))
3154 vga_switcheroo_unregister_client(adev->pdev);
83ba126a
AD
3155 if (adev->flags & AMD_IS_PX)
3156 vga_switcheroo_fini_domain_pm_ops(adev->dev);
d38ceaf9
AD
3157 vga_client_register(adev->pdev, NULL, NULL, NULL);
3158 if (adev->rio_mem)
3159 pci_iounmap(adev->pdev, adev->rio_mem);
3160 adev->rio_mem = NULL;
3161 iounmap(adev->rmmio);
3162 adev->rmmio = NULL;
06ec9070 3163 amdgpu_device_doorbell_fini(adev);
e9bc1bf7
YT
3164 if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
3165 amdgpu_pm_virt_sysfs_fini(adev);
3166
d38ceaf9 3167 amdgpu_debugfs_regs_cleanup(adev);
dcea6e65 3168 device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
5bb23532 3169 amdgpu_ucode_sysfs_fini(adev);
d155bef0
AB
3170 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3171 amdgpu_pmu_fini(adev);
6698a3d0 3172 amdgpu_debugfs_preempt_cleanup(adev);
f54eeab4 3173 if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
a190d1c7 3174 amdgpu_discovery_fini(adev);
d38ceaf9
AD
3175}
3176
3177
3178/*
3179 * Suspend & resume.
3180 */
3181/**
810ddc3a 3182 * amdgpu_device_suspend - initiate device suspend
d38ceaf9 3183 *
87e3f136
DP
3184 * @dev: drm dev pointer
3185 * @suspend: suspend state
3186 * @fbcon : notify the fbdev of suspend
d38ceaf9
AD
3187 *
3188 * Puts the hw in the suspend state (all asics).
3189 * Returns 0 for success or an error on failure.
3190 * Called at driver suspend.
3191 */
810ddc3a 3192int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
d38ceaf9
AD
3193{
3194 struct amdgpu_device *adev;
3195 struct drm_crtc *crtc;
3196 struct drm_connector *connector;
f8d2d39e 3197 struct drm_connector_list_iter iter;
5ceb54c6 3198 int r;
d38ceaf9
AD
3199
3200 if (dev == NULL || dev->dev_private == NULL) {
3201 return -ENODEV;
3202 }
3203
3204 adev = dev->dev_private;
3205
3206 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3207 return 0;
3208
44779b43 3209 adev->in_suspend = true;
d38ceaf9
AD
3210 drm_kms_helper_poll_disable(dev);
3211
5f818173
S
3212 if (fbcon)
3213 amdgpu_fbdev_set_suspend(adev, 1);
3214
beff74bc 3215 cancel_delayed_work_sync(&adev->delayed_init_work);
a5459475 3216
4562236b
HW
3217 if (!amdgpu_device_has_dc_support(adev)) {
3218 /* turn off display hw */
3219 drm_modeset_lock_all(dev);
f8d2d39e
LP
3220 drm_connector_list_iter_begin(dev, &iter);
3221 drm_for_each_connector_iter(connector, &iter)
3222 drm_helper_connector_dpms(connector,
3223 DRM_MODE_DPMS_OFF);
3224 drm_connector_list_iter_end(&iter);
4562236b 3225 drm_modeset_unlock_all(dev);
fe1053b7
AD
3226 /* unpin the front buffers and cursors */
3227 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3228 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3229 struct drm_framebuffer *fb = crtc->primary->fb;
3230 struct amdgpu_bo *robj;
3231
91334223 3232 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
fe1053b7
AD
3233 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3234 r = amdgpu_bo_reserve(aobj, true);
3235 if (r == 0) {
3236 amdgpu_bo_unpin(aobj);
3237 amdgpu_bo_unreserve(aobj);
3238 }
756e6880 3239 }
756e6880 3240
fe1053b7
AD
3241 if (fb == NULL || fb->obj[0] == NULL) {
3242 continue;
3243 }
3244 robj = gem_to_amdgpu_bo(fb->obj[0]);
3245 /* don't unpin kernel fb objects */
3246 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
3247 r = amdgpu_bo_reserve(robj, true);
3248 if (r == 0) {
3249 amdgpu_bo_unpin(robj);
3250 amdgpu_bo_unreserve(robj);
3251 }
d38ceaf9
AD
3252 }
3253 }
3254 }
fe1053b7
AD
3255
3256 amdgpu_amdkfd_suspend(adev);
3257
5e6932fe 3258 amdgpu_ras_suspend(adev);
3259
fe1053b7
AD
3260 r = amdgpu_device_ip_suspend_phase1(adev);
3261
d38ceaf9
AD
3262 /* evict vram memory */
3263 amdgpu_bo_evict_vram(adev);
3264
5ceb54c6 3265 amdgpu_fence_driver_suspend(adev);
d38ceaf9 3266
fe1053b7 3267 r = amdgpu_device_ip_suspend_phase2(adev);
d38ceaf9 3268
a0a71e49
AD
3269 /* evict remaining vram memory
3270 * This second call to evict vram is to evict the gart page table
3271 * using the CPU.
3272 */
d38ceaf9
AD
3273 amdgpu_bo_evict_vram(adev);
3274
d38ceaf9 3275 if (suspend) {
803cc26d 3276 pci_save_state(dev->pdev);
d38ceaf9
AD
3277 /* Shut down the device */
3278 pci_disable_device(dev->pdev);
3279 pci_set_power_state(dev->pdev, PCI_D3hot);
3280 }
3281
d38ceaf9
AD
3282 return 0;
3283}
3284
3285/**
810ddc3a 3286 * amdgpu_device_resume - initiate device resume
d38ceaf9 3287 *
87e3f136
DP
3288 * @dev: drm dev pointer
3289 * @resume: resume state
3290 * @fbcon : notify the fbdev of resume
d38ceaf9
AD
3291 *
3292 * Bring the hw back to operating state (all asics).
3293 * Returns 0 for success or an error on failure.
3294 * Called at driver resume.
3295 */
810ddc3a 3296int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
d38ceaf9
AD
3297{
3298 struct drm_connector *connector;
f8d2d39e 3299 struct drm_connector_list_iter iter;
d38ceaf9 3300 struct amdgpu_device *adev = dev->dev_private;
756e6880 3301 struct drm_crtc *crtc;
03161a6e 3302 int r = 0;
d38ceaf9
AD
3303
3304 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3305 return 0;
3306
d38ceaf9
AD
3307 if (resume) {
3308 pci_set_power_state(dev->pdev, PCI_D0);
3309 pci_restore_state(dev->pdev);
74b0b157 3310 r = pci_enable_device(dev->pdev);
03161a6e 3311 if (r)
4d3b9ae5 3312 return r;
d38ceaf9
AD
3313 }
3314
3315 /* post card */
39c640c0 3316 if (amdgpu_device_need_post(adev)) {
74b0b157 3317 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
3318 if (r)
3319 DRM_ERROR("amdgpu asic init failed\n");
3320 }
d38ceaf9 3321
06ec9070 3322 r = amdgpu_device_ip_resume(adev);
e6707218 3323 if (r) {
06ec9070 3324 DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
4d3b9ae5 3325 return r;
e6707218 3326 }
5ceb54c6
AD
3327 amdgpu_fence_driver_resume(adev);
3328
d38ceaf9 3329
06ec9070 3330 r = amdgpu_device_ip_late_init(adev);
03161a6e 3331 if (r)
4d3b9ae5 3332 return r;
d38ceaf9 3333
beff74bc
AD
3334 queue_delayed_work(system_wq, &adev->delayed_init_work,
3335 msecs_to_jiffies(AMDGPU_RESUME_MS));
3336
fe1053b7
AD
3337 if (!amdgpu_device_has_dc_support(adev)) {
3338 /* pin cursors */
3339 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3340 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3341
91334223 3342 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
fe1053b7
AD
3343 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3344 r = amdgpu_bo_reserve(aobj, true);
3345 if (r == 0) {
3346 r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
3347 if (r != 0)
3348 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
3349 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
3350 amdgpu_bo_unreserve(aobj);
3351 }
756e6880
AD
3352 }
3353 }
3354 }
ba997709
YZ
3355 r = amdgpu_amdkfd_resume(adev);
3356 if (r)
3357 return r;
756e6880 3358
96a5d8d4 3359 /* Make sure IB tests flushed */
beff74bc 3360 flush_delayed_work(&adev->delayed_init_work);
96a5d8d4 3361
d38ceaf9
AD
3362 /* blat the mode back in */
3363 if (fbcon) {
4562236b
HW
3364 if (!amdgpu_device_has_dc_support(adev)) {
3365 /* pre DCE11 */
3366 drm_helper_resume_force_mode(dev);
3367
3368 /* turn on display hw */
3369 drm_modeset_lock_all(dev);
f8d2d39e
LP
3370
3371 drm_connector_list_iter_begin(dev, &iter);
3372 drm_for_each_connector_iter(connector, &iter)
3373 drm_helper_connector_dpms(connector,
3374 DRM_MODE_DPMS_ON);
3375 drm_connector_list_iter_end(&iter);
3376
4562236b 3377 drm_modeset_unlock_all(dev);
d38ceaf9 3378 }
4d3b9ae5 3379 amdgpu_fbdev_set_suspend(adev, 0);
d38ceaf9
AD
3380 }
3381
3382 drm_kms_helper_poll_enable(dev);
23a1a9e5 3383
5e6932fe 3384 amdgpu_ras_resume(adev);
3385
23a1a9e5
L
3386 /*
3387 * Most of the connector probing functions try to acquire runtime pm
3388 * refs to ensure that the GPU is powered on when connector polling is
3389 * performed. Since we're calling this from a runtime PM callback,
3390 * trying to acquire rpm refs will cause us to deadlock.
3391 *
3392 * Since we're guaranteed to be holding the rpm lock, it's safe to
3393 * temporarily disable the rpm helpers so this doesn't deadlock us.
3394 */
3395#ifdef CONFIG_PM
3396 dev->dev->power.disable_depth++;
3397#endif
4562236b
HW
3398 if (!amdgpu_device_has_dc_support(adev))
3399 drm_helper_hpd_irq_event(dev);
3400 else
3401 drm_kms_helper_hotplug_event(dev);
23a1a9e5
L
3402#ifdef CONFIG_PM
3403 dev->dev->power.disable_depth--;
3404#endif
44779b43
RZ
3405 adev->in_suspend = false;
3406
4d3b9ae5 3407 return 0;
d38ceaf9
AD
3408}
3409
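/*
 * Editor's note (illustrative sketch, not part of this file): the suspend and
 * resume helpers above are normally driven by the driver's PM callbacks. A
 * minimal system-sleep resume hook might look like the function below; the
 * name example_pmops_resume and the use of dev_get_drvdata() to get back to
 * the drm_device are assumptions for illustration only.
 */
static int example_pmops_resume(struct device *dev)
{
	/* The drm_device pointer is stored as driver data on the struct device. */
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	/* resume = true: re-enable the PCI device; fbcon = true: restore fbdev. */
	return amdgpu_device_resume(drm_dev, true, true);
}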
e3ecdffa
AD
3410/**
3411 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
3412 *
3413 * @adev: amdgpu_device pointer
3414 *
3415 * The list of all the hardware IPs that make up the asic is walked and
3416 * the check_soft_reset callbacks are run. check_soft_reset determines
3417 * if the asic is still hung or not.
3418 * Returns true if any of the IPs are still in a hung state, false if not.
3419 */
06ec9070 3420static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
63fbf42f
CZ
3421{
3422 int i;
3423 bool asic_hang = false;
3424
f993d628
ML
3425 if (amdgpu_sriov_vf(adev))
3426 return true;
3427
8bc04c29
AD
3428 if (amdgpu_asic_need_full_reset(adev))
3429 return true;
3430
63fbf42f 3431 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 3432 if (!adev->ip_blocks[i].status.valid)
63fbf42f 3433 continue;
a1255107
AD
3434 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
3435 adev->ip_blocks[i].status.hang =
3436 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
3437 if (adev->ip_blocks[i].status.hang) {
3438 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
63fbf42f
CZ
3439 asic_hang = true;
3440 }
3441 }
3442 return asic_hang;
3443}
3444
e3ecdffa
AD
3445/**
3446 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
3447 *
3448 * @adev: amdgpu_device pointer
3449 *
3450 * The list of all the hardware IPs that make up the asic is walked and the
3451 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
3452 * handles any IP specific hardware or software state changes that are
3453 * necessary for a soft reset to succeed.
3454 * Returns 0 on success, negative error code on failure.
3455 */
06ec9070 3456static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
d31a501e
CZ
3457{
3458 int i, r = 0;
3459
3460 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 3461 if (!adev->ip_blocks[i].status.valid)
d31a501e 3462 continue;
a1255107
AD
3463 if (adev->ip_blocks[i].status.hang &&
3464 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
3465 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
d31a501e
CZ
3466 if (r)
3467 return r;
3468 }
3469 }
3470
3471 return 0;
3472}
3473
e3ecdffa
AD
3474/**
3475 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
3476 *
3477 * @adev: amdgpu_device pointer
3478 *
3479 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
3480 * reset is necessary to recover.
3481 * Returns true if a full asic reset is required, false if not.
3482 */
06ec9070 3483static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
35d782fe 3484{
da146d3b
AD
3485 int i;
3486
8bc04c29
AD
3487 if (amdgpu_asic_need_full_reset(adev))
3488 return true;
3489
da146d3b 3490 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 3491 if (!adev->ip_blocks[i].status.valid)
da146d3b 3492 continue;
a1255107
AD
3493 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
3494 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
3495 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
98512bb8
KW
3496 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
3497 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
a1255107 3498 if (adev->ip_blocks[i].status.hang) {
da146d3b
AD
3499 DRM_INFO("Some blocks need full reset!\n");
3500 return true;
3501 }
3502 }
35d782fe
CZ
3503 }
3504 return false;
3505}
3506
e3ecdffa
AD
3507/**
3508 * amdgpu_device_ip_soft_reset - do a soft reset
3509 *
3510 * @adev: amdgpu_device pointer
3511 *
3512 * The list of all the hardware IPs that make up the asic is walked and the
3513 * soft_reset callbacks are run if the block is hung. soft_reset handles any
3514 * IP specific hardware or software state changes that are necessary to soft
3515 * reset the IP.
3516 * Returns 0 on success, negative error code on failure.
3517 */
06ec9070 3518static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
35d782fe
CZ
3519{
3520 int i, r = 0;
3521
3522 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 3523 if (!adev->ip_blocks[i].status.valid)
35d782fe 3524 continue;
a1255107
AD
3525 if (adev->ip_blocks[i].status.hang &&
3526 adev->ip_blocks[i].version->funcs->soft_reset) {
3527 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
35d782fe
CZ
3528 if (r)
3529 return r;
3530 }
3531 }
3532
3533 return 0;
3534}
3535
e3ecdffa
AD
3536/**
3537 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
3538 *
3539 * @adev: amdgpu_device pointer
3540 *
3541 * The list of all the hardware IPs that make up the asic is walked and the
3542 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
3543 * handles any IP specific hardware or software state changes that are
3544 * necessary after the IP has been soft reset.
3545 * Returns 0 on success, negative error code on failure.
3546 */
06ec9070 3547static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
35d782fe
CZ
3548{
3549 int i, r = 0;
3550
3551 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 3552 if (!adev->ip_blocks[i].status.valid)
35d782fe 3553 continue;
a1255107
AD
3554 if (adev->ip_blocks[i].status.hang &&
3555 adev->ip_blocks[i].version->funcs->post_soft_reset)
3556 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
35d782fe
CZ
3557 if (r)
3558 return r;
3559 }
3560
3561 return 0;
3562}
3563
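/*
 * Editor's note (illustrative sketch, not part of this file): the soft-reset
 * helpers above are meant to be used as a sequence. A condensed view of the
 * flow that amdgpu_device_pre_asic_reset() implements further below might
 * look like this; the helper name example_try_soft_reset is an assumption
 * for illustration only.
 */
static int example_try_soft_reset(struct amdgpu_device *adev)
{
	int r;

	/* Nothing to do if no IP block reports a hang. */
	if (!amdgpu_device_ip_check_soft_reset(adev))
		return 0;

	/* Some IP blocks (GMC, SMC, PSP, ...) can only be recovered by a full reset. */
	if (amdgpu_device_ip_need_full_reset(adev))
		return -EAGAIN;

	amdgpu_device_ip_pre_soft_reset(adev);
	r = amdgpu_device_ip_soft_reset(adev);
	amdgpu_device_ip_post_soft_reset(adev);

	/* The soft reset only helped if nothing is still hung afterwards. */
	if (r || amdgpu_device_ip_check_soft_reset(adev))
		return -EAGAIN;

	return 0;
}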
e3ecdffa 3564/**
c33adbc7 3565 * amdgpu_device_recover_vram - Recover some VRAM contents
e3ecdffa
AD
3566 *
3567 * @adev: amdgpu_device pointer
3568 *
3569 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
3570 * restore things like GPUVM page tables after a GPU reset where
3571 * the contents of VRAM might be lost.
403009bf
CK
3572 *
3573 * Returns:
3574 * 0 on success, negative error code on failure.
e3ecdffa 3575 */
c33adbc7 3576static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
c41d1cf6 3577{
c41d1cf6 3578 struct dma_fence *fence = NULL, *next = NULL;
403009bf
CK
3579 struct amdgpu_bo *shadow;
3580 long r = 1, tmo;
c41d1cf6
ML
3581
3582 if (amdgpu_sriov_runtime(adev))
b045d3af 3583 tmo = msecs_to_jiffies(8000);
c41d1cf6
ML
3584 else
3585 tmo = msecs_to_jiffies(100);
3586
3587 DRM_INFO("recover vram bo from shadow start\n");
3588 mutex_lock(&adev->shadow_list_lock);
403009bf
CK
3589 list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
3590
3591 /* No need to recover an evicted BO */
3592 if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
b575f10d 3593 shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
403009bf
CK
3594 shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
3595 continue;
3596
3597 r = amdgpu_bo_restore_shadow(shadow, &next);
3598 if (r)
3599 break;
3600
c41d1cf6 3601 if (fence) {
1712fb1a 3602 tmo = dma_fence_wait_timeout(fence, false, tmo);
403009bf
CK
3603 dma_fence_put(fence);
3604 fence = next;
1712fb1a 3605 if (tmo == 0) {
3606 r = -ETIMEDOUT;
c41d1cf6 3607 break;
1712fb1a 3608 } else if (tmo < 0) {
3609 r = tmo;
3610 break;
3611 }
403009bf
CK
3612 } else {
3613 fence = next;
c41d1cf6 3614 }
c41d1cf6
ML
3615 }
3616 mutex_unlock(&adev->shadow_list_lock);
3617
403009bf
CK
3618 if (fence)
3619 tmo = dma_fence_wait_timeout(fence, false, tmo);
c41d1cf6
ML
3620 dma_fence_put(fence);
3621
1712fb1a 3622 if (r < 0 || tmo <= 0) {
3623 DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
403009bf
CK
3624 return -EIO;
3625 }
c41d1cf6 3626
403009bf
CK
3627 DRM_INFO("recover vram bo from shadow done\n");
3628 return 0;
c41d1cf6
ML
3629}
3630
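/*
 * Editor's note (illustrative sketch, not part of this file): the recovery
 * above only works for buffers that were created with a GTT shadow. A
 * hypothetical allocation of such a buffer (e.g. a page-table-like BO that
 * must survive a reset) might look like the following; the helper name,
 * size and flag choice are assumptions for illustration only.
 */
static int example_alloc_shadowed_bo(struct amdgpu_device *adev,
				     struct amdgpu_bo **bo)
{
	struct amdgpu_bo_param bp = {
		.size = PAGE_SIZE,
		.byte_align = PAGE_SIZE,
		.domain = AMDGPU_GEM_DOMAIN_VRAM,
		/* The SHADOW flag requests a shadow copy of the buffer in GTT. */
		.flags = AMDGPU_GEM_CREATE_SHADOW,
		.type = ttm_bo_type_kernel,
		.resv = NULL,
	};

	/* The shadow is what amdgpu_bo_restore_shadow() copies back after a reset. */
	return amdgpu_bo_create(adev, &bp, bo);
}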
a90ad3c2 3631
e3ecdffa 3632/**
06ec9070 3633 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
5740682e
ML
3634 *
3635 * @adev: amdgpu device pointer
87e3f136 3636 * @from_hypervisor: request from hypervisor
5740682e
ML
3637 *
3638 * Do a VF FLR and reinitialize the ASIC.
3639 * Returns 0 on success, negative error code on failure.
e3ecdffa
AD
3640 */
3641static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3642 bool from_hypervisor)
5740682e
ML
3643{
3644 int r;
3645
3646 if (from_hypervisor)
3647 r = amdgpu_virt_request_full_gpu(adev, true);
3648 else
3649 r = amdgpu_virt_reset_gpu(adev);
3650 if (r)
3651 return r;
a90ad3c2 3652
f81e8d53
WL
3653 amdgpu_amdkfd_pre_reset(adev);
3654
a90ad3c2 3655 /* Resume IP prior to SMC */
06ec9070 3656 r = amdgpu_device_ip_reinit_early_sriov(adev);
5740682e
ML
3657 if (r)
3658 goto error;
a90ad3c2
ML
3659
3660 /* we need to recover the GART prior to resuming SMC/CP/SDMA */
c1c7ce8f 3661 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
a90ad3c2 3662
7a3e0bb2
RZ
3663 r = amdgpu_device_fw_loading(adev);
3664 if (r)
3665 return r;
3666
a90ad3c2 3667 /* now we are okay to resume SMC/CP/SDMA */
06ec9070 3668 r = amdgpu_device_ip_reinit_late_sriov(adev);
5740682e
ML
3669 if (r)
3670 goto error;
a90ad3c2
ML
3671
3672 amdgpu_irq_gpu_reset_resume_helper(adev);
5740682e 3673 r = amdgpu_ib_ring_tests(adev);
f81e8d53 3674 amdgpu_amdkfd_post_reset(adev);
a90ad3c2 3675
abc34253 3676error:
d3c117e5 3677 amdgpu_virt_init_data_exchange(adev);
abc34253 3678 amdgpu_virt_release_full_gpu(adev, true);
c41d1cf6 3679 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
e3526257 3680 amdgpu_inc_vram_lost(adev);
c33adbc7 3681 r = amdgpu_device_recover_vram(adev);
a90ad3c2
ML
3682 }
3683
3684 return r;
3685}
3686
12938fad
CK
3687/**
3688 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
3689 *
3690 * @adev: amdgpu device pointer
3691 *
3692 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
3693 * a hung GPU.
3694 */
3695bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
3696{
3697 if (!amdgpu_device_ip_check_soft_reset(adev)) {
3698 DRM_INFO("Timeout, but no hardware hang detected.\n");
3699 return false;
3700 }
3701
3ba7b418
AG
3702 if (amdgpu_gpu_recovery == 0)
3703 goto disabled;
3704
3705 if (amdgpu_sriov_vf(adev))
3706 return true;
3707
3708 if (amdgpu_gpu_recovery == -1) {
3709 switch (adev->asic_type) {
fc42d47c
AG
3710 case CHIP_BONAIRE:
3711 case CHIP_HAWAII:
3ba7b418
AG
3712 case CHIP_TOPAZ:
3713 case CHIP_TONGA:
3714 case CHIP_FIJI:
3715 case CHIP_POLARIS10:
3716 case CHIP_POLARIS11:
3717 case CHIP_POLARIS12:
3718 case CHIP_VEGAM:
3719 case CHIP_VEGA20:
3720 case CHIP_VEGA10:
3721 case CHIP_VEGA12:
c43b849f 3722 case CHIP_RAVEN:
3ba7b418
AG
3723 break;
3724 default:
3725 goto disabled;
3726 }
12938fad
CK
3727 }
3728
3729 return true;
3ba7b418
AG
3730
3731disabled:
3732 DRM_INFO("GPU recovery disabled.\n");
3733 return false;
12938fad
CK
3734}
3735
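/*
 * Editor's note (illustrative sketch, not part of this file): this predicate
 * is intended to gate recovery from a job-timeout path. A condensed caller
 * might look like the function below; the name example_job_timedout is an
 * assumption for illustration, the real caller lives in amdgpu_job.c.
 */
static void example_job_timedout(struct amdgpu_ring *ring,
				 struct amdgpu_job *job)
{
	if (amdgpu_device_should_recover_gpu(ring->adev))
		amdgpu_device_gpu_recover(ring->adev, job);
	else
		DRM_INFO("GPU recovery disabled, leaving the job hanging.\n");
}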
5c6dd71e 3736
26bc5340
AG
3737static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
3738 struct amdgpu_job *job,
3739 bool *need_full_reset_arg)
3740{
3741 int i, r = 0;
3742 bool need_full_reset = *need_full_reset_arg;
71182665 3743
71182665 3744 /* block all schedulers and reset given job's ring */
0875dc9e
CZ
3745 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3746 struct amdgpu_ring *ring = adev->rings[i];
3747
51687759 3748 if (!ring || !ring->sched.thread)
0875dc9e 3749 continue;
5740682e 3750
2f9d4084
ML
3751 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3752 amdgpu_fence_driver_force_completion(ring);
0875dc9e 3753 }
d38ceaf9 3754
222b5f04
AG
3755 if (job)
3756 drm_sched_increase_karma(&job->base);
3757
1d721ed6 3758 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
26bc5340
AG
3759 if (!amdgpu_sriov_vf(adev)) {
3760
3761 if (!need_full_reset)
3762 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
3763
3764 if (!need_full_reset) {
3765 amdgpu_device_ip_pre_soft_reset(adev);
3766 r = amdgpu_device_ip_soft_reset(adev);
3767 amdgpu_device_ip_post_soft_reset(adev);
3768 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
3769 DRM_INFO("soft reset failed, will fallback to full reset!\n");
3770 need_full_reset = true;
3771 }
3772 }
3773
3774 if (need_full_reset)
3775 r = amdgpu_device_ip_suspend(adev);
3776
3777 *need_full_reset_arg = need_full_reset;
3778 }
3779
3780 return r;
3781}
3782
3783static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
3784 struct list_head *device_list_handle,
3785 bool *need_full_reset_arg)
3786{
3787 struct amdgpu_device *tmp_adev = NULL;
3788 bool need_full_reset = *need_full_reset_arg, vram_lost = false;
3789 int r = 0;
3790
3791 /*
3792 * ASIC reset has to be done on all XGMI hive nodes ASAP
3793 * to allow proper link negotiation in FW (within 1 sec)
3794 */
3795 if (need_full_reset) {
3796 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
d4535e2c
AG
3797 /* For XGMI run all resets in parallel to speed up the process */
3798 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
3799 if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
3800 r = -EALREADY;
3801 } else
3802 r = amdgpu_asic_reset(tmp_adev);
3803
3804 if (r) {
fed184e9 3805 DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
26bc5340 3806 r, tmp_adev->ddev->unique);
d4535e2c
AG
3807 break;
3808 }
3809 }
3810
3811 /* For XGMI wait for all PSP resets to complete before proceed */
3812 if (!r) {
3813 list_for_each_entry(tmp_adev, device_list_handle,
3814 gmc.xgmi.head) {
3815 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
3816 flush_work(&tmp_adev->xgmi_reset_work);
3817 r = tmp_adev->asic_reset_res;
3818 if (r)
3819 break;
3820 }
3821 }
26bc5340
AG
3822 }
3823 }
3824
3825
3826 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3827 if (need_full_reset) {
3828 /* post card */
3829 if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
3830 DRM_WARN("asic atom init failed!");
3831
3832 if (!r) {
3833 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
3834 r = amdgpu_device_ip_resume_phase1(tmp_adev);
3835 if (r)
3836 goto out;
3837
3838 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
3839 if (vram_lost) {
77e7f829 3840 DRM_INFO("VRAM is lost due to GPU reset!\n");
e3526257 3841 amdgpu_inc_vram_lost(tmp_adev);
26bc5340
AG
3842 }
3843
3844 r = amdgpu_gtt_mgr_recover(
3845 &tmp_adev->mman.bdev.man[TTM_PL_TT]);
3846 if (r)
3847 goto out;
3848
3849 r = amdgpu_device_fw_loading(tmp_adev);
3850 if (r)
3851 return r;
3852
3853 r = amdgpu_device_ip_resume_phase2(tmp_adev);
3854 if (r)
3855 goto out;
3856
3857 if (vram_lost)
3858 amdgpu_device_fill_reset_magic(tmp_adev);
3859
fdafb359
EQ
3860 /*
3861 * Add this ASIC back as tracked since the reset
3862 * completed successfully.
3863 */
3864 amdgpu_register_gpu_instance(tmp_adev);
3865
7c04ca50 3866 r = amdgpu_device_ip_late_init(tmp_adev);
3867 if (r)
3868 goto out;
3869
e79a04d5 3870 /* must succeed. */
511fdbc3 3871 amdgpu_ras_resume(tmp_adev);
e79a04d5 3872
26bc5340
AG
3873 /* Update PSP FW topology after reset */
3874 if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
3875 r = amdgpu_xgmi_update_topology(hive, tmp_adev);
3876 }
3877 }
3878
3879
3880out:
3881 if (!r) {
3882 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
3883 r = amdgpu_ib_ring_tests(tmp_adev);
3884 if (r) {
3885 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
3886 r = amdgpu_device_ip_suspend(tmp_adev);
3887 need_full_reset = true;
3888 r = -EAGAIN;
3889 goto end;
3890 }
3891 }
3892
3893 if (!r)
3894 r = amdgpu_device_recover_vram(tmp_adev);
3895 else
3896 tmp_adev->asic_reset_res = r;
3897 }
3898
3899end:
3900 *need_full_reset_arg = need_full_reset;
3901 return r;
3902}
3903
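/*
 * Editor's note (illustrative sketch, not part of this file): for XGMI hives
 * the function above fans the per-device resets out to xgmi_reset_work and
 * then flushes those work items and checks asic_reset_res. The work handler
 * is expected to be shaped roughly like this; the exact body here is an
 * assumption for illustration only.
 */
static void example_xgmi_reset_func(struct work_struct *__work)
{
	struct amdgpu_device *adev =
		container_of(__work, struct amdgpu_device, xgmi_reset_work);

	/* Record the result where amdgpu_do_asic_reset() picks it up after flush_work(). */
	adev->asic_reset_res = amdgpu_asic_reset(adev);
	if (adev->asic_reset_res)
		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s\n",
			 adev->asic_reset_res, adev->ddev->unique);
}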
1d721ed6 3904static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
26bc5340 3905{
1d721ed6
AG
3906 if (trylock) {
3907 if (!mutex_trylock(&adev->lock_reset))
3908 return false;
3909 } else
3910 mutex_lock(&adev->lock_reset);
5740682e 3911
26bc5340
AG
3912 atomic_inc(&adev->gpu_reset_counter);
3913 adev->in_gpu_reset = 1;
a3a09142
AD
3914 switch (amdgpu_asic_reset_method(adev)) {
3915 case AMD_RESET_METHOD_MODE1:
3916 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
3917 break;
3918 case AMD_RESET_METHOD_MODE2:
3919 adev->mp1_state = PP_MP1_STATE_RESET;
3920 break;
3921 default:
3922 adev->mp1_state = PP_MP1_STATE_NONE;
3923 break;
3924 }
1d721ed6
AG
3925
3926 return true;
26bc5340 3927}
d38ceaf9 3928
26bc5340
AG
3929static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
3930{
89041940 3931 amdgpu_vf_error_trans_all(adev);
a3a09142 3932 adev->mp1_state = PP_MP1_STATE_NONE;
13a752e3
ML
3933 adev->in_gpu_reset = 0;
3934 mutex_unlock(&adev->lock_reset);
26bc5340
AG
3935}
3936
26bc5340
AG
3937/**
3938 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
3939 *
3940 * @adev: amdgpu device pointer
3941 * @job: which job triggered the hang
3942 *
3943 * Attempt to reset the GPU if it has hung (all asics).
3944 * Attempt to do a soft reset or full reset and reinitialize the ASIC.
3945 * Returns 0 for success or an error on failure.
3946 */
3947
3948int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
3949 struct amdgpu_job *job)
3950{
1d721ed6
AG
3951 struct list_head device_list, *device_list_handle = NULL;
3952 bool need_full_reset, job_signaled;
26bc5340 3953 struct amdgpu_hive_info *hive = NULL;
26bc5340 3954 struct amdgpu_device *tmp_adev = NULL;
1d721ed6 3955 int i, r = 0;
7c6e68c7 3956 bool in_ras_intr = amdgpu_ras_intr_triggered();
26bc5340 3957
d5ea093e
AG
3958 /*
3959 * Flush RAM to disk so that after reboot
3960 * the user can read the log and see why the system rebooted.
3961 */
3962 if (in_ras_intr && amdgpu_ras_get_context(adev)->reboot) {
3963
3964 DRM_WARN("Emergency reboot.");
3965
3966 ksys_sync_helper();
3967 emergency_restart();
3968 }
3969
1d721ed6 3970 need_full_reset = job_signaled = false;
26bc5340
AG
3971 INIT_LIST_HEAD(&device_list);
3972
7c6e68c7 3973 dev_info(adev->dev, "GPU %s begin!\n", in_ras_intr ? "jobs stop":"reset");
26bc5340 3974
beff74bc 3975 cancel_delayed_work_sync(&adev->delayed_init_work);
c53e4db7 3976
1d721ed6
AG
3977 hive = amdgpu_get_xgmi_hive(adev, false);
3978
26bc5340 3979 /*
1d721ed6
AG
3980 * Here we trylock to avoid a chain of resets executing, triggered
3981 * either by jobs on different adevs in the XGMI hive or by jobs on
3982 * different schedulers for the same device while this TO handler is running.
3983 * We always reset all schedulers for a device and all devices in the XGMI
3984 * hive, so that should take care of them too.
26bc5340 3985 */
1d721ed6
AG
3986
3987 if (hive && !mutex_trylock(&hive->reset_lock)) {
3988 DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
0b2d2c2e 3989 job ? job->base.id : -1, hive->hive_id);
26bc5340 3990 return 0;
1d721ed6 3991 }
26bc5340
AG
3992
3993 /* Start with adev pre asic reset first for soft reset check.*/
1d721ed6
AG
3994 if (!amdgpu_device_lock_adev(adev, !hive)) {
3995 DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
0b2d2c2e 3996 job ? job->base.id : -1);
1d721ed6 3997 return 0;
26bc5340
AG
3998 }
3999
7c6e68c7
AG
4000 /* Block kfd: SRIOV would do it separately */
4001 if (!amdgpu_sriov_vf(adev))
4002 amdgpu_amdkfd_pre_reset(adev);
4003
26bc5340 4004 /* Build list of devices to reset */
1d721ed6 4005 if (adev->gmc.xgmi.num_physical_nodes > 1) {
26bc5340 4006 if (!hive) {
7c6e68c7
AG
4007 /*unlock kfd: SRIOV would do it separately */
4008 if (!amdgpu_sriov_vf(adev))
4009 amdgpu_amdkfd_post_reset(adev);
26bc5340
AG
4010 amdgpu_device_unlock_adev(adev);
4011 return -ENODEV;
4012 }
4013
4014 /*
4015 * In case we are in XGMI hive mode device reset is done for all the
4016 * nodes in the hive to retrain all XGMI links and hence the reset
4017 * sequence is executed in loop on all nodes.
4018 */
4019 device_list_handle = &hive->device_list;
4020 } else {
4021 list_add_tail(&adev->gmc.xgmi.head, &device_list);
4022 device_list_handle = &device_list;
4023 }
4024
1d721ed6
AG
4025 /* block all schedulers and reset given job's ring */
4026 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
7c6e68c7 4027 if (tmp_adev != adev) {
12ffa55d 4028 amdgpu_device_lock_adev(tmp_adev, false);
7c6e68c7
AG
4029 if (!amdgpu_sriov_vf(tmp_adev))
4030 amdgpu_amdkfd_pre_reset(tmp_adev);
4031 }
4032
12ffa55d
AG
4033 /*
4034 * Mark these ASICs to be reset as untracked first,
4035 * and add them back after the reset completes.
4036 */
4037 amdgpu_unregister_gpu_instance(tmp_adev);
4038
f1c1314b 4039 /* disable ras on ALL IPs */
7c6e68c7 4040 if (!in_ras_intr && amdgpu_device_ip_need_full_reset(tmp_adev))
f1c1314b 4041 amdgpu_ras_suspend(tmp_adev);
4042
1d721ed6
AG
4043 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4044 struct amdgpu_ring *ring = tmp_adev->rings[i];
4045
4046 if (!ring || !ring->sched.thread)
4047 continue;
4048
0b2d2c2e 4049 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
7c6e68c7
AG
4050
4051 if (in_ras_intr)
4052 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
1d721ed6
AG
4053 }
4054 }
4055
4056
7c6e68c7
AG
4057 if (in_ras_intr)
4058 goto skip_sched_resume;
4059
1d721ed6
AG
4060 /*
4061 * Must check guilty signal here since after this point all old
4062 * HW fences are force signaled.
4063 *
4064 * job->base holds a reference to parent fence
4065 */
4066 if (job && job->base.s_fence->parent &&
4067 dma_fence_is_signaled(job->base.s_fence->parent))
4068 job_signaled = true;
4069
1d721ed6
AG
4070 if (job_signaled) {
4071 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
4072 goto skip_hw_reset;
4073 }
4074
4075
4076 /* Guilty job will be freed after this*/
0b2d2c2e 4077 r = amdgpu_device_pre_asic_reset(adev, job, &need_full_reset);
1d721ed6
AG
4078 if (r) {
4079 /*TODO Should we stop ?*/
4080 DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
4081 r, adev->ddev->unique);
4082 adev->asic_reset_res = r;
4083 }
4084
26bc5340
AG
4085retry: /* Rest of adevs pre asic reset from XGMI hive. */
4086 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4087
4088 if (tmp_adev == adev)
4089 continue;
4090
26bc5340
AG
4091 r = amdgpu_device_pre_asic_reset(tmp_adev,
4092 NULL,
4093 &need_full_reset);
4094 /*TODO Should we stop ?*/
4095 if (r) {
4096 DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
4097 r, tmp_adev->ddev->unique);
4098 tmp_adev->asic_reset_res = r;
4099 }
4100 }
4101
4102 /* Actual ASIC resets if needed.*/
4103 /* TODO Implement XGMI hive reset logic for SRIOV */
4104 if (amdgpu_sriov_vf(adev)) {
4105 r = amdgpu_device_reset_sriov(adev, job ? false : true);
4106 if (r)
4107 adev->asic_reset_res = r;
4108 } else {
4109 r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
4110 if (r && r == -EAGAIN)
4111 goto retry;
4112 }
4113
1d721ed6
AG
4114skip_hw_reset:
4115
26bc5340
AG
4116 /* Post ASIC reset for all devs .*/
4117 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
7c6e68c7 4118
1d721ed6
AG
4119 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4120 struct amdgpu_ring *ring = tmp_adev->rings[i];
4121
4122 if (!ring || !ring->sched.thread)
4123 continue;
4124
4125 /* No point in resubmitting jobs if we didn't HW reset */
4126 if (!tmp_adev->asic_reset_res && !job_signaled)
4127 drm_sched_resubmit_jobs(&ring->sched);
4128
4129 drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
4130 }
4131
4132 if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
4133 drm_helper_resume_force_mode(tmp_adev->ddev);
4134 }
4135
4136 tmp_adev->asic_reset_res = 0;
26bc5340
AG
4137
4138 if (r) {
4139 /* bad news, how to tell it to userspace ? */
12ffa55d 4140 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
26bc5340
AG
4141 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
4142 } else {
12ffa55d 4143 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
26bc5340 4144 }
7c6e68c7 4145 }
26bc5340 4146
7c6e68c7
AG
4147skip_sched_resume:
4148 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4149 /*unlock kfd: SRIOV would do it separately */
4150 if (!in_ras_intr && !amdgpu_sriov_vf(tmp_adev))
4151 amdgpu_amdkfd_post_reset(tmp_adev);
26bc5340
AG
4152 amdgpu_device_unlock_adev(tmp_adev);
4153 }
4154
1d721ed6 4155 if (hive)
22d6575b 4156 mutex_unlock(&hive->reset_lock);
26bc5340
AG
4157
4158 if (r)
4159 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
d38ceaf9
AD
4160 return r;
4161}
4162
e3ecdffa
AD
4163/**
4164 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
4165 *
4166 * @adev: amdgpu_device pointer
4167 *
4168 * Fetches and stores in the driver the PCIE capabilities (gen speed
4169 * and lanes) of the slot the device is in. Handles APUs and
4170 * virtualized environments where PCIE config space may not be available.
4171 */
5494d864 4172static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
d0dd7f0c 4173{
5d9a6330 4174 struct pci_dev *pdev;
c5313457
HK
4175 enum pci_bus_speed speed_cap, platform_speed_cap;
4176 enum pcie_link_width platform_link_width;
d0dd7f0c 4177
cd474ba0
AD
4178 if (amdgpu_pcie_gen_cap)
4179 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 4180
cd474ba0
AD
4181 if (amdgpu_pcie_lane_cap)
4182 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 4183
cd474ba0
AD
4184 /* covers APUs as well */
4185 if (pci_is_root_bus(adev->pdev->bus)) {
4186 if (adev->pm.pcie_gen_mask == 0)
4187 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
4188 if (adev->pm.pcie_mlw_mask == 0)
4189 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 4190 return;
cd474ba0 4191 }
d0dd7f0c 4192
c5313457
HK
4193 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
4194 return;
4195
dbaa922b
AD
4196 pcie_bandwidth_available(adev->pdev, NULL,
4197 &platform_speed_cap, &platform_link_width);
c5313457 4198
cd474ba0 4199 if (adev->pm.pcie_gen_mask == 0) {
5d9a6330
AD
4200 /* asic caps */
4201 pdev = adev->pdev;
4202 speed_cap = pcie_get_speed_cap(pdev);
4203 if (speed_cap == PCI_SPEED_UNKNOWN) {
4204 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
cd474ba0
AD
4205 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4206 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
cd474ba0 4207 } else {
5d9a6330
AD
4208 if (speed_cap == PCIE_SPEED_16_0GT)
4209 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4210 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4211 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4212 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
4213 else if (speed_cap == PCIE_SPEED_8_0GT)
4214 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4215 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4216 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4217 else if (speed_cap == PCIE_SPEED_5_0GT)
4218 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4219 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
4220 else
4221 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
4222 }
4223 /* platform caps */
c5313457 4224 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5d9a6330
AD
4225 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4226 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4227 } else {
c5313457 4228 if (platform_speed_cap == PCIE_SPEED_16_0GT)
5d9a6330
AD
4229 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4230 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4231 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4232 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
c5313457 4233 else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5d9a6330
AD
4234 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4235 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4236 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
c5313457 4237 else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5d9a6330
AD
4238 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4239 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4240 else
4241 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
4242
cd474ba0
AD
4243 }
4244 }
4245 if (adev->pm.pcie_mlw_mask == 0) {
c5313457 4246 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5d9a6330
AD
4247 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
4248 } else {
c5313457 4249 switch (platform_link_width) {
5d9a6330 4250 case PCIE_LNK_X32:
cd474ba0
AD
4251 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
4252 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4253 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4254 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4255 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4256 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4257 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4258 break;
5d9a6330 4259 case PCIE_LNK_X16:
cd474ba0
AD
4260 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4261 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4262 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4263 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4264 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4265 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4266 break;
5d9a6330 4267 case PCIE_LNK_X12:
cd474ba0
AD
4268 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4269 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4270 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4271 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4272 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4273 break;
5d9a6330 4274 case PCIE_LNK_X8:
cd474ba0
AD
4275 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4276 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4277 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4278 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4279 break;
5d9a6330 4280 case PCIE_LNK_X4:
cd474ba0
AD
4281 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4282 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4283 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4284 break;
5d9a6330 4285 case PCIE_LNK_X2:
cd474ba0
AD
4286 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4287 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4288 break;
5d9a6330 4289 case PCIE_LNK_X1:
cd474ba0
AD
4290 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
4291 break;
4292 default:
4293 break;
4294 }
d0dd7f0c
AD
4295 }
4296 }
4297}
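/*
 * Editor's note (illustrative sketch, not part of this file): consumers are
 * expected to test the cached masks rather than re-query PCI config space.
 * A hypothetical check for PCIe gen3 support might look like this; the
 * helper name is an assumption for illustration only.
 */
static bool example_supports_pcie_gen3(struct amdgpu_device *adev)
{
	/* Both the ASIC and the platform bits must advertise gen3. */
	return (adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
	       (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
}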
d38ceaf9 4298