linux-2.6-block.git: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
b1ddf548 28#include <linux/power_supply.h>
0875dc9e 29#include <linux/kthread.h>
fdf2f6c5 30#include <linux/module.h>
31#include <linux/console.h>
32#include <linux/slab.h>
fdf2f6c5 33
4562236b 34#include <drm/drm_atomic_helper.h>
fcd70cd3 35#include <drm/drm_probe_helper.h>
36#include <drm/amdgpu_drm.h>
37#include <linux/vgaarb.h>
38#include <linux/vga_switcheroo.h>
39#include <linux/efi.h>
40#include "amdgpu.h"
f4b373f4 41#include "amdgpu_trace.h"
42#include "amdgpu_i2c.h"
43#include "atom.h"
44#include "amdgpu_atombios.h"
a5bde2f9 45#include "amdgpu_atomfirmware.h"
d0dd7f0c 46#include "amd_pcie.h"
47#ifdef CONFIG_DRM_AMDGPU_SI
48#include "si.h"
49#endif
50#ifdef CONFIG_DRM_AMDGPU_CIK
51#include "cik.h"
52#endif
aaa36a97 53#include "vi.h"
460826e6 54#include "soc15.h"
0a5b8c7b 55#include "nv.h"
d38ceaf9 56#include "bif/bif_4_1_d.h"
9accf2fd 57#include <linux/pci.h>
bec86378 58#include <linux/firmware.h>
89041940 59#include "amdgpu_vf_error.h"
d38ceaf9 60
ba997709 61#include "amdgpu_amdkfd.h"
d2f52ac8 62#include "amdgpu_pm.h"
d38ceaf9 63
5183411b 64#include "amdgpu_xgmi.h"
c030f2e4 65#include "amdgpu_ras.h"
9c7c85f7 66#include "amdgpu_pmu.h"
5183411b 67
68#include <linux/suspend.h>
69
e2a75f88 70MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
3f76dced 71MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
2d2e5e7e 72MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
ad5a67a7 73MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
54c4d17e 74MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
65e60f6e 75MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
b51a26a0 76MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
23c6268e 77MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
ed42cfe1 78MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
42b325e5 79MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
e2a75f88 80
81#define AMDGPU_RESUME_MS 2000
82
050091ab 83const char *amdgpu_asic_name[] = {
84 "TAHITI",
85 "PITCAIRN",
86 "VERDE",
87 "OLAND",
88 "HAINAN",
89 "BONAIRE",
90 "KAVERI",
91 "KABINI",
92 "HAWAII",
93 "MULLINS",
94 "TOPAZ",
95 "TONGA",
48299f95 96 "FIJI",
d38ceaf9 97 "CARRIZO",
139f4917 98 "STONEY",
99 "POLARIS10",
100 "POLARIS11",
c4642a47 101 "POLARIS12",
48ff108d 102 "VEGAM",
d4196f01 103 "VEGA10",
8fab806a 104 "VEGA12",
956fcddc 105 "VEGA20",
2ca8a5d2 106 "RAVEN",
d6c3b24e 107 "ARCTURUS",
1eee4228 108 "RENOIR",
852a6626 109 "NAVI10",
87dbad02 110 "NAVI14",
9802f5d7 111 "NAVI12",
112 "LAST",
113};
114
115/**
116 * DOC: pcie_replay_count
117 *
118 * The amdgpu driver provides a sysfs API for reporting the total number
119 * of PCIe replays (NAKs)
120 * The file pcie_replay_count is used for this and returns the total
121 * number of replays as a sum of the NAKs generated and NAKs received
122 */
123
124static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
125 struct device_attribute *attr, char *buf)
126{
127 struct drm_device *ddev = dev_get_drvdata(dev);
128 struct amdgpu_device *adev = ddev->dev_private;
129 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
130
131 return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
132}
133
134static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
135 amdgpu_device_get_pcie_replay_count, NULL);
136
137static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
138
139/**
140 * amdgpu_device_is_px - Is the device is a dGPU with HG/PX power control
141 *
142 * @dev: drm_device pointer
143 *
144 * Returns true if the device is a dGPU with HG/PX power control,
145 * otherwise return false.
146 */
147bool amdgpu_device_is_px(struct drm_device *dev)
148{
149 struct amdgpu_device *adev = dev->dev_private;
150
2f7d10b3 151 if (adev->flags & AMD_IS_PX)
152 return true;
153 return false;
154}
155
156/**
157 * VRAM access helper functions.
158 *
159 * amdgpu_device_vram_access - read/write a buffer in vram
160 *
161 * @adev: amdgpu_device pointer
162 * @pos: offset of the buffer in vram
163 * @buf: virtual address of the buffer in system memory
164 * @size: read/write size in bytes; the buffer at @buf must be at least @size bytes
165 * @write: true - write to vram, otherwise - read from vram
166 */
167void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
168 uint32_t *buf, size_t size, bool write)
169{
170 uint64_t last;
171 unsigned long flags;
172
173 last = size - 4;
174 for (last += pos; pos <= last; pos += 4) {
175 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
176 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
177 WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
178 if (write)
179 WREG32_NO_KIQ(mmMM_DATA, *buf++);
180 else
181 *buf++ = RREG32_NO_KIQ(mmMM_DATA);
182 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
183 }
184}
185
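/*
 * Illustrative sketch only (not part of the driver): one way a caller
 * might use amdgpu_device_vram_access() defined above. The helper walks
 * the buffer a dword at a time through the MM_INDEX/MM_DATA window, so
 * the byte size passed in is assumed to be a multiple of 4 here.
 */
static void __maybe_unused amdgpu_device_vram_access_sketch(struct amdgpu_device *adev)
{
	uint32_t data[4];

	/* read 16 bytes from VRAM offset 0 into system memory */
	amdgpu_device_vram_access(adev, 0, data, sizeof(data), false);

	/* write the same 16 bytes back to VRAM offset 0 */
	amdgpu_device_vram_access(adev, 0, data, sizeof(data), true);
}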
186/*
187 * MMIO register access helper functions.
188 */
189/**
190 * amdgpu_mm_rreg - read a memory mapped IO register
191 *
192 * @adev: amdgpu_device pointer
193 * @reg: dword aligned register offset
194 * @acc_flags: access flags which require special behavior
195 *
196 * Returns the 32 bit value from the offset specified.
197 */
d38ceaf9 198uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
15d72fd7 199 uint32_t acc_flags)
d38ceaf9 200{
201 uint32_t ret;
202
43ca8efa 203 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
bc992ba5 204 return amdgpu_virt_kiq_rreg(adev, reg);
bc992ba5 205
15d72fd7 206 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
f4b373f4 207 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
208 else {
209 unsigned long flags;
210
211 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
212 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
213 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
214 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
d38ceaf9 215 }
216 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
217 return ret;
218}
219
220/*
221 * MMIO register read with bytes helper functions
222 * @offset:bytes offset from MMIO start
223 *
224*/
225
226/**
227 * amdgpu_mm_rreg8 - read a memory mapped IO register
228 *
229 * @adev: amdgpu_device pointer
230 * @offset: byte aligned register offset
231 *
232 * Returns the 8 bit value from the offset specified.
233 */
234uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
235 if (offset < adev->rmmio_size)
236 return (readb(adev->rmmio + offset));
237 BUG();
238}
239
240/*
241 * MMIO register write with bytes helper functions
242 * @offset:bytes offset from MMIO start
243 * @value: the value want to be written to the register
244 *
245*/
246/**
247 * amdgpu_mm_wreg8 - write to a memory mapped IO register
248 *
249 * @adev: amdgpu_device pointer
250 * @offset: byte aligned register offset
251 * @value: 8 bit value to write
252 *
253 * Writes the value specified to the offset specified.
254 */
255void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
256 if (offset < adev->rmmio_size)
257 writeb(value, adev->rmmio + offset);
258 else
259 BUG();
260}
261
262/**
263 * amdgpu_mm_wreg - write to a memory mapped IO register
264 *
265 * @adev: amdgpu_device pointer
266 * @reg: dword aligned register offset
267 * @v: 32 bit value to write to the register
268 * @acc_flags: access flags which require special behavior
269 *
270 * Writes the value specified to the offset specified.
271 */
d38ceaf9 272void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
15d72fd7 273 uint32_t acc_flags)
d38ceaf9 274{
f4b373f4 275 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
4e99a44e 276
277 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
278 adev->last_mm_index = v;
279 }
280
43ca8efa 281 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
bc992ba5 282 return amdgpu_virt_kiq_wreg(adev, reg, v);
bc992ba5 283
15d72fd7 284 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
285 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
286 else {
287 unsigned long flags;
288
289 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
290 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
291 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
292 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
293 }
294
295 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
296 udelay(500);
297 }
298}
299
300/**
301 * amdgpu_io_rreg - read an IO register
302 *
303 * @adev: amdgpu_device pointer
304 * @reg: dword aligned register offset
305 *
306 * Returns the 32 bit value from the offset specified.
307 */
308u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
309{
310 if ((reg * 4) < adev->rio_mem_size)
311 return ioread32(adev->rio_mem + (reg * 4));
312 else {
313 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
314 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
315 }
316}
317
318/**
319 * amdgpu_io_wreg - write to an IO register
320 *
321 * @adev: amdgpu_device pointer
322 * @reg: dword aligned register offset
323 * @v: 32 bit value to write to the register
324 *
325 * Writes the value specified to the offset specified.
326 */
327void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
328{
329 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
330 adev->last_mm_index = v;
331 }
332
333 if ((reg * 4) < adev->rio_mem_size)
334 iowrite32(v, adev->rio_mem + (reg * 4));
335 else {
336 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
337 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
338 }
339
340 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
341 udelay(500);
342 }
343}
344
345/**
346 * amdgpu_mm_rdoorbell - read a doorbell dword
347 *
348 * @adev: amdgpu_device pointer
349 * @index: doorbell index
350 *
351 * Returns the value in the doorbell aperture at the
352 * requested doorbell index (CIK).
353 */
354u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
355{
356 if (index < adev->doorbell.num_doorbells) {
357 return readl(adev->doorbell.ptr + index);
358 } else {
359 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
360 return 0;
361 }
362}
363
364/**
365 * amdgpu_mm_wdoorbell - write a doorbell dword
366 *
367 * @adev: amdgpu_device pointer
368 * @index: doorbell index
369 * @v: value to write
370 *
371 * Writes @v to the doorbell aperture at the
372 * requested doorbell index (CIK).
373 */
374void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
375{
376 if (index < adev->doorbell.num_doorbells) {
377 writel(v, adev->doorbell.ptr + index);
378 } else {
379 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
380 }
381}
382
383/**
384 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
385 *
386 * @adev: amdgpu_device pointer
387 * @index: doorbell index
388 *
389 * Returns the value in the doorbell aperture at the
390 * requested doorbell index (VEGA10+).
391 */
392u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
393{
394 if (index < adev->doorbell.num_doorbells) {
395 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
396 } else {
397 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
398 return 0;
399 }
400}
401
402/**
403 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
404 *
405 * @adev: amdgpu_device pointer
406 * @index: doorbell index
407 * @v: value to write
408 *
409 * Writes @v to the doorbell aperture at the
410 * requested doorbell index (VEGA10+).
411 */
412void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
413{
414 if (index < adev->doorbell.num_doorbells) {
415 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
416 } else {
417 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
418 }
419}
420
421/**
422 * amdgpu_invalid_rreg - dummy reg read function
423 *
424 * @adev: amdgpu device pointer
425 * @reg: offset of register
426 *
427 * Dummy register read function. Used for register blocks
428 * that certain asics don't have (all asics).
429 * Returns the value in the register.
430 */
431static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
432{
433 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
434 BUG();
435 return 0;
436}
437
438/**
439 * amdgpu_invalid_wreg - dummy reg write function
440 *
441 * @adev: amdgpu device pointer
442 * @reg: offset of register
443 * @v: value to write to the register
444 *
445 * Dummy register write function. Used for register blocks
446 * that certain asics don't have (all asics).
447 */
448static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
449{
450 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
451 reg, v);
452 BUG();
453}
454
455/**
456 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
457 *
458 * @adev: amdgpu device pointer
459 * @reg: offset of register
460 *
461 * Dummy register read function. Used for register blocks
462 * that certain asics don't have (all asics).
463 * Returns the value in the register.
464 */
465static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
466{
467 DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
468 BUG();
469 return 0;
470}
471
472/**
473 * amdgpu_invalid_wreg64 - dummy reg write function
474 *
475 * @adev: amdgpu device pointer
476 * @reg: offset of register
477 * @v: value to write to the register
478 *
479 * Dummy register write function. Used for register blocks
480 * that certain asics don't have (all asics).
481 */
482static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
483{
484 DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
485 reg, v);
486 BUG();
487}
488
489/**
490 * amdgpu_block_invalid_rreg - dummy reg read function
491 *
492 * @adev: amdgpu device pointer
493 * @block: offset of instance
494 * @reg: offset of register
495 *
496 * Dummy register read function. Used for register blocks
497 * that certain asics don't have (all asics).
498 * Returns the value in the register.
499 */
500static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
501 uint32_t block, uint32_t reg)
502{
503 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
504 reg, block);
505 BUG();
506 return 0;
507}
508
509/**
510 * amdgpu_block_invalid_wreg - dummy reg write function
511 *
512 * @adev: amdgpu device pointer
513 * @block: offset of instance
514 * @reg: offset of register
515 * @v: value to write to the register
516 *
517 * Dummy register write function. Used for register blocks
518 * that certain asics don't have (all asics).
519 */
520static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
521 uint32_t block,
522 uint32_t reg, uint32_t v)
523{
524 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
525 reg, block, v);
526 BUG();
527}
528
529/**
530 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
531 *
532 * @adev: amdgpu device pointer
533 *
534 * Allocates a scratch page of VRAM for use by various things in the
535 * driver.
536 */
06ec9070 537static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
d38ceaf9 538{
539 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
540 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
541 &adev->vram_scratch.robj,
542 &adev->vram_scratch.gpu_addr,
543 (void **)&adev->vram_scratch.ptr);
544}
545
546/**
547 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
548 *
549 * @adev: amdgpu device pointer
550 *
551 * Frees the VRAM scratch page.
552 */
06ec9070 553static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
d38ceaf9 554{
078af1a3 555 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
556}
557
558/**
9c3f2b54 559 * amdgpu_device_program_register_sequence - program an array of registers.
560 *
561 * @adev: amdgpu_device pointer
562 * @registers: pointer to the register array
563 * @array_size: size of the register array
564 *
565 * Programs an array of registers with AND and OR masks.
566 * This is a helper for setting golden registers.
567 */
568void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
569 const u32 *registers,
570 const u32 array_size)
571{
572 u32 tmp, reg, and_mask, or_mask;
573 int i;
574
575 if (array_size % 3)
576 return;
577
578 for (i = 0; i < array_size; i +=3) {
579 reg = registers[i + 0];
580 and_mask = registers[i + 1];
581 or_mask = registers[i + 2];
582
583 if (and_mask == 0xffffffff) {
584 tmp = or_mask;
585 } else {
586 tmp = RREG32(reg);
587 tmp &= ~and_mask;
588 if (adev->family >= AMDGPU_FAMILY_AI)
589 tmp |= (or_mask & and_mask);
590 else
591 tmp |= or_mask;
592 }
593 WREG32(reg, tmp);
594 }
595}
596
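/*
 * Illustrative sketch only (not part of the driver): the register array
 * passed to amdgpu_device_program_register_sequence() above is a flat
 * list of {offset, and_mask, or_mask} triplets. An and_mask of
 * 0xffffffff overwrites the register with or_mask; any other mask
 * clears the masked bits first. The offsets below are made up.
 */
static const u32 example_golden_settings[] __maybe_unused = {
	/* offset      and_mask    or_mask */
	0x0000260c, 0xffffffff, 0x00000800,	/* full overwrite */
	0x000098f8, 0x0000000f, 0x00000004,	/* touch low nibble only */
};
/*
 * amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *					    ARRAY_SIZE(example_golden_settings));
 */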
597/**
598 * amdgpu_device_pci_config_reset - reset the GPU
599 *
600 * @adev: amdgpu_device pointer
601 *
602 * Resets the GPU using the pci config reset sequence.
603 * Only applicable to asics prior to vega10.
604 */
8111c387 605void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
606{
607 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
608}
609
610/*
611 * GPU doorbell aperture helpers function.
612 */
613/**
06ec9070 614 * amdgpu_device_doorbell_init - Init doorbell driver information.
615 *
616 * @adev: amdgpu_device pointer
617 *
618 * Init doorbell driver information (CIK)
619 * Returns 0 on success, error on failure.
620 */
06ec9070 621static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
d38ceaf9 622{
6585661d 623
624 /* No doorbell on SI hardware generation */
625 if (adev->asic_type < CHIP_BONAIRE) {
626 adev->doorbell.base = 0;
627 adev->doorbell.size = 0;
628 adev->doorbell.num_doorbells = 0;
629 adev->doorbell.ptr = NULL;
630 return 0;
631 }
632
633 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
634 return -EINVAL;
635
636 amdgpu_asic_init_doorbell_index(adev);
637
638 /* doorbell bar mapping */
639 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
640 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
641
edf600da 642 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
9564f192 643 adev->doorbell_index.max_assignment+1);
644 if (adev->doorbell.num_doorbells == 0)
645 return -EINVAL;
646
ec3db8a6 647 /* For Vega, reserve and map two pages on doorbell BAR since SDMA
648 * paging queue doorbell uses the second page. The
649 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
650 * doorbells are in the first page. So with paging queue enabled,
651 * the max num_doorbells should be incremented by one page (0x400 in dwords)
652 */
653 if (adev->asic_type >= CHIP_VEGA10)
88dc26e4 654 adev->doorbell.num_doorbells += 0x400;
ec3db8a6 655
656 adev->doorbell.ptr = ioremap(adev->doorbell.base,
657 adev->doorbell.num_doorbells *
658 sizeof(u32));
659 if (adev->doorbell.ptr == NULL)
d38ceaf9 660 return -ENOMEM;
661
662 return 0;
663}
664
665/**
06ec9070 666 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
667 *
668 * @adev: amdgpu_device pointer
669 *
670 * Tear down doorbell driver information (CIK)
671 */
06ec9070 672static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
673{
674 iounmap(adev->doorbell.ptr);
675 adev->doorbell.ptr = NULL;
676}
677
22cb0164 678
679
680/*
06ec9070 681 * amdgpu_device_wb_*()
455a7bc2 682 * Writeback is the method by which the GPU updates special pages in memory
ea81a173 683 * with the status of certain GPU events (fences, ring pointers,etc.).
684 */
685
686/**
06ec9070 687 * amdgpu_device_wb_fini - Disable Writeback and free memory
688 *
689 * @adev: amdgpu_device pointer
690 *
691 * Disables Writeback and frees the Writeback memory (all asics).
692 * Used at driver shutdown.
693 */
06ec9070 694static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
695{
696 if (adev->wb.wb_obj) {
697 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
698 &adev->wb.gpu_addr,
699 (void **)&adev->wb.wb);
700 adev->wb.wb_obj = NULL;
701 }
702}
703
704/**
06ec9070 705 * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
706 *
707 * @adev: amdgpu_device pointer
708 *
455a7bc2 709 * Initializes writeback and allocates writeback memory (all asics).
710 * Used at driver startup.
711 * Returns 0 on success or a negative error code on failure.
712 */
06ec9070 713static int amdgpu_device_wb_init(struct amdgpu_device *adev)
714{
715 int r;
716
717 if (adev->wb.wb_obj == NULL) {
718 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
719 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
720 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
721 &adev->wb.wb_obj, &adev->wb.gpu_addr,
722 (void **)&adev->wb.wb);
723 if (r) {
724 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
725 return r;
726 }
727
728 adev->wb.num_wb = AMDGPU_MAX_WB;
729 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
730
731 /* clear wb memory */
73469585 732 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
733 }
734
735 return 0;
736}
737
738/**
131b4b36 739 * amdgpu_device_wb_get - Allocate a wb entry
d38ceaf9
AD
740 *
741 * @adev: amdgpu_device pointer
742 * @wb: wb index
743 *
744 * Allocate a wb slot for use by the driver (all asics).
745 * Returns 0 on success or -EINVAL on failure.
746 */
131b4b36 747int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
748{
749 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
d38ceaf9 750
97407b63 751 if (offset < adev->wb.num_wb) {
7014285a 752 __set_bit(offset, adev->wb.used);
63ae07ca 753 *wb = offset << 3; /* convert to dw offset */
754 return 0;
755 } else {
756 return -EINVAL;
757 }
758}
759
d38ceaf9 760/**
131b4b36 761 * amdgpu_device_wb_free - Free a wb entry
762 *
763 * @adev: amdgpu_device pointer
764 * @wb: wb index
765 *
766 * Free a wb slot allocated for use by the driver (all asics)
767 */
131b4b36 768void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
d38ceaf9 769{
73469585 770 wb >>= 3;
d38ceaf9 771 if (wb < adev->wb.num_wb)
73469585 772 __clear_bit(wb, adev->wb.used);
773}
774
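/*
 * Illustrative sketch only (not part of the driver): typical lifetime of
 * a writeback slot obtained from the helpers above. The returned index
 * is a dword offset into adev->wb.wb; the matching GPU address is
 * adev->wb.gpu_addr + wb * 4.
 */
static int __maybe_unused amdgpu_device_wb_usage_sketch(struct amdgpu_device *adev)
{
	u32 wb;
	int r;

	r = amdgpu_device_wb_get(adev, &wb);	/* allocate a slot */
	if (r)
		return r;

	adev->wb.wb[wb] = 0;			/* CPU-side init of the slot */
	/* ... hand adev->wb.gpu_addr + wb * 4 to the hardware here ... */

	amdgpu_device_wb_free(adev, wb);	/* release the slot */
	return 0;
}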
775/**
776 * amdgpu_device_resize_fb_bar - try to resize FB BAR
777 *
778 * @adev: amdgpu_device pointer
779 *
780 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
781 * to fail, but if any of the BARs is not accessible after the size we abort
782 * driver loading by returning -ENODEV.
783 */
784int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
785{
770d13b1 786 u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
d6895ad3 787 u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
788 struct pci_bus *root;
789 struct resource *res;
790 unsigned i;
791 u16 cmd;
792 int r;
793
0c03b912 794 /* Bypass for VF */
795 if (amdgpu_sriov_vf(adev))
796 return 0;
797
798 /* Check if the root BUS has 64bit memory resources */
799 root = adev->pdev->bus;
800 while (root->parent)
801 root = root->parent;
802
803 pci_bus_for_each_resource(root, res, i) {
0ebb7c54 804 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
805 res->start > 0x100000000ull)
806 break;
807 }
808
809 /* Trying to resize is pointless without a root hub window above 4GB */
810 if (!res)
811 return 0;
812
813 /* Disable memory decoding while we change the BAR addresses and size */
814 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
815 pci_write_config_word(adev->pdev, PCI_COMMAND,
816 cmd & ~PCI_COMMAND_MEMORY);
817
818 /* Free the VRAM and doorbell BAR, we most likely need to move both. */
06ec9070 819 amdgpu_device_doorbell_fini(adev);
820 if (adev->asic_type >= CHIP_BONAIRE)
821 pci_release_resource(adev->pdev, 2);
822
823 pci_release_resource(adev->pdev, 0);
824
825 r = pci_resize_resource(adev->pdev, 0, rbar_size);
826 if (r == -ENOSPC)
827 DRM_INFO("Not enough PCI address space for a large BAR.");
828 else if (r && r != -ENOTSUPP)
829 DRM_ERROR("Problem resizing BAR0 (%d).", r);
830
831 pci_assign_unassigned_bus_resources(adev->pdev->bus);
832
833 /* When the doorbell or fb BAR isn't available we have no chance of
834 * using the device.
835 */
06ec9070 836 r = amdgpu_device_doorbell_init(adev);
837 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
838 return -ENODEV;
839
840 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
841
842 return 0;
843}
a05502e5 844
845/*
846 * GPU helpers function.
847 */
848/**
39c640c0 849 * amdgpu_device_need_post - check if the hw need post or not
850 *
851 * @adev: amdgpu_device pointer
852 *
853 * Check if the asic has been initialized (all asics) at driver startup
854 * or post is needed if hw reset is performed.
855 * Returns true if post is needed, false if not.
d38ceaf9 856 */
39c640c0 857bool amdgpu_device_need_post(struct amdgpu_device *adev)
858{
859 uint32_t reg;
860
861 if (amdgpu_sriov_vf(adev))
862 return false;
863
864 if (amdgpu_passthrough(adev)) {
865 /* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
866 * some old smc fw still need driver do vPost otherwise gpu hang, while
867 * those smc fw version above 22.15 doesn't have this flaw, so we force
868 * vpost executed for smc version below 22.15
869 */
870 if (adev->asic_type == CHIP_FIJI) {
871 int err;
872 uint32_t fw_ver;
873 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
874 /* force vPost if error occurred */
875 if (err)
876 return true;
877
878 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
879 if (fw_ver < 0x00160e00)
880 return true;
bec86378 881 }
bec86378 882 }
91fe77eb 883
884 if (adev->has_hw_reset) {
885 adev->has_hw_reset = false;
886 return true;
887 }
888
889 /* bios scratch used on CIK+ */
890 if (adev->asic_type >= CHIP_BONAIRE)
891 return amdgpu_atombios_scratch_need_asic_init(adev);
892
893 /* check MEM_SIZE for older asics */
894 reg = amdgpu_asic_get_config_memsize(adev);
895
896 if ((reg != 0) && (reg != 0xffffffff))
897 return false;
898
899 return true;
900}
901
902/* if we get transitioned to only one device, take VGA back */
903/**
06ec9070 904 * amdgpu_device_vga_set_decode - enable/disable vga decode
905 *
906 * @cookie: amdgpu_device pointer
907 * @state: enable/disable vga decode
908 *
909 * Enable/disable vga decode (all asics).
910 * Returns VGA resource flags.
911 */
06ec9070 912static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
913{
914 struct amdgpu_device *adev = cookie;
915 amdgpu_asic_set_vga_state(adev, state);
916 if (state)
917 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
918 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
919 else
920 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
921}
922
923/**
924 * amdgpu_device_check_block_size - validate the vm block size
925 *
926 * @adev: amdgpu_device pointer
927 *
928 * Validates the vm block size specified via module parameter.
929 * The vm block size defines number of bits in page table versus page directory,
930 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
931 * page table and the remaining bits are in the page directory.
932 */
06ec9070 933static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
934{
935 /* defines number of bits in page table versus page directory,
936 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
937 * page table and the remaining bits are in the page directory */
938 if (amdgpu_vm_block_size == -1)
939 return;
a1adf8be 940
bab4fee7 941 if (amdgpu_vm_block_size < 9) {
942 dev_warn(adev->dev, "VM page table size (%d) too small\n",
943 amdgpu_vm_block_size);
97489129 944 amdgpu_vm_block_size = -1;
a1adf8be 945 }
946}
947
948/**
949 * amdgpu_device_check_vm_size - validate the vm size
950 *
951 * @adev: amdgpu_device pointer
952 *
953 * Validates the vm size in GB specified via module parameter.
954 * The VM size is the size of the GPU virtual memory space in GB.
955 */
06ec9070 956static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
83ca145d 957{
958 /* no need to check the default value */
959 if (amdgpu_vm_size == -1)
960 return;
961
962 if (amdgpu_vm_size < 1) {
963 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
964 amdgpu_vm_size);
f3368128 965 amdgpu_vm_size = -1;
83ca145d 966 }
967}
968
969static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
970{
971 struct sysinfo si;
972 bool is_os_64 = (sizeof(void *) == 8) ? true : false;
973 uint64_t total_memory;
974 uint64_t dram_size_seven_GB = 0x1B8000000;
975 uint64_t dram_size_three_GB = 0xB8000000;
976
977 if (amdgpu_smu_memory_pool_size == 0)
978 return;
979
980 if (!is_os_64) {
981 DRM_WARN("Not 64-bit OS, feature not supported\n");
982 goto def_value;
983 }
984 si_meminfo(&si);
985 total_memory = (uint64_t)si.totalram * si.mem_unit;
986
987 if ((amdgpu_smu_memory_pool_size == 1) ||
988 (amdgpu_smu_memory_pool_size == 2)) {
989 if (total_memory < dram_size_three_GB)
990 goto def_value1;
991 } else if ((amdgpu_smu_memory_pool_size == 4) ||
992 (amdgpu_smu_memory_pool_size == 8)) {
993 if (total_memory < dram_size_seven_GB)
994 goto def_value1;
995 } else {
996 DRM_WARN("Smu memory pool size not supported\n");
997 goto def_value;
998 }
999 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1000
1001 return;
1002
1003def_value1:
1004 DRM_WARN("Not enough system memory\n");
1005def_value:
1006 adev->pm.smu_prv_buffer_size = 0;
1007}
1008
d38ceaf9 1009/**
06ec9070 1010 * amdgpu_device_check_arguments - validate module params
1011 *
1012 * @adev: amdgpu_device pointer
1013 *
1014 * Validates certain module parameters and updates
1015 * the associated values used by the driver (all asics).
1016 */
912dfc84 1017static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
d38ceaf9 1018{
1019 int ret = 0;
1020
1021 if (amdgpu_sched_jobs < 4) {
1022 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1023 amdgpu_sched_jobs);
1024 amdgpu_sched_jobs = 4;
76117507 1025 } else if (!is_power_of_2(amdgpu_sched_jobs)){
1026 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1027 amdgpu_sched_jobs);
1028 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1029 }
d38ceaf9 1030
83e74db6 1031 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1032 /* gart size must be greater or equal to 32M */
1033 dev_warn(adev->dev, "gart size (%d) too small\n",
1034 amdgpu_gart_size);
83e74db6 1035 amdgpu_gart_size = -1;
1036 }
1037
36d38372 1038 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
c4e1a13a 1039 /* gtt size must be greater or equal to 32M */
1040 dev_warn(adev->dev, "gtt size (%d) too small\n",
1041 amdgpu_gtt_size);
1042 amdgpu_gtt_size = -1;
1043 }
1044
1045 /* valid range is between 4 and 9 inclusive */
1046 if (amdgpu_vm_fragment_size != -1 &&
1047 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1048 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1049 amdgpu_vm_fragment_size = -1;
1050 }
1051
1052 amdgpu_device_check_smu_prv_buffer_size(adev);
1053
06ec9070 1054 amdgpu_device_check_vm_size(adev);
d38ceaf9 1055
06ec9070 1056 amdgpu_device_check_block_size(adev);
6a7f76e7 1057
19aede77 1058 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1059
1060 return ret;
1061}
1062
1063/**
1064 * amdgpu_switcheroo_set_state - set switcheroo state
1065 *
1066 * @pdev: pci dev pointer
1694467b 1067 * @state: vga_switcheroo state
1068 *
1069 * Callback for the switcheroo driver. Suspends or resumes the
1070 * asics before or after they are powered up using ACPI methods.
1071 */
1072static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1073{
1074 struct drm_device *dev = pci_get_drvdata(pdev);
1075
1076 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1077 return;
1078
1079 if (state == VGA_SWITCHEROO_ON) {
7ca85295 1080 pr_info("amdgpu: switched on\n");
1081 /* don't suspend or resume card normally */
1082 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1083
810ddc3a 1084 amdgpu_device_resume(dev, true, true);
d38ceaf9 1085
1086 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1087 drm_kms_helper_poll_enable(dev);
1088 } else {
7ca85295 1089 pr_info("amdgpu: switched off\n");
1090 drm_kms_helper_poll_disable(dev);
1091 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
810ddc3a 1092 amdgpu_device_suspend(dev, true, true);
1093 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1094 }
1095}
1096
1097/**
1098 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1099 *
1100 * @pdev: pci dev pointer
1101 *
1102 * Callback for the switcheroo driver. Check if the switcheroo
1103 * state can be changed.
1104 * Returns true if the state can be changed, false if not.
1105 */
1106static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1107{
1108 struct drm_device *dev = pci_get_drvdata(pdev);
1109
1110 /*
1111 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1112 * locking inversion with the driver load path. And the access here is
1113 * completely racy anyway. So don't bother with locking for now.
1114 */
1115 return dev->open_count == 0;
1116}
1117
1118static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1119 .set_gpu_state = amdgpu_switcheroo_set_state,
1120 .reprobe = NULL,
1121 .can_switch = amdgpu_switcheroo_can_switch,
1122};
1123
1124/**
1125 * amdgpu_device_ip_set_clockgating_state - set the CG state
1126 *
87e3f136 1127 * @dev: amdgpu_device pointer
1128 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1129 * @state: clockgating state (gate or ungate)
1130 *
1131 * Sets the requested clockgating state for all instances of
1132 * the hardware IP specified.
1133 * Returns the error code from the last instance.
1134 */
43fa561f 1135int amdgpu_device_ip_set_clockgating_state(void *dev,
1136 enum amd_ip_block_type block_type,
1137 enum amd_clockgating_state state)
d38ceaf9 1138{
43fa561f 1139 struct amdgpu_device *adev = dev;
1140 int i, r = 0;
1141
1142 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1143 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1144 continue;
1145 if (adev->ip_blocks[i].version->type != block_type)
1146 continue;
1147 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1148 continue;
1149 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1150 (void *)adev, state);
1151 if (r)
1152 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1153 adev->ip_blocks[i].version->funcs->name, r);
1154 }
1155 return r;
1156}
1157
1158/**
1159 * amdgpu_device_ip_set_powergating_state - set the PG state
1160 *
87e3f136 1161 * @dev: amdgpu_device pointer
1162 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1163 * @state: powergating state (gate or ungate)
1164 *
1165 * Sets the requested powergating state for all instances of
1166 * the hardware IP specified.
1167 * Returns the error code from the last instance.
1168 */
43fa561f 1169int amdgpu_device_ip_set_powergating_state(void *dev,
1170 enum amd_ip_block_type block_type,
1171 enum amd_powergating_state state)
d38ceaf9 1172{
43fa561f 1173 struct amdgpu_device *adev = dev;
1174 int i, r = 0;
1175
1176 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1177 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1178 continue;
1179 if (adev->ip_blocks[i].version->type != block_type)
1180 continue;
1181 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1182 continue;
1183 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1184 (void *)adev, state);
1185 if (r)
1186 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1187 adev->ip_blocks[i].version->funcs->name, r);
1188 }
1189 return r;
1190}
1191
1192/**
1193 * amdgpu_device_ip_get_clockgating_state - get the CG state
1194 *
1195 * @adev: amdgpu_device pointer
1196 * @flags: clockgating feature flags
1197 *
1198 * Walks the list of IPs on the device and updates the clockgating
1199 * flags for each IP.
1200 * Updates @flags with the feature flags for each hardware IP where
1201 * clockgating is enabled.
1202 */
1203void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1204 u32 *flags)
1205{
1206 int i;
1207
1208 for (i = 0; i < adev->num_ip_blocks; i++) {
1209 if (!adev->ip_blocks[i].status.valid)
1210 continue;
1211 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1212 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1213 }
1214}
1215
1216/**
1217 * amdgpu_device_ip_wait_for_idle - wait for idle
1218 *
1219 * @adev: amdgpu_device pointer
1220 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1221 *
1222 * Waits for the requested hardware IP to be idle.
1223 * Returns 0 for success or a negative error code on failure.
1224 */
1225int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1226 enum amd_ip_block_type block_type)
1227{
1228 int i, r;
1229
1230 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1231 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1232 continue;
1233 if (adev->ip_blocks[i].version->type == block_type) {
1234 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1235 if (r)
1236 return r;
1237 break;
1238 }
1239 }
1240 return 0;
1241
1242}
1243
1244/**
1245 * amdgpu_device_ip_is_idle - is the hardware IP idle
1246 *
1247 * @adev: amdgpu_device pointer
1248 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1249 *
1250 * Check if the hardware IP is idle or not.
1251 * Returns true if it the IP is idle, false if not.
1252 */
1253bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1254 enum amd_ip_block_type block_type)
1255{
1256 int i;
1257
1258 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1259 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1260 continue;
1261 if (adev->ip_blocks[i].version->type == block_type)
1262 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1263 }
1264 return true;
1265
1266}
1267
1268/**
1269 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1270 *
1271 * @adev: amdgpu_device pointer
87e3f136 1272 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1273 *
1274 * Returns a pointer to the hardware IP block structure
1275 * if it exists for the asic, otherwise NULL.
1276 */
1277struct amdgpu_ip_block *
1278amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1279 enum amd_ip_block_type type)
1280{
1281 int i;
1282
1283 for (i = 0; i < adev->num_ip_blocks; i++)
a1255107 1284 if (adev->ip_blocks[i].version->type == type)
1285 return &adev->ip_blocks[i];
1286
1287 return NULL;
1288}
1289
1290/**
2990a1fc 1291 * amdgpu_device_ip_block_version_cmp
1292 *
1293 * @adev: amdgpu_device pointer
5fc3aeeb 1294 * @type: enum amd_ip_block_type
1295 * @major: major version
1296 * @minor: minor version
1297 *
1298 * return 0 if equal or greater
1299 * return 1 if smaller or the ip_block doesn't exist
1300 */
1301int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1302 enum amd_ip_block_type type,
1303 u32 major, u32 minor)
d38ceaf9 1304{
2990a1fc 1305 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
d38ceaf9 1306
1307 if (ip_block && ((ip_block->version->major > major) ||
1308 ((ip_block->version->major == major) &&
1309 (ip_block->version->minor >= minor))))
1310 return 0;
1311
1312 return 1;
1313}
1314
a1255107 1315/**
2990a1fc 1316 * amdgpu_device_ip_block_add
1317 *
1318 * @adev: amdgpu_device pointer
1319 * @ip_block_version: pointer to the IP to add
1320 *
1321 * Adds the IP block driver information to the collection of IPs
1322 * on the asic.
1323 */
1324int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1325 const struct amdgpu_ip_block_version *ip_block_version)
1326{
1327 if (!ip_block_version)
1328 return -EINVAL;
1329
e966a725 1330 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1331 ip_block_version->funcs->name);
1332
1333 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1334
1335 return 0;
1336}
1337
1338/**
1339 * amdgpu_device_enable_virtual_display - enable virtual display feature
1340 *
1341 * @adev: amdgpu_device pointer
1342 *
1343 * Enables the virtual display feature if the user has enabled it via
1344 * the module parameter virtual_display. This feature provides a virtual
1345 * display hardware on headless boards or in virtualized environments.
1346 * This function parses and validates the configuration string specified by
1347 * the user and configures the virtual display configuration (number of
1348 * virtual connectors, crtcs, etc.) specified.
1349 */
483ef985 1350static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1351{
1352 adev->enable_virtual_display = false;
1353
1354 if (amdgpu_virtual_display) {
1355 struct drm_device *ddev = adev->ddev;
1356 const char *pci_address_name = pci_name(ddev->pdev);
0f66356d 1357 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1358
1359 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1360 pciaddstr_tmp = pciaddstr;
1361 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1362 pciaddname = strsep(&pciaddname_tmp, ",");
1363 if (!strcmp("all", pciaddname)
1364 || !strcmp(pci_address_name, pciaddname)) {
1365 long num_crtc;
1366 int res = -1;
1367
9accf2fd 1368 adev->enable_virtual_display = true;
1369
1370 if (pciaddname_tmp)
1371 res = kstrtol(pciaddname_tmp, 10,
1372 &num_crtc);
1373
1374 if (!res) {
1375 if (num_crtc < 1)
1376 num_crtc = 1;
1377 if (num_crtc > 6)
1378 num_crtc = 6;
1379 adev->mode_info.num_crtc = num_crtc;
1380 } else {
1381 adev->mode_info.num_crtc = 1;
1382 }
1383 break;
1384 }
1385 }
1386
1387 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1388 amdgpu_virtual_display, pci_address_name,
1389 adev->enable_virtual_display, adev->mode_info.num_crtc);
1390
1391 kfree(pciaddstr);
1392 }
1393}
1394
1395/**
1396 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1397 *
1398 * @adev: amdgpu_device pointer
1399 *
1400 * Parses the asic configuration parameters specified in the gpu info
1401 * firmware and makes them available to the driver for use in configuring
1402 * the asic.
1403 * Returns 0 on success, -EINVAL on failure.
1404 */
1405static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1406{
1407 const char *chip_name;
1408 char fw_name[30];
1409 int err;
1410 const struct gpu_info_firmware_header_v1_0 *hdr;
1411
1412 adev->firmware.gpu_info_fw = NULL;
1413
1414 switch (adev->asic_type) {
1415 case CHIP_TOPAZ:
1416 case CHIP_TONGA:
1417 case CHIP_FIJI:
e2a75f88 1418 case CHIP_POLARIS10:
cc07f18d 1419 case CHIP_POLARIS11:
e2a75f88 1420 case CHIP_POLARIS12:
cc07f18d 1421 case CHIP_VEGAM:
1422 case CHIP_CARRIZO:
1423 case CHIP_STONEY:
1424#ifdef CONFIG_DRM_AMDGPU_SI
1425 case CHIP_VERDE:
1426 case CHIP_TAHITI:
1427 case CHIP_PITCAIRN:
1428 case CHIP_OLAND:
1429 case CHIP_HAINAN:
1430#endif
1431#ifdef CONFIG_DRM_AMDGPU_CIK
1432 case CHIP_BONAIRE:
1433 case CHIP_HAWAII:
1434 case CHIP_KAVERI:
1435 case CHIP_KABINI:
1436 case CHIP_MULLINS:
1437#endif
27c0bc71 1438 case CHIP_VEGA20:
1439 default:
1440 return 0;
1441 case CHIP_VEGA10:
1442 chip_name = "vega10";
1443 break;
1444 case CHIP_VEGA12:
1445 chip_name = "vega12";
1446 break;
2d2e5e7e 1447 case CHIP_RAVEN:
1448 if (adev->rev_id >= 8)
1449 chip_name = "raven2";
1450 else if (adev->pdev->device == 0x15d8)
1451 chip_name = "picasso";
1452 else
1453 chip_name = "raven";
2d2e5e7e 1454 break;
1455 case CHIP_ARCTURUS:
1456 chip_name = "arcturus";
1457 break;
1458 case CHIP_RENOIR:
1459 chip_name = "renoir";
1460 break;
1461 case CHIP_NAVI10:
1462 chip_name = "navi10";
1463 break;
1464 case CHIP_NAVI14:
1465 chip_name = "navi14";
1466 break;
1467 case CHIP_NAVI12:
1468 chip_name = "navi12";
1469 break;
1470 }
1471
1472 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
ab4fe3e1 1473 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1474 if (err) {
1475 dev_err(adev->dev,
1476 "Failed to load gpu_info firmware \"%s\"\n",
1477 fw_name);
1478 goto out;
1479 }
ab4fe3e1 1480 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1481 if (err) {
1482 dev_err(adev->dev,
1483 "Failed to validate gpu_info firmware \"%s\"\n",
1484 fw_name);
1485 goto out;
1486 }
1487
ab4fe3e1 1488 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1489 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1490
1491 switch (hdr->version_major) {
1492 case 1:
1493 {
1494 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
ab4fe3e1 1495 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1496 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1497
1498 if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
1499 goto parse_soc_bounding_box;
1500
1501 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1502 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1503 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1504 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
e2a75f88 1505 adev->gfx.config.max_texture_channel_caches =
1506 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1507 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1508 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1509 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1510 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
e2a75f88 1511 adev->gfx.config.double_offchip_lds_buf =
1512 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1513 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1514 adev->gfx.cu_info.max_waves_per_simd =
1515 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1516 adev->gfx.cu_info.max_scratch_slots_per_cu =
1517 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1518 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
48321c3d 1519 if (hdr->version_minor >= 1) {
1520 const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1521 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1522 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1523 adev->gfx.config.num_sc_per_sh =
1524 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1525 adev->gfx.config.num_packer_per_sc =
1526 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1527 }
1528
1529parse_soc_bounding_box:
1530 /*
1531 * soc bounding box info is not integrated in the discovery table,
1532 * we always need to parse it from gpu info firmware.
1533 */
1534 if (hdr->version_minor == 2) {
1535 const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1536 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1537 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1538 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1539 }
1540 break;
1541 }
1542 default:
1543 dev_err(adev->dev,
1544 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1545 err = -EINVAL;
1546 goto out;
1547 }
1548out:
1549 return err;
1550}
1551
1552/**
1553 * amdgpu_device_ip_early_init - run early init for hardware IPs
1554 *
1555 * @adev: amdgpu_device pointer
1556 *
1557 * Early initialization pass for hardware IPs. The hardware IPs that make
1558 * up each asic are discovered and each IP's early_init callback is run. This
1559 * is the first stage in initializing the asic.
1560 * Returns 0 on success, negative error code on failure.
1561 */
06ec9070 1562static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
d38ceaf9 1563{
aaa36a97 1564 int i, r;
d38ceaf9 1565
483ef985 1566 amdgpu_device_enable_virtual_display(adev);
a6be7570 1567
d38ceaf9 1568 switch (adev->asic_type) {
1569 case CHIP_TOPAZ:
1570 case CHIP_TONGA:
48299f95 1571 case CHIP_FIJI:
2cc0c0b5 1572 case CHIP_POLARIS10:
32cc7e53 1573 case CHIP_POLARIS11:
c4642a47 1574 case CHIP_POLARIS12:
32cc7e53 1575 case CHIP_VEGAM:
aaa36a97 1576 case CHIP_CARRIZO:
1577 case CHIP_STONEY:
1578 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
1579 adev->family = AMDGPU_FAMILY_CZ;
1580 else
1581 adev->family = AMDGPU_FAMILY_VI;
1582
1583 r = vi_set_ip_blocks(adev);
1584 if (r)
1585 return r;
1586 break;
1587#ifdef CONFIG_DRM_AMDGPU_SI
1588 case CHIP_VERDE:
1589 case CHIP_TAHITI:
1590 case CHIP_PITCAIRN:
1591 case CHIP_OLAND:
1592 case CHIP_HAINAN:
295d0daf 1593 adev->family = AMDGPU_FAMILY_SI;
1594 r = si_set_ip_blocks(adev);
1595 if (r)
1596 return r;
1597 break;
1598#endif
1599#ifdef CONFIG_DRM_AMDGPU_CIK
1600 case CHIP_BONAIRE:
1601 case CHIP_HAWAII:
1602 case CHIP_KAVERI:
1603 case CHIP_KABINI:
1604 case CHIP_MULLINS:
1605 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1606 adev->family = AMDGPU_FAMILY_CI;
1607 else
1608 adev->family = AMDGPU_FAMILY_KV;
1609
1610 r = cik_set_ip_blocks(adev);
1611 if (r)
1612 return r;
1613 break;
1614#endif
1615 case CHIP_VEGA10:
1616 case CHIP_VEGA12:
e4bd8170 1617 case CHIP_VEGA20:
e48a3cd9 1618 case CHIP_RAVEN:
61cf44c1 1619 case CHIP_ARCTURUS:
1620 case CHIP_RENOIR:
1621 if (adev->asic_type == CHIP_RAVEN ||
1622 adev->asic_type == CHIP_RENOIR)
1623 adev->family = AMDGPU_FAMILY_RV;
1624 else
1625 adev->family = AMDGPU_FAMILY_AI;
1626
1627 r = soc15_set_ip_blocks(adev);
1628 if (r)
1629 return r;
1630 break;
0a5b8c7b 1631 case CHIP_NAVI10:
7ecb5cd4 1632 case CHIP_NAVI14:
4808cf9c 1633 case CHIP_NAVI12:
1634 adev->family = AMDGPU_FAMILY_NV;
1635
1636 r = nv_set_ip_blocks(adev);
1637 if (r)
1638 return r;
1639 break;
1640 default:
1641 /* FIXME: not supported yet */
1642 return -EINVAL;
1643 }
1644
1645 r = amdgpu_device_parse_gpu_info_fw(adev);
1646 if (r)
1647 return r;
1648
1649 if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
1650 amdgpu_discovery_get_gfx_info(adev);
1651
1884734a 1652 amdgpu_amdkfd_device_probe(adev);
1653
1654 if (amdgpu_sriov_vf(adev)) {
1655 r = amdgpu_virt_request_full_gpu(adev, true);
1656 if (r)
5ffa61c1 1657 return -EAGAIN;
1658 }
1659
3b94fb10 1660 adev->pm.pp_feature = amdgpu_pp_feature_mask;
a35ad98b 1661 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
00544006 1662 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
00f54b97 1663
1664 for (i = 0; i < adev->num_ip_blocks; i++) {
1665 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1666 DRM_ERROR("disabled ip block: %d <%s>\n",
1667 i, adev->ip_blocks[i].version->funcs->name);
a1255107 1668 adev->ip_blocks[i].status.valid = false;
d38ceaf9 1669 } else {
1670 if (adev->ip_blocks[i].version->funcs->early_init) {
1671 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2c1a2784 1672 if (r == -ENOENT) {
a1255107 1673 adev->ip_blocks[i].status.valid = false;
2c1a2784 1674 } else if (r) {
1675 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1676 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1677 return r;
2c1a2784 1678 } else {
a1255107 1679 adev->ip_blocks[i].status.valid = true;
2c1a2784 1680 }
974e6b64 1681 } else {
a1255107 1682 adev->ip_blocks[i].status.valid = true;
d38ceaf9 1683 }
d38ceaf9 1684 }
1685 /* get the vbios after the asic_funcs are set up */
1686 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
1687 /* Read BIOS */
1688 if (!amdgpu_get_bios(adev))
1689 return -EINVAL;
1690
1691 r = amdgpu_atombios_init(adev);
1692 if (r) {
1693 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
1694 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
1695 return r;
1696 }
1697 }
1698 }
1699
1700 adev->cg_flags &= amdgpu_cg_mask;
1701 adev->pg_flags &= amdgpu_pg_mask;
1702
1703 return 0;
1704}
1705
1706static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
1707{
1708 int i, r;
1709
1710 for (i = 0; i < adev->num_ip_blocks; i++) {
1711 if (!adev->ip_blocks[i].status.sw)
1712 continue;
1713 if (adev->ip_blocks[i].status.hw)
1714 continue;
1715 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2d11fd3f 1716 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
1717 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
1718 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1719 if (r) {
1720 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1721 adev->ip_blocks[i].version->funcs->name, r);
1722 return r;
1723 }
1724 adev->ip_blocks[i].status.hw = true;
1725 }
1726 }
1727
1728 return 0;
1729}
1730
1731static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
1732{
1733 int i, r;
1734
1735 for (i = 0; i < adev->num_ip_blocks; i++) {
1736 if (!adev->ip_blocks[i].status.sw)
1737 continue;
1738 if (adev->ip_blocks[i].status.hw)
1739 continue;
1740 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1741 if (r) {
1742 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1743 adev->ip_blocks[i].version->funcs->name, r);
1744 return r;
1745 }
1746 adev->ip_blocks[i].status.hw = true;
1747 }
1748
1749 return 0;
1750}
1751
7a3e0bb2
RZ
1752static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
1753{
1754 int r = 0;
1755 int i;
80f41f84 1756 uint32_t smu_version;
7a3e0bb2
RZ
1757
1758 if (adev->asic_type >= CHIP_VEGA10) {
1759 for (i = 0; i < adev->num_ip_blocks; i++) {
482f0e53
ML
1760 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
1761 continue;
1762
1763 /* no need to do the fw loading again if already done */
1764 if (adev->ip_blocks[i].status.hw == true)
1765 break;
1766
1767 if (adev->in_gpu_reset || adev->in_suspend) {
1768 r = adev->ip_blocks[i].version->funcs->resume(adev);
1769 if (r) {
1770 DRM_ERROR("resume of IP block <%s> failed %d\n",
7a3e0bb2 1771 adev->ip_blocks[i].version->funcs->name, r);
482f0e53
ML
1772 return r;
1773 }
1774 } else {
1775 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1776 if (r) {
1777 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1778 adev->ip_blocks[i].version->funcs->name, r);
1779 return r;
7a3e0bb2 1780 }
7a3e0bb2 1781 }
482f0e53
ML
1782
1783 adev->ip_blocks[i].status.hw = true;
1784 break;
7a3e0bb2
RZ
1785 }
1786 }
482f0e53 1787
80f41f84 1788 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
7a3e0bb2 1789
80f41f84 1790 return r;
7a3e0bb2
RZ
1791}
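
In the loop above the PSP block is the pivot for firmware loading: on a fresh boot its hw_init callback runs, while after a suspend or GPU reset its resume callback is used instead, and in both cases the SMU firmware load follows. A standalone sketch of that branch, with hypothetical stub functions standing in for the real callbacks:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-ins for the PSP block callbacks */
static int psp_hw_init(void) { printf("PSP: hw_init\n"); return 0; }
static int psp_resume(void)  { printf("PSP: resume\n");  return 0; }

static int fw_loading(bool in_reset_or_suspend, bool psp_already_up)
{
	int r;

	if (!psp_already_up) {
		/* resume after reset/suspend, full init otherwise */
		r = in_reset_or_suspend ? psp_resume() : psp_hw_init();
		if (r)
			return r;
	}
	printf("load SMU firmware\n");
	return 0;
}

int main(void)
{
	fw_loading(false, false);  /* cold boot */
	fw_loading(true, false);   /* resume or reset path */
	return 0;
}
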
1792
e3ecdffa
AD
1793/**
1794 * amdgpu_device_ip_init - run init for hardware IPs
1795 *
1796 * @adev: amdgpu_device pointer
1797 *
1798 * Main initialization pass for hardware IPs. The list of all the hardware
1799 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
1800 * are run. sw_init initializes the software state associated with each IP
1801 * and hw_init initializes the hardware associated with each IP.
1802 * Returns 0 on success, negative error code on failure.
1803 */
06ec9070 1804static int amdgpu_device_ip_init(struct amdgpu_device *adev)
d38ceaf9
AD
1805{
1806 int i, r;
1807
c030f2e4 1808 r = amdgpu_ras_init(adev);
1809 if (r)
1810 return r;
1811
d38ceaf9 1812 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1813 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1814 continue;
a1255107 1815 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2c1a2784 1816 if (r) {
a1255107
AD
1817 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1818 adev->ip_blocks[i].version->funcs->name, r);
72d3f592 1819 goto init_failed;
2c1a2784 1820 }
a1255107 1821 adev->ip_blocks[i].status.sw = true;
bfca0289 1822
d38ceaf9 1823 /* need to do gmc hw init early so we can allocate gpu mem */
a1255107 1824 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
06ec9070 1825 r = amdgpu_device_vram_scratch_init(adev);
2c1a2784
AD
1826 if (r) {
1827 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
72d3f592 1828 goto init_failed;
2c1a2784 1829 }
a1255107 1830 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784
AD
1831 if (r) {
1832 DRM_ERROR("hw_init %d failed %d\n", i, r);
72d3f592 1833 goto init_failed;
2c1a2784 1834 }
06ec9070 1835 r = amdgpu_device_wb_init(adev);
2c1a2784 1836 if (r) {
06ec9070 1837 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
72d3f592 1838 goto init_failed;
2c1a2784 1839 }
a1255107 1840 adev->ip_blocks[i].status.hw = true;
2493664f
ML
1841
1842 /* right after GMC hw init, we create CSA */
f92d5c61 1843 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
1e256e27
RZ
1844 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
1845 AMDGPU_GEM_DOMAIN_VRAM,
1846 AMDGPU_CSA_SIZE);
2493664f
ML
1847 if (r) {
1848 DRM_ERROR("allocate CSA failed %d\n", r);
72d3f592 1849 goto init_failed;
2493664f
ML
1850 }
1851 }
d38ceaf9
AD
1852 }
1853 }
1854
533aed27
AG
1855 r = amdgpu_ib_pool_init(adev);
1856 if (r) {
1857 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
1858 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
1859 goto init_failed;
1860 }
1861
c8963ea4
RZ
1862 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
1863 if (r)
72d3f592 1864 goto init_failed;
0a4f2520
RZ
1865
1866 r = amdgpu_device_ip_hw_init_phase1(adev);
1867 if (r)
72d3f592 1868 goto init_failed;
0a4f2520 1869
7a3e0bb2
RZ
1870 r = amdgpu_device_fw_loading(adev);
1871 if (r)
72d3f592 1872 goto init_failed;
7a3e0bb2 1873
0a4f2520
RZ
1874 r = amdgpu_device_ip_hw_init_phase2(adev);
1875 if (r)
72d3f592 1876 goto init_failed;
d38ceaf9 1877
121a2bc6
AG
1878 /*
1879 * retired pages will be loaded from eeprom and reserved here,
1880 * it should be called after amdgpu_device_ip_hw_init_phase2 since
1881 * for some ASICs the RAS EEPROM code relies on the SMU being fully
1882 * functional for I2C communication, which is only true at this point.
1883 * recovery_init may fail, but it can free all resources allocated by
1884 * itself and its failure should not stop amdgpu init process.
1885 *
1886 * Note: theoretically, this should be called before all vram allocations
1887 * to protect retired pages from being allocated and used again
1888 */
1889 amdgpu_ras_recovery_init(adev);
1890
3e2e2ab5
HZ
1891 if (adev->gmc.xgmi.num_physical_nodes > 1)
1892 amdgpu_xgmi_add_device(adev);
1884734a 1893 amdgpu_amdkfd_device_init(adev);
c6332b97 1894
72d3f592 1895init_failed:
d3c117e5 1896 if (amdgpu_sriov_vf(adev)) {
72d3f592
ED
1897 if (!r)
1898 amdgpu_virt_init_data_exchange(adev);
c6332b97 1899 amdgpu_virt_release_full_gpu(adev, true);
d3c117e5 1900 }
c6332b97 1901
72d3f592 1902 return r;
d38ceaf9
AD
1903}
1904
e3ecdffa
AD
1905/**
1906 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
1907 *
1908 * @adev: amdgpu_device pointer
1909 *
1910 * Records the current value at the gart pointer in VRAM as the reset magic.
1911 * The driver calls this function before a GPU reset. If the value is retained
1912 * after a GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
1913 */
06ec9070 1914static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
0c49e0b8
CZ
1915{
1916 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1917}
1918
e3ecdffa
AD
1919/**
1920 * amdgpu_device_check_vram_lost - check if vram is valid
1921 *
1922 * @adev: amdgpu_device pointer
1923 *
1924 * Checks the reset magic value written to the gart pointer in VRAM.
1925 * The driver calls this after a GPU reset to see if the contents of
1926 * VRAM have been lost or not.
1927 * returns true if vram is lost, false if not.
1928 */
06ec9070 1929static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
0c49e0b8
CZ
1930{
1931 return !!memcmp(adev->gart.ptr, adev->reset_magic,
1932 AMDGPU_RESET_MAGIC_NUM);
1933}
1934
e3ecdffa 1935/**
1112a46b 1936 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
e3ecdffa
AD
1937 *
1938 * @adev: amdgpu_device pointer
b8b72130 1939 * @state: clockgating state (gate or ungate)
e3ecdffa 1940 *
e3ecdffa 1941 * The list of all the hardware IPs that make up the asic is walked and the
1112a46b
RZ
1942 * set_clockgating_state callbacks are run.
1943 * During late init this pass enables clockgating for the hardware IPs;
1944 * during fini or suspend it disables clockgating for the hardware IPs.
e3ecdffa
AD
1945 * Returns 0 on success, negative error code on failure.
1946 */
fdd34271 1947
1112a46b
RZ
1948static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
1949 enum amd_clockgating_state state)
d38ceaf9 1950{
1112a46b 1951 int i, j, r;
d38ceaf9 1952
4a2ba394
SL
1953 if (amdgpu_emu_mode == 1)
1954 return 0;
1955
1112a46b
RZ
1956 for (j = 0; j < adev->num_ip_blocks; j++) {
1957 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
a2d31dc3 1958 if (!adev->ip_blocks[i].status.late_initialized)
d38ceaf9 1959 continue;
4a446d55 1960 /* skip CG for UVD/VCE/VCN/JPEG, it's handled specially */
a1255107 1961 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
57716327 1962 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
34319b32 1963 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
52f2e779 1964 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
57716327 1965 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
4a446d55 1966 /* enable clockgating to save power */
a1255107 1967 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1112a46b 1968 state);
4a446d55
AD
1969 if (r) {
1970 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
a1255107 1971 adev->ip_blocks[i].version->funcs->name, r);
4a446d55
AD
1972 return r;
1973 }
b0b00ff1 1974 }
d38ceaf9 1975 }
06b18f61 1976
c9f96fd5
RZ
1977 return 0;
1978}
1979
1112a46b 1980static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
c9f96fd5 1981{
1112a46b 1982 int i, j, r;
06b18f61 1983
c9f96fd5
RZ
1984 if (amdgpu_emu_mode == 1)
1985 return 0;
1986
1112a46b
RZ
1987 for (j = 0; j < adev->num_ip_blocks; j++) {
1988 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
a2d31dc3 1989 if (!adev->ip_blocks[i].status.late_initialized)
c9f96fd5
RZ
1990 continue;
1991 /* skip PG for UVD/VCE/VCN/JPEG, it's handled specially */
1992 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1993 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1994 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
52f2e779 1995 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
c9f96fd5
RZ
1996 adev->ip_blocks[i].version->funcs->set_powergating_state) {
1997 /* enable powergating to save power */
1998 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
1112a46b 1999 state);
c9f96fd5
RZ
2000 if (r) {
2001 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2002 adev->ip_blocks[i].version->funcs->name, r);
2003 return r;
2004 }
2005 }
2006 }
2dc80b00
S
2007 return 0;
2008}
2009
beff74bc
AD
2010static int amdgpu_device_enable_mgpu_fan_boost(void)
2011{
2012 struct amdgpu_gpu_instance *gpu_ins;
2013 struct amdgpu_device *adev;
2014 int i, ret = 0;
2015
2016 mutex_lock(&mgpu_info.mutex);
2017
2018 /*
2019 * MGPU fan boost feature should be enabled
2020 * only when there are two or more dGPUs in
2021 * the system
2022 */
2023 if (mgpu_info.num_dgpu < 2)
2024 goto out;
2025
2026 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2027 gpu_ins = &(mgpu_info.gpu_ins[i]);
2028 adev = gpu_ins->adev;
2029 if (!(adev->flags & AMD_IS_APU) &&
2030 !gpu_ins->mgpu_fan_enabled &&
2031 adev->powerplay.pp_funcs &&
2032 adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
2033 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2034 if (ret)
2035 break;
2036
2037 gpu_ins->mgpu_fan_enabled = 1;
2038 }
2039 }
2040
2041out:
2042 mutex_unlock(&mgpu_info.mutex);
2043
2044 return ret;
2045}
2046
e3ecdffa
AD
2047/**
2048 * amdgpu_device_ip_late_init - run late init for hardware IPs
2049 *
2050 * @adev: amdgpu_device pointer
2051 *
2052 * Late initialization pass for hardware IPs. The list of all the hardware
2053 * IPs that make up the asic is walked and the late_init callbacks are run.
2054 * late_init covers any special initialization that an IP requires
2055 * after all of the IP blocks have been initialized or something that needs to happen
2056 * late in the init process.
2057 * Returns 0 on success, negative error code on failure.
2058 */
06ec9070 2059static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2dc80b00 2060{
60599a03 2061 struct amdgpu_gpu_instance *gpu_instance;
2dc80b00
S
2062 int i = 0, r;
2063
2064 for (i = 0; i < adev->num_ip_blocks; i++) {
73f847db 2065 if (!adev->ip_blocks[i].status.hw)
2dc80b00
S
2066 continue;
2067 if (adev->ip_blocks[i].version->funcs->late_init) {
2068 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2069 if (r) {
2070 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2071 adev->ip_blocks[i].version->funcs->name, r);
2072 return r;
2073 }
2dc80b00 2074 }
73f847db 2075 adev->ip_blocks[i].status.late_initialized = true;
2dc80b00
S
2076 }
2077
1112a46b
RZ
2078 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2079 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
916ac57f 2080
06ec9070 2081 amdgpu_device_fill_reset_magic(adev);
d38ceaf9 2082
beff74bc
AD
2083 r = amdgpu_device_enable_mgpu_fan_boost();
2084 if (r)
2085 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2086
60599a03
EQ
2087
2088 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2089 mutex_lock(&mgpu_info.mutex);
2090
2091 /*
2092 * Reset device p-state to low as this was booted with high.
2093 *
2094 * This should be performed only after all devices from the same
2095 * hive get initialized.
2096 *
2097 * However, the number of devices in the hive is not known in advance;
2098 * it is counted one by one as the devices are initialized.
2099 *
2100 * So, we wait until all XGMI interlinked devices are initialized.
2101 * This may bring some delays as those devices may come from
2102 * different hives. But that should be OK.
2103 */
2104 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2105 for (i = 0; i < mgpu_info.num_gpu; i++) {
2106 gpu_instance = &(mgpu_info.gpu_ins[i]);
2107 if (gpu_instance->adev->flags & AMD_IS_APU)
2108 continue;
2109
2110 r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 0);
2111 if (r) {
2112 DRM_ERROR("pstate setting failed (%d).\n", r);
2113 break;
2114 }
2115 }
2116 }
2117
2118 mutex_unlock(&mgpu_info.mutex);
2119 }
2120
d38ceaf9
AD
2121 return 0;
2122}
2123
e3ecdffa
AD
2124/**
2125 * amdgpu_device_ip_fini - run fini for hardware IPs
2126 *
2127 * @adev: amdgpu_device pointer
2128 *
2129 * Main teardown pass for hardware IPs. The list of all the hardware
2130 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2131 * are run. hw_fini tears down the hardware associated with each IP
2132 * and sw_fini tears down any software state associated with each IP.
2133 * Returns 0 on success, negative error code on failure.
2134 */
06ec9070 2135static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
d38ceaf9
AD
2136{
2137 int i, r;
2138
c030f2e4 2139 amdgpu_ras_pre_fini(adev);
2140
a82400b5
AG
2141 if (adev->gmc.xgmi.num_physical_nodes > 1)
2142 amdgpu_xgmi_remove_device(adev);
2143
1884734a 2144 amdgpu_amdkfd_device_fini(adev);
05df1f01
RZ
2145
2146 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
fdd34271
RZ
2147 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2148
3e96dbfd
AD
2149 /* need to disable SMC first */
2150 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2151 if (!adev->ip_blocks[i].status.hw)
3e96dbfd 2152 continue;
fdd34271 2153 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
a1255107 2154 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3e96dbfd
AD
2155 /* XXX handle errors */
2156 if (r) {
2157 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
a1255107 2158 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd 2159 }
a1255107 2160 adev->ip_blocks[i].status.hw = false;
3e96dbfd
AD
2161 break;
2162 }
2163 }
2164
d38ceaf9 2165 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 2166 if (!adev->ip_blocks[i].status.hw)
d38ceaf9 2167 continue;
8201a67a 2168
a1255107 2169 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
d38ceaf9 2170 /* XXX handle errors */
2c1a2784 2171 if (r) {
a1255107
AD
2172 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2173 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 2174 }
8201a67a 2175
a1255107 2176 adev->ip_blocks[i].status.hw = false;
d38ceaf9
AD
2177 }
2178
9950cda2 2179
d38ceaf9 2180 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 2181 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 2182 continue;
c12aba3a
ML
2183
2184 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
c8963ea4 2185 amdgpu_ucode_free_bo(adev);
1e256e27 2186 amdgpu_free_static_csa(&adev->virt.csa_obj);
c12aba3a
ML
2187 amdgpu_device_wb_fini(adev);
2188 amdgpu_device_vram_scratch_fini(adev);
533aed27 2189 amdgpu_ib_pool_fini(adev);
c12aba3a
ML
2190 }
2191
a1255107 2192 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
d38ceaf9 2193 /* XXX handle errors */
2c1a2784 2194 if (r) {
a1255107
AD
2195 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2196 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 2197 }
a1255107
AD
2198 adev->ip_blocks[i].status.sw = false;
2199 adev->ip_blocks[i].status.valid = false;
d38ceaf9
AD
2200 }
2201
a6dcfd9c 2202 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 2203 if (!adev->ip_blocks[i].status.late_initialized)
8a2eef1d 2204 continue;
a1255107
AD
2205 if (adev->ip_blocks[i].version->funcs->late_fini)
2206 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2207 adev->ip_blocks[i].status.late_initialized = false;
a6dcfd9c
ML
2208 }
2209
c030f2e4 2210 amdgpu_ras_fini(adev);
2211
030308fc 2212 if (amdgpu_sriov_vf(adev))
24136135
ML
2213 if (amdgpu_virt_release_full_gpu(adev, false))
2214 DRM_ERROR("failed to release exclusive mode on fini\n");
2493664f 2215
d38ceaf9
AD
2216 return 0;
2217}
2218
e3ecdffa 2219/**
beff74bc 2220 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
e3ecdffa 2221 *
1112a46b 2222 * @work: work_struct.
e3ecdffa 2223 */
beff74bc 2224static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2dc80b00
S
2225{
2226 struct amdgpu_device *adev =
beff74bc 2227 container_of(work, struct amdgpu_device, delayed_init_work.work);
916ac57f
RZ
2228 int r;
2229
2230 r = amdgpu_ib_ring_tests(adev);
2231 if (r)
2232 DRM_ERROR("ib ring test failed (%d).\n", r);
2dc80b00
S
2233}
2234
1e317b99
RZ
2235static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2236{
2237 struct amdgpu_device *adev =
2238 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2239
2240 mutex_lock(&adev->gfx.gfx_off_mutex);
2241 if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
2242 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2243 adev->gfx.gfx_off_state = true;
2244 }
2245 mutex_unlock(&adev->gfx.gfx_off_mutex);
2246}
2247
e3ecdffa 2248/**
e7854a03 2249 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
e3ecdffa
AD
2250 *
2251 * @adev: amdgpu_device pointer
2252 *
2253 * Main suspend function for hardware IPs. The list of all the hardware
2254 * IPs that make up the asic is walked, clockgating is disabled and the
2255 * suspend callbacks are run. suspend puts the hardware and software state
2256 * in each IP into a state suitable for suspend.
2257 * Returns 0 on success, negative error code on failure.
2258 */
e7854a03
AD
2259static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2260{
2261 int i, r;
2262
05df1f01 2263 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
fdd34271 2264 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
05df1f01 2265
e7854a03
AD
2266 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2267 if (!adev->ip_blocks[i].status.valid)
2268 continue;
2269 /* displays are handled separately */
2270 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
e7854a03
AD
2271 /* XXX handle errors */
2272 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2273 /* XXX handle errors */
2274 if (r) {
2275 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2276 adev->ip_blocks[i].version->funcs->name, r);
482f0e53 2277 return r;
e7854a03 2278 }
482f0e53 2279 adev->ip_blocks[i].status.hw = false;
e7854a03
AD
2280 }
2281 }
2282
e7854a03
AD
2283 return 0;
2284}
2285
2286/**
2287 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2288 *
2289 * @adev: amdgpu_device pointer
2290 *
2291 * Main suspend function for hardware IPs. The list of all the hardware
2292 * IPs that make up the asic is walked, clockgating is disabled and the
2293 * suspend callbacks are run. suspend puts the hardware and software state
2294 * in each IP into a state suitable for suspend.
2295 * Returns 0 on success, negative error code on failure.
2296 */
2297static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
d38ceaf9
AD
2298{
2299 int i, r;
2300
2301 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 2302 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 2303 continue;
e7854a03
AD
2304 /* displays are handled in phase1 */
2305 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2306 continue;
bff77e86
LM
2307 /* PSP lost connection when err_event_athub occurs */
2308 if (amdgpu_ras_intr_triggered() &&
2309 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2310 adev->ip_blocks[i].status.hw = false;
2311 continue;
2312 }
d38ceaf9 2313 /* XXX handle errors */
a1255107 2314 r = adev->ip_blocks[i].version->funcs->suspend(adev);
d38ceaf9 2315 /* XXX handle errors */
2c1a2784 2316 if (r) {
a1255107
AD
2317 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2318 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 2319 }
876923fb 2320 adev->ip_blocks[i].status.hw = false;
a3a09142
AD
2321 /* handle putting the SMC in the appropriate state */
2322 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2323 if (is_support_sw_smu(adev)) {
0e0b89c0 2324 r = smu_set_mp1_state(&adev->smu, adev->mp1_state);
a3a09142 2325 } else if (adev->powerplay.pp_funcs &&
482f0e53 2326 adev->powerplay.pp_funcs->set_mp1_state) {
a3a09142
AD
2327 r = adev->powerplay.pp_funcs->set_mp1_state(
2328 adev->powerplay.pp_handle,
2329 adev->mp1_state);
0e0b89c0
EQ
2330 }
2331 if (r) {
2332 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2333 adev->mp1_state, r);
2334 return r;
a3a09142
AD
2335 }
2336 }
b5507c7e
AG
2337
2338 adev->ip_blocks[i].status.hw = false;
d38ceaf9
AD
2339 }
2340
2341 return 0;
2342}
2343
e7854a03
AD
2344/**
2345 * amdgpu_device_ip_suspend - run suspend for hardware IPs
2346 *
2347 * @adev: amdgpu_device pointer
2348 *
2349 * Main suspend function for hardware IPs. The list of all the hardware
2350 * IPs that make up the asic is walked, clockgating is disabled and the
2351 * suspend callbacks are run. suspend puts the hardware and software state
2352 * in each IP into a state suitable for suspend.
2353 * Returns 0 on success, negative error code on failure.
2354 */
2355int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2356{
2357 int r;
2358
e7819644
YT
2359 if (amdgpu_sriov_vf(adev))
2360 amdgpu_virt_request_full_gpu(adev, false);
2361
e7854a03
AD
2362 r = amdgpu_device_ip_suspend_phase1(adev);
2363 if (r)
2364 return r;
2365 r = amdgpu_device_ip_suspend_phase2(adev);
2366
e7819644
YT
2367 if (amdgpu_sriov_vf(adev))
2368 amdgpu_virt_release_full_gpu(adev, false);
2369
e7854a03
AD
2370 return r;
2371}
2372
06ec9070 2373static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
a90ad3c2
ML
2374{
2375 int i, r;
2376
2cb681b6
ML
2377 static enum amd_ip_block_type ip_order[] = {
2378 AMD_IP_BLOCK_TYPE_GMC,
2379 AMD_IP_BLOCK_TYPE_COMMON,
39186aef 2380 AMD_IP_BLOCK_TYPE_PSP,
2cb681b6
ML
2381 AMD_IP_BLOCK_TYPE_IH,
2382 };
a90ad3c2 2383
2cb681b6
ML
2384 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2385 int j;
2386 struct amdgpu_ip_block *block;
a90ad3c2 2387
2cb681b6
ML
2388 for (j = 0; j < adev->num_ip_blocks; j++) {
2389 block = &adev->ip_blocks[j];
2390
482f0e53 2391 block->status.hw = false;
2cb681b6
ML
2392 if (block->version->type != ip_order[i] ||
2393 !block->status.valid)
2394 continue;
2395
2396 r = block->version->funcs->hw_init(adev);
0aaeefcc 2397 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
c41d1cf6
ML
2398 if (r)
2399 return r;
482f0e53 2400 block->status.hw = true;
a90ad3c2
ML
2401 }
2402 }
2403
2404 return 0;
2405}
2406
06ec9070 2407static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
a90ad3c2
ML
2408{
2409 int i, r;
2410
2cb681b6
ML
2411 static enum amd_ip_block_type ip_order[] = {
2412 AMD_IP_BLOCK_TYPE_SMC,
2413 AMD_IP_BLOCK_TYPE_DCE,
2414 AMD_IP_BLOCK_TYPE_GFX,
2415 AMD_IP_BLOCK_TYPE_SDMA,
257deb8c
FM
2416 AMD_IP_BLOCK_TYPE_UVD,
2417 AMD_IP_BLOCK_TYPE_VCE
2cb681b6 2418 };
a90ad3c2 2419
2cb681b6
ML
2420 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2421 int j;
2422 struct amdgpu_ip_block *block;
a90ad3c2 2423
2cb681b6
ML
2424 for (j = 0; j < adev->num_ip_blocks; j++) {
2425 block = &adev->ip_blocks[j];
2426
2427 if (block->version->type != ip_order[i] ||
482f0e53
ML
2428 !block->status.valid ||
2429 block->status.hw)
2cb681b6
ML
2430 continue;
2431
2432 r = block->version->funcs->hw_init(adev);
0aaeefcc 2433 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
c41d1cf6
ML
2434 if (r)
2435 return r;
482f0e53 2436 block->status.hw = true;
a90ad3c2
ML
2437 }
2438 }
2439
2440 return 0;
2441}
2442
e3ecdffa
AD
2443/**
2444 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2445 *
2446 * @adev: amdgpu_device pointer
2447 *
2448 * First resume function for hardware IPs. The list of all the hardware
2449 * IPs that make up the asic is walked and the resume callbacks are run for
2450 * COMMON, GMC, and IH. resume puts the hardware into a functional state
2451 * after a suspend and updates the software state as necessary. This
2452 * function is also used for restoring the GPU after a GPU reset.
2453 * Returns 0 on success, negative error code on failure.
2454 */
06ec9070 2455static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
d38ceaf9
AD
2456{
2457 int i, r;
2458
a90ad3c2 2459 for (i = 0; i < adev->num_ip_blocks; i++) {
482f0e53 2460 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
a90ad3c2 2461 continue;
a90ad3c2 2462 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
e3ecdffa
AD
2463 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2464 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
482f0e53 2465
fcf0649f
CZ
2466 r = adev->ip_blocks[i].version->funcs->resume(adev);
2467 if (r) {
2468 DRM_ERROR("resume of IP block <%s> failed %d\n",
2469 adev->ip_blocks[i].version->funcs->name, r);
2470 return r;
2471 }
482f0e53 2472 adev->ip_blocks[i].status.hw = true;
a90ad3c2
ML
2473 }
2474 }
2475
2476 return 0;
2477}
2478
e3ecdffa
AD
2479/**
2480 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2481 *
2482 * @adev: amdgpu_device pointer
2483 *
2484 * Second resume function for hardware IPs. The list of all the hardware
2485 * IPs that make up the asic is walked and the resume callbacks are run for
2486 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
2487 * functional state after a suspend and updates the software state as
2488 * necessary. This function is also used for restoring the GPU after a GPU
2489 * reset.
2490 * Returns 0 on success, negative error code on failure.
2491 */
06ec9070 2492static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
d38ceaf9
AD
2493{
2494 int i, r;
2495
2496 for (i = 0; i < adev->num_ip_blocks; i++) {
482f0e53 2497 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
d38ceaf9 2498 continue;
fcf0649f 2499 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
e3ecdffa 2500 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
7a3e0bb2
RZ
2501 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
2502 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
fcf0649f 2503 continue;
a1255107 2504 r = adev->ip_blocks[i].version->funcs->resume(adev);
2c1a2784 2505 if (r) {
a1255107
AD
2506 DRM_ERROR("resume of IP block <%s> failed %d\n",
2507 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 2508 return r;
2c1a2784 2509 }
482f0e53 2510 adev->ip_blocks[i].status.hw = true;
d38ceaf9
AD
2511 }
2512
2513 return 0;
2514}
2515
e3ecdffa
AD
2516/**
2517 * amdgpu_device_ip_resume - run resume for hardware IPs
2518 *
2519 * @adev: amdgpu_device pointer
2520 *
2521 * Main resume function for hardware IPs. The hardware IPs
2522 * are split into two resume functions because they
2523 * are also used in recovering from a GPU reset and some additional
2524 * steps need to be taken between them. In this case (S3/S4) they are
2525 * run sequentially.
2526 * Returns 0 on success, negative error code on failure.
2527 */
06ec9070 2528static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
fcf0649f
CZ
2529{
2530 int r;
2531
06ec9070 2532 r = amdgpu_device_ip_resume_phase1(adev);
fcf0649f
CZ
2533 if (r)
2534 return r;
7a3e0bb2
RZ
2535
2536 r = amdgpu_device_fw_loading(adev);
2537 if (r)
2538 return r;
2539
06ec9070 2540 r = amdgpu_device_ip_resume_phase2(adev);
fcf0649f
CZ
2541
2542 return r;
2543}
2544
e3ecdffa
AD
2545/**
2546 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2547 *
2548 * @adev: amdgpu_device pointer
2549 *
2550 * Query the VBIOS data tables to determine if the board supports SR-IOV.
2551 */
4e99a44e 2552static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 2553{
6867e1b5
ML
2554 if (amdgpu_sriov_vf(adev)) {
2555 if (adev->is_atom_fw) {
2556 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2557 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2558 } else {
2559 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2560 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2561 }
2562
2563 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2564 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
a5bde2f9 2565 }
048765ad
AR
2566}
2567
e3ecdffa
AD
2568/**
2569 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
2570 *
2571 * @asic_type: AMD asic type
2572 *
2573 * Check if there is DC (new modesetting infrastructure) support for an asic.
2574 * returns true if DC has support, false if not.
2575 */
4562236b
HW
2576bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2577{
2578 switch (asic_type) {
2579#if defined(CONFIG_DRM_AMD_DC)
2580 case CHIP_BONAIRE:
0d6fbccb 2581 case CHIP_KAVERI:
367e6687
AD
2582 case CHIP_KABINI:
2583 case CHIP_MULLINS:
d9fda248
HW
2584 /*
2585 * We have systems in the wild with these ASICs that require
2586 * LVDS and VGA support which is not supported with DC.
2587 *
2588 * Fallback to the non-DC driver here by default so as not to
2589 * cause regressions.
2590 */
2591 return amdgpu_dc > 0;
2592 case CHIP_HAWAII:
4562236b
HW
2593 case CHIP_CARRIZO:
2594 case CHIP_STONEY:
4562236b 2595 case CHIP_POLARIS10:
675fd32b 2596 case CHIP_POLARIS11:
2c8ad2d5 2597 case CHIP_POLARIS12:
675fd32b 2598 case CHIP_VEGAM:
4562236b
HW
2599 case CHIP_TONGA:
2600 case CHIP_FIJI:
42f8ffa1 2601 case CHIP_VEGA10:
dca7b401 2602 case CHIP_VEGA12:
c6034aa2 2603 case CHIP_VEGA20:
b86a1aa3 2604#if defined(CONFIG_DRM_AMD_DC_DCN)
fd187853 2605 case CHIP_RAVEN:
b4f199c7 2606 case CHIP_NAVI10:
8fceceb6 2607 case CHIP_NAVI14:
078655d9 2608 case CHIP_NAVI12:
e1c14c43 2609 case CHIP_RENOIR:
42f8ffa1 2610#endif
fd187853 2611 return amdgpu_dc != 0;
4562236b
HW
2612#endif
2613 default:
2614 return false;
2615 }
2616}
2617
2618/**
2619 * amdgpu_device_has_dc_support - check if dc is supported
2620 *
2621 * @adev: amdgpu_device_pointer
2622 *
2623 * Returns true for supported, false for not supported
2624 */
2625bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2626{
2555039d
XY
2627 if (amdgpu_sriov_vf(adev))
2628 return false;
2629
4562236b
HW
2630 return amdgpu_device_asic_has_dc_support(adev->asic_type);
2631}
2632
d4535e2c
AG
2633
2634static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
2635{
2636 struct amdgpu_device *adev =
2637 container_of(__work, struct amdgpu_device, xgmi_reset_work);
2638
2639 adev->asic_reset_res = amdgpu_asic_reset(adev);
2640 if (adev->asic_reset_res)
fed184e9 2641 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
d4535e2c
AG
2642 adev->asic_reset_res, adev->ddev->unique);
2643}
2644
71f98027
AD
2645static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
2646{
2647 char *input = amdgpu_lockup_timeout;
2648 char *timeout_setting = NULL;
2649 int index = 0;
2650 long timeout;
2651 int ret = 0;
2652
2653 /*
2654 * By default the timeout for non-compute jobs is 10000 ms
2655 * and there is no timeout enforced on compute jobs.
2656 * In SR-IOV or passthrough mode, the timeout for compute
2657 * jobs is also 10000 ms by default.
2658 */
2659 adev->gfx_timeout = msecs_to_jiffies(10000);
2660 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
2661 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
2662 adev->compute_timeout = adev->gfx_timeout;
2663 else
2664 adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
2665
f440ff44 2666 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
71f98027 2667 while ((timeout_setting = strsep(&input, ",")) &&
f440ff44 2668 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
71f98027
AD
2669 ret = kstrtol(timeout_setting, 0, &timeout);
2670 if (ret)
2671 return ret;
2672
2673 if (timeout == 0) {
2674 index++;
2675 continue;
2676 } else if (timeout < 0) {
2677 timeout = MAX_SCHEDULE_TIMEOUT;
2678 } else {
2679 timeout = msecs_to_jiffies(timeout);
2680 }
2681
2682 switch (index++) {
2683 case 0:
2684 adev->gfx_timeout = timeout;
2685 break;
2686 case 1:
2687 adev->compute_timeout = timeout;
2688 break;
2689 case 2:
2690 adev->sdma_timeout = timeout;
2691 break;
2692 case 3:
2693 adev->video_timeout = timeout;
2694 break;
2695 default:
2696 break;
2697 }
2698 }
2699 /*
2700 * If only one value is specified, it
2701 * should apply to all non-compute jobs.
2702 */
bcccee89 2703 if (index == 1) {
71f98027 2704 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
bcccee89
ED
2705 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
2706 adev->compute_timeout = adev->gfx_timeout;
2707 }
71f98027
AD
2708 }
2709
2710 return ret;
2711}
d4535e2c 2712
d38ceaf9
AD
2713/**
2714 * amdgpu_device_init - initialize the driver
2715 *
2716 * @adev: amdgpu_device pointer
87e3f136 2717 * @ddev: drm dev pointer
d38ceaf9
AD
2718 * @pdev: pci dev pointer
2719 * @flags: driver flags
2720 *
2721 * Initializes the driver info and hw (all asics).
2722 * Returns 0 for success or an error on failure.
2723 * Called at driver startup.
2724 */
2725int amdgpu_device_init(struct amdgpu_device *adev,
2726 struct drm_device *ddev,
2727 struct pci_dev *pdev,
2728 uint32_t flags)
2729{
2730 int r, i;
2731 bool runtime = false;
95844d20 2732 u32 max_MBps;
d38ceaf9
AD
2733
2734 adev->shutdown = false;
2735 adev->dev = &pdev->dev;
2736 adev->ddev = ddev;
2737 adev->pdev = pdev;
2738 adev->flags = flags;
4e66d7d2
YZ
2739
2740 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
2741 adev->asic_type = amdgpu_force_asic_type;
2742 else
2743 adev->asic_type = flags & AMD_ASIC_MASK;
2744
d38ceaf9 2745 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
593aa2d2
SL
2746 if (amdgpu_emu_mode == 1)
2747 adev->usec_timeout *= 2;
770d13b1 2748 adev->gmc.gart_size = 512 * 1024 * 1024;
d38ceaf9
AD
2749 adev->accel_working = false;
2750 adev->num_rings = 0;
2751 adev->mman.buffer_funcs = NULL;
2752 adev->mman.buffer_funcs_ring = NULL;
2753 adev->vm_manager.vm_pte_funcs = NULL;
3798e9a6 2754 adev->vm_manager.vm_pte_num_rqs = 0;
132f34e4 2755 adev->gmc.gmc_funcs = NULL;
f54d1867 2756 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
b8866c26 2757 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
d38ceaf9
AD
2758
2759 adev->smc_rreg = &amdgpu_invalid_rreg;
2760 adev->smc_wreg = &amdgpu_invalid_wreg;
2761 adev->pcie_rreg = &amdgpu_invalid_rreg;
2762 adev->pcie_wreg = &amdgpu_invalid_wreg;
36b9a952
HR
2763 adev->pciep_rreg = &amdgpu_invalid_rreg;
2764 adev->pciep_wreg = &amdgpu_invalid_wreg;
4fa1c6a6
TZ
2765 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
2766 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
d38ceaf9
AD
2767 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2768 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2769 adev->didt_rreg = &amdgpu_invalid_rreg;
2770 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
2771 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2772 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
2773 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2774 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2775
3e39ab90
AD
2776 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2777 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2778 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
2779
2780 /* mutex initializations are all done here so we
2781 * can recall functions without having locking issues */
d38ceaf9 2782 atomic_set(&adev->irq.ih.lock, 0);
0e5ca0d1 2783 mutex_init(&adev->firmware.mutex);
d38ceaf9
AD
2784 mutex_init(&adev->pm.mutex);
2785 mutex_init(&adev->gfx.gpu_clock_mutex);
2786 mutex_init(&adev->srbm_mutex);
b8866c26 2787 mutex_init(&adev->gfx.pipe_reserve_mutex);
d23ee13f 2788 mutex_init(&adev->gfx.gfx_off_mutex);
d38ceaf9 2789 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9 2790 mutex_init(&adev->mn_lock);
e23b74aa 2791 mutex_init(&adev->virt.vf_errors.lock);
d38ceaf9 2792 hash_init(adev->mn_hash);
13a752e3 2793 mutex_init(&adev->lock_reset);
bb5a2bdf 2794 mutex_init(&adev->virt.dpm_mutex);
32eaeae0 2795 mutex_init(&adev->psp.mutex);
d38ceaf9 2796
912dfc84
EQ
2797 r = amdgpu_device_check_arguments(adev);
2798 if (r)
2799 return r;
d38ceaf9 2800
d38ceaf9
AD
2801 spin_lock_init(&adev->mmio_idx_lock);
2802 spin_lock_init(&adev->smc_idx_lock);
2803 spin_lock_init(&adev->pcie_idx_lock);
2804 spin_lock_init(&adev->uvd_ctx_idx_lock);
2805 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 2806 spin_lock_init(&adev->gc_cac_idx_lock);
16abb5d2 2807 spin_lock_init(&adev->se_cac_idx_lock);
d38ceaf9 2808 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 2809 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 2810
0c4e7fa5
CZ
2811 INIT_LIST_HEAD(&adev->shadow_list);
2812 mutex_init(&adev->shadow_list_lock);
2813
795f2813
AR
2814 INIT_LIST_HEAD(&adev->ring_lru_list);
2815 spin_lock_init(&adev->ring_lru_list_lock);
2816
beff74bc
AD
2817 INIT_DELAYED_WORK(&adev->delayed_init_work,
2818 amdgpu_device_delayed_init_work_handler);
1e317b99
RZ
2819 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
2820 amdgpu_device_delay_enable_gfx_off);
2dc80b00 2821
d4535e2c
AG
2822 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
2823
d23ee13f 2824 adev->gfx.gfx_off_req_count = 1;
b1ddf548
RZ
2825 adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
2826
0fa49558
AX
2827 /* Registers mapping */
2828 /* TODO: block userspace mapping of io register */
da69c161
KW
2829 if (adev->asic_type >= CHIP_BONAIRE) {
2830 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2831 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2832 } else {
2833 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2834 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2835 }
d38ceaf9 2836
d38ceaf9
AD
2837 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2838 if (adev->rmmio == NULL) {
2839 return -ENOMEM;
2840 }
2841 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2842 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2843
d38ceaf9
AD
2844 /* io port mapping */
2845 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2846 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2847 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2848 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2849 break;
2850 }
2851 }
2852 if (adev->rio_mem == NULL)
b64a18c5 2853 DRM_INFO("PCI I/O BAR is not found.\n");
d38ceaf9 2854
b2109d8e
JX
2855 /* enable PCIE atomic ops */
2856 r = pci_enable_atomic_ops_to_root(adev->pdev,
2857 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
2858 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
2859 if (r) {
2860 adev->have_atomics_support = false;
2861 DRM_INFO("PCIE atomic ops are not supported\n");
2862 } else {
2863 adev->have_atomics_support = true;
2864 }
2865
5494d864
AD
2866 amdgpu_device_get_pcie_info(adev);
2867
b239c017
JX
2868 if (amdgpu_mcbp)
2869 DRM_INFO("MCBP is enabled\n");
2870
5f84cc63
JX
2871 if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
2872 adev->enable_mes = true;
2873
f54eeab4 2874 if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
a190d1c7
XY
2875 r = amdgpu_discovery_init(adev);
2876 if (r) {
2877 dev_err(adev->dev, "amdgpu_discovery_init failed\n");
2878 return r;
2879 }
2880 }
2881
d38ceaf9 2882 /* early init functions */
06ec9070 2883 r = amdgpu_device_ip_early_init(adev);
d38ceaf9
AD
2884 if (r)
2885 return r;
2886
df99ac0f
JZ
2887 r = amdgpu_device_get_job_timeout_settings(adev);
2888 if (r) {
2889 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
2890 return r;
2891 }
2892
6585661d
OZ
2893 /* doorbell bar mapping and doorbell index init*/
2894 amdgpu_device_doorbell_init(adev);
2895
d38ceaf9
AD
2896 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2897 /* this will fail for cards that aren't VGA class devices, just
2898 * ignore it */
06ec9070 2899 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
d38ceaf9 2900
e9bef455 2901 if (amdgpu_device_is_px(ddev))
d38ceaf9 2902 runtime = true;
84c8b22e
LW
2903 if (!pci_is_thunderbolt_attached(adev->pdev))
2904 vga_switcheroo_register_client(adev->pdev,
2905 &amdgpu_switcheroo_ops, runtime);
d38ceaf9
AD
2906 if (runtime)
2907 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2908
9475a943
SL
2909 if (amdgpu_emu_mode == 1) {
2910 /* post the asic on emulation mode */
2911 emu_soc_asic_init(adev);
bfca0289 2912 goto fence_driver_init;
9475a943 2913 }
bfca0289 2914
4e99a44e
ML
2915 /* detect if we are with an SRIOV vbios */
2916 amdgpu_device_detect_sriov_bios(adev);
048765ad 2917
95e8e59e
AD
2918 /* check if we need to reset the asic
2919 * E.g., driver was not cleanly unloaded previously, etc.
2920 */
f14899fd 2921 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
95e8e59e
AD
2922 r = amdgpu_asic_reset(adev);
2923 if (r) {
2924 dev_err(adev->dev, "asic reset on init failed\n");
2925 goto failed;
2926 }
2927 }
2928
d38ceaf9 2929 /* Post card if necessary */
39c640c0 2930 if (amdgpu_device_need_post(adev)) {
d38ceaf9 2931 if (!adev->bios) {
bec86378 2932 dev_err(adev->dev, "no vBIOS found\n");
83ba126a
AD
2933 r = -EINVAL;
2934 goto failed;
d38ceaf9 2935 }
bec86378 2936 DRM_INFO("GPU posting now...\n");
4e99a44e
ML
2937 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2938 if (r) {
2939 dev_err(adev->dev, "gpu post error!\n");
2940 goto failed;
2941 }
d38ceaf9
AD
2942 }
2943
88b64e95
AD
2944 if (adev->is_atom_fw) {
2945 /* Initialize clocks */
2946 r = amdgpu_atomfirmware_get_clock_info(adev);
2947 if (r) {
2948 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
e23b74aa 2949 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
88b64e95
AD
2950 goto failed;
2951 }
2952 } else {
a5bde2f9
AD
2953 /* Initialize clocks */
2954 r = amdgpu_atombios_get_clock_info(adev);
2955 if (r) {
2956 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
e23b74aa 2957 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
89041940 2958 goto failed;
a5bde2f9
AD
2959 }
2960 /* init i2c buses */
4562236b
HW
2961 if (!amdgpu_device_has_dc_support(adev))
2962 amdgpu_atombios_i2c_init(adev);
2c1a2784 2963 }
d38ceaf9 2964
bfca0289 2965fence_driver_init:
d38ceaf9
AD
2966 /* Fence driver */
2967 r = amdgpu_fence_driver_init(adev);
2c1a2784
AD
2968 if (r) {
2969 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
e23b74aa 2970 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
83ba126a 2971 goto failed;
2c1a2784 2972 }
d38ceaf9
AD
2973
2974 /* init the mode config */
2975 drm_mode_config_init(adev->ddev);
2976
06ec9070 2977 r = amdgpu_device_ip_init(adev);
d38ceaf9 2978 if (r) {
8840a387 2979 /* failed in exclusive mode due to timeout */
2980 if (amdgpu_sriov_vf(adev) &&
2981 !amdgpu_sriov_runtime(adev) &&
2982 amdgpu_virt_mmio_blocked(adev) &&
2983 !amdgpu_virt_wait_reset(adev)) {
2984 dev_err(adev->dev, "VF exclusive mode timeout\n");
1daee8b4
PD
2985 /* Don't send request since VF is inactive. */
2986 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
2987 adev->virt.ops = NULL;
8840a387 2988 r = -EAGAIN;
2989 goto failed;
2990 }
06ec9070 2991 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
e23b74aa 2992 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
72d3f592
ED
2993 if (amdgpu_virt_request_full_gpu(adev, false))
2994 amdgpu_virt_release_full_gpu(adev, false);
83ba126a 2995 goto failed;
d38ceaf9
AD
2996 }
2997
2998 adev->accel_working = true;
2999
e59c0205
AX
3000 amdgpu_vm_check_compute_bug(adev);
3001
95844d20
MO
3002 /* Initialize the buffer migration limit. */
3003 if (amdgpu_moverate >= 0)
3004 max_MBps = amdgpu_moverate;
3005 else
3006 max_MBps = 8; /* Allow 8 MB/s. */
3007 /* Get a log2 for easy divisions. */
3008 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3009
9bc92b9c
ML
3010 amdgpu_fbdev_init(adev);
3011
e9bc1bf7
YT
3012 if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
3013 amdgpu_pm_virt_sysfs_init(adev);
3014
d2f52ac8
RZ
3015 r = amdgpu_pm_sysfs_init(adev);
3016 if (r)
3017 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3018
5bb23532
OM
3019 r = amdgpu_ucode_sysfs_init(adev);
3020 if (r)
3021 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3022
75758255 3023 r = amdgpu_debugfs_gem_init(adev);
3f14e623 3024 if (r)
d38ceaf9 3025 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
d38ceaf9
AD
3026
3027 r = amdgpu_debugfs_regs_init(adev);
3f14e623 3028 if (r)
d38ceaf9 3029 DRM_ERROR("registering register debugfs failed (%d).\n", r);
d38ceaf9 3030
50ab2533 3031 r = amdgpu_debugfs_firmware_init(adev);
3f14e623 3032 if (r)
50ab2533 3033 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
50ab2533 3034
763efb6c 3035 r = amdgpu_debugfs_init(adev);
db95e218 3036 if (r)
763efb6c 3037 DRM_ERROR("Creating debugfs files failed (%d).\n", r);
db95e218 3038
d38ceaf9
AD
3039 if ((amdgpu_testing & 1)) {
3040 if (adev->accel_working)
3041 amdgpu_test_moves(adev);
3042 else
3043 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3044 }
d38ceaf9
AD
3045 if (amdgpu_benchmarking) {
3046 if (adev->accel_working)
3047 amdgpu_benchmark(adev, amdgpu_benchmarking);
3048 else
3049 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3050 }
3051
b0adca4d
EQ
3052 /*
3053 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3054 * Otherwise the mgpu fan boost feature will be skipped because the
3055 * gpu instance count would be too low.
3056 */
3057 amdgpu_register_gpu_instance(adev);
3058
d38ceaf9
AD
3059 /* enable clockgating, etc. after ib tests, etc. since some blocks require
3060 * explicit gating rather than handling it automatically.
3061 */
06ec9070 3062 r = amdgpu_device_ip_late_init(adev);
2c1a2784 3063 if (r) {
06ec9070 3064 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
e23b74aa 3065 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
83ba126a 3066 goto failed;
2c1a2784 3067 }
d38ceaf9 3068
108c6a63 3069 /* must succeed. */
511fdbc3 3070 amdgpu_ras_resume(adev);
108c6a63 3071
beff74bc
AD
3072 queue_delayed_work(system_wq, &adev->delayed_init_work,
3073 msecs_to_jiffies(AMDGPU_RESUME_MS));
3074
dcea6e65
KR
3075 r = device_create_file(adev->dev, &dev_attr_pcie_replay_count);
3076 if (r) {
3077 dev_err(adev->dev, "Could not create pcie_replay_count");
3078 return r;
3079 }
108c6a63 3080
d155bef0
AB
3081 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3082 r = amdgpu_pmu_init(adev);
9c7c85f7
JK
3083 if (r)
3084 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3085
d38ceaf9 3086 return 0;
83ba126a
AD
3087
3088failed:
89041940 3089 amdgpu_vf_error_trans_all(adev);
83ba126a
AD
3090 if (runtime)
3091 vga_switcheroo_fini_domain_pm_ops(adev->dev);
8840a387 3092
83ba126a 3093 return r;
d38ceaf9
AD
3094}
3095
d38ceaf9
AD
3096/**
3097 * amdgpu_device_fini - tear down the driver
3098 *
3099 * @adev: amdgpu_device pointer
3100 *
3101 * Tear down the driver info (all asics).
3102 * Called at driver shutdown.
3103 */
3104void amdgpu_device_fini(struct amdgpu_device *adev)
3105{
3106 int r;
3107
3108 DRM_INFO("amdgpu: finishing device.\n");
3109 adev->shutdown = true;
9f875167
JZ
3110
3111 flush_delayed_work(&adev->delayed_init_work);
3112
e5b03032
ML
3113 /* disable all interrupts */
3114 amdgpu_irq_disable_all(adev);
ff97cba8
ML
3115 if (adev->mode_info.mode_config_initialized){
3116 if (!amdgpu_device_has_dc_support(adev))
c2d88e06 3117 drm_helper_force_disable_all(adev->ddev);
ff97cba8
ML
3118 else
3119 drm_atomic_helper_shutdown(adev->ddev);
3120 }
d38ceaf9 3121 amdgpu_fence_driver_fini(adev);
58e955d9 3122 amdgpu_pm_sysfs_fini(adev);
d38ceaf9 3123 amdgpu_fbdev_fini(adev);
06ec9070 3124 r = amdgpu_device_ip_fini(adev);
ab4fe3e1
HR
3125 if (adev->firmware.gpu_info_fw) {
3126 release_firmware(adev->firmware.gpu_info_fw);
3127 adev->firmware.gpu_info_fw = NULL;
3128 }
d38ceaf9 3129 adev->accel_working = false;
beff74bc 3130 cancel_delayed_work_sync(&adev->delayed_init_work);
d38ceaf9 3131 /* free i2c buses */
4562236b
HW
3132 if (!amdgpu_device_has_dc_support(adev))
3133 amdgpu_i2c_fini(adev);
bfca0289
SL
3134
3135 if (amdgpu_emu_mode != 1)
3136 amdgpu_atombios_fini(adev);
3137
d38ceaf9
AD
3138 kfree(adev->bios);
3139 adev->bios = NULL;
84c8b22e
LW
3140 if (!pci_is_thunderbolt_attached(adev->pdev))
3141 vga_switcheroo_unregister_client(adev->pdev);
83ba126a
AD
3142 if (adev->flags & AMD_IS_PX)
3143 vga_switcheroo_fini_domain_pm_ops(adev->dev);
d38ceaf9
AD
3144 vga_client_register(adev->pdev, NULL, NULL, NULL);
3145 if (adev->rio_mem)
3146 pci_iounmap(adev->pdev, adev->rio_mem);
3147 adev->rio_mem = NULL;
3148 iounmap(adev->rmmio);
3149 adev->rmmio = NULL;
06ec9070 3150 amdgpu_device_doorbell_fini(adev);
e9bc1bf7
YT
3151 if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
3152 amdgpu_pm_virt_sysfs_fini(adev);
3153
d38ceaf9 3154 amdgpu_debugfs_regs_cleanup(adev);
dcea6e65 3155 device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
5bb23532 3156 amdgpu_ucode_sysfs_fini(adev);
d155bef0
AB
3157 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3158 amdgpu_pmu_fini(adev);
6698a3d0 3159 amdgpu_debugfs_preempt_cleanup(adev);
f54eeab4 3160 if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
a190d1c7 3161 amdgpu_discovery_fini(adev);
d38ceaf9
AD
3162}
3163
3164
3165/*
3166 * Suspend & resume.
3167 */
3168/**
810ddc3a 3169 * amdgpu_device_suspend - initiate device suspend
d38ceaf9 3170 *
87e3f136
DP
3171 * @dev: drm dev pointer
3172 * @suspend: suspend state
3173 * @fbcon : notify the fbdev of suspend
d38ceaf9
AD
3174 *
3175 * Puts the hw in the suspend state (all asics).
3176 * Returns 0 for success or an error on failure.
3177 * Called at driver suspend.
3178 */
810ddc3a 3179int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
d38ceaf9
AD
3180{
3181 struct amdgpu_device *adev;
3182 struct drm_crtc *crtc;
3183 struct drm_connector *connector;
f8d2d39e 3184 struct drm_connector_list_iter iter;
5ceb54c6 3185 int r;
d38ceaf9
AD
3186
3187 if (dev == NULL || dev->dev_private == NULL) {
3188 return -ENODEV;
3189 }
3190
3191 adev = dev->dev_private;
3192
3193 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3194 return 0;
3195
44779b43 3196 adev->in_suspend = true;
d38ceaf9
AD
3197 drm_kms_helper_poll_disable(dev);
3198
5f818173
S
3199 if (fbcon)
3200 amdgpu_fbdev_set_suspend(adev, 1);
3201
beff74bc 3202 cancel_delayed_work_sync(&adev->delayed_init_work);
a5459475 3203
4562236b
HW
3204 if (!amdgpu_device_has_dc_support(adev)) {
3205 /* turn off display hw */
3206 drm_modeset_lock_all(dev);
f8d2d39e
LP
3207 drm_connector_list_iter_begin(dev, &iter);
3208 drm_for_each_connector_iter(connector, &iter)
3209 drm_helper_connector_dpms(connector,
3210 DRM_MODE_DPMS_OFF);
3211 drm_connector_list_iter_end(&iter);
4562236b 3212 drm_modeset_unlock_all(dev);
fe1053b7
AD
3213 /* unpin the front buffers and cursors */
3214 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3215 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3216 struct drm_framebuffer *fb = crtc->primary->fb;
3217 struct amdgpu_bo *robj;
3218
91334223 3219 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
fe1053b7
AD
3220 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3221 r = amdgpu_bo_reserve(aobj, true);
3222 if (r == 0) {
3223 amdgpu_bo_unpin(aobj);
3224 amdgpu_bo_unreserve(aobj);
3225 }
756e6880 3226 }
756e6880 3227
fe1053b7
AD
3228 if (fb == NULL || fb->obj[0] == NULL) {
3229 continue;
3230 }
3231 robj = gem_to_amdgpu_bo(fb->obj[0]);
3232 /* don't unpin kernel fb objects */
3233 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
3234 r = amdgpu_bo_reserve(robj, true);
3235 if (r == 0) {
3236 amdgpu_bo_unpin(robj);
3237 amdgpu_bo_unreserve(robj);
3238 }
d38ceaf9
AD
3239 }
3240 }
3241 }
fe1053b7
AD
3242
3243 amdgpu_amdkfd_suspend(adev);
3244
5e6932fe 3245 amdgpu_ras_suspend(adev);
3246
fe1053b7
AD
3247 r = amdgpu_device_ip_suspend_phase1(adev);
3248
d38ceaf9
AD
3249 /* evict vram memory */
3250 amdgpu_bo_evict_vram(adev);
3251
5ceb54c6 3252 amdgpu_fence_driver_suspend(adev);
d38ceaf9 3253
fe1053b7 3254 r = amdgpu_device_ip_suspend_phase2(adev);
d38ceaf9 3255
a0a71e49
AD
3256 /* evict remaining vram memory
3257 * This second call to evict vram is to evict the gart page table
3258 * using the CPU.
3259 */
d38ceaf9
AD
3260 amdgpu_bo_evict_vram(adev);
3261
d38ceaf9 3262 if (suspend) {
803cc26d 3263 pci_save_state(dev->pdev);
d38ceaf9
AD
3264 /* Shut down the device */
3265 pci_disable_device(dev->pdev);
3266 pci_set_power_state(dev->pdev, PCI_D3hot);
3267 }
3268
d38ceaf9
AD
3269 return 0;
3270}
3271
3272/**
810ddc3a 3273 * amdgpu_device_resume - initiate device resume
d38ceaf9 3274 *
87e3f136
DP
3275 * @dev: drm dev pointer
3276 * @resume: resume state
3277 * @fbcon : notify the fbdev of resume
d38ceaf9
AD
3278 *
3279 * Bring the hw back to operating state (all asics).
3280 * Returns 0 for success or an error on failure.
3281 * Called at driver resume.
3282 */
810ddc3a 3283int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
d38ceaf9
AD
3284{
3285 struct drm_connector *connector;
f8d2d39e 3286 struct drm_connector_list_iter iter;
d38ceaf9 3287 struct amdgpu_device *adev = dev->dev_private;
756e6880 3288 struct drm_crtc *crtc;
03161a6e 3289 int r = 0;
d38ceaf9
AD
3290
3291 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3292 return 0;
3293
d38ceaf9
AD
3294 if (resume) {
3295 pci_set_power_state(dev->pdev, PCI_D0);
3296 pci_restore_state(dev->pdev);
74b0b157 3297 r = pci_enable_device(dev->pdev);
03161a6e 3298 if (r)
4d3b9ae5 3299 return r;
d38ceaf9
AD
3300 }
3301
3302 /* post card */
39c640c0 3303 if (amdgpu_device_need_post(adev)) {
74b0b157 3304 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
3305 if (r)
3306 DRM_ERROR("amdgpu asic init failed\n");
3307 }
d38ceaf9 3308
06ec9070 3309 r = amdgpu_device_ip_resume(adev);
e6707218 3310 if (r) {
06ec9070 3311 DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
4d3b9ae5 3312 return r;
e6707218 3313 }
5ceb54c6
AD
3314 amdgpu_fence_driver_resume(adev);
3315
d38ceaf9 3316
06ec9070 3317 r = amdgpu_device_ip_late_init(adev);
03161a6e 3318 if (r)
4d3b9ae5 3319 return r;
d38ceaf9 3320
beff74bc
AD
3321 queue_delayed_work(system_wq, &adev->delayed_init_work,
3322 msecs_to_jiffies(AMDGPU_RESUME_MS));
3323
fe1053b7
AD
3324 if (!amdgpu_device_has_dc_support(adev)) {
3325 /* pin cursors */
3326 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3327 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3328
91334223 3329 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
fe1053b7
AD
3330 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3331 r = amdgpu_bo_reserve(aobj, true);
3332 if (r == 0) {
3333 r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
3334 if (r != 0)
3335 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
3336 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
3337 amdgpu_bo_unreserve(aobj);
3338 }
756e6880
AD
3339 }
3340 }
3341 }
ba997709
YZ
3342 r = amdgpu_amdkfd_resume(adev);
3343 if (r)
3344 return r;
756e6880 3345
96a5d8d4 3346 /* Make sure IB tests flushed */
beff74bc 3347 flush_delayed_work(&adev->delayed_init_work);
96a5d8d4 3348
d38ceaf9
AD
3349 /* blat the mode back in */
3350 if (fbcon) {
4562236b
HW
3351 if (!amdgpu_device_has_dc_support(adev)) {
3352 /* pre DCE11 */
3353 drm_helper_resume_force_mode(dev);
3354
3355 /* turn on display hw */
3356 drm_modeset_lock_all(dev);
f8d2d39e
LP
3357
3358 drm_connector_list_iter_begin(dev, &iter);
3359 drm_for_each_connector_iter(connector, &iter)
3360 drm_helper_connector_dpms(connector,
3361 DRM_MODE_DPMS_ON);
3362 drm_connector_list_iter_end(&iter);
3363
4562236b 3364 drm_modeset_unlock_all(dev);
d38ceaf9 3365 }
4d3b9ae5 3366 amdgpu_fbdev_set_suspend(adev, 0);
d38ceaf9
AD
3367 }
3368
3369 drm_kms_helper_poll_enable(dev);
23a1a9e5 3370
5e6932fe 3371 amdgpu_ras_resume(adev);
3372
23a1a9e5
L
3373 /*
3374 * Most of the connector probing functions try to acquire runtime pm
3375 * refs to ensure that the GPU is powered on when connector polling is
3376 * performed. Since we're calling this from a runtime PM callback,
3377 * trying to acquire rpm refs will cause us to deadlock.
3378 *
3379 * Since we're guaranteed to be holding the rpm lock, it's safe to
3380 * temporarily disable the rpm helpers so this doesn't deadlock us.
3381 */
3382#ifdef CONFIG_PM
3383 dev->dev->power.disable_depth++;
3384#endif
4562236b
HW
3385 if (!amdgpu_device_has_dc_support(adev))
3386 drm_helper_hpd_irq_event(dev);
3387 else
3388 drm_kms_helper_hotplug_event(dev);
23a1a9e5
L
3389#ifdef CONFIG_PM
3390 dev->dev->power.disable_depth--;
3391#endif
44779b43
RZ
3392 adev->in_suspend = false;
3393
4d3b9ae5 3394 return 0;
d38ceaf9
AD
3395}
3396
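/*
 * Editor's illustrative sketch (not part of the driver): a system-sleep
 * resume callback would typically recover the drm_device from driver data
 * and delegate to amdgpu_device_resume().  The wrapper name and the use of
 * dev_get_drvdata() are assumptions made for this example only.
 */
static int example_pmops_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	/* resume = true: the device went through a real power transition,
	 * fbcon = true: let fbdev know the console can be unblanked */
	return amdgpu_device_resume(drm_dev, true, true);
}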
e3ecdffa
AD
3397/**
3398 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
3399 *
3400 * @adev: amdgpu_device pointer
3401 *
3402 * The list of all the hardware IPs that make up the asic is walked and
3403 * the check_soft_reset callbacks are run. check_soft_reset determines
3404 * if the asic is still hung or not.
3405 * Returns true if any of the IPs are still in a hung state, false if not.
3406 */
06ec9070 3407static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
63fbf42f
CZ
3408{
3409 int i;
3410 bool asic_hang = false;
3411
f993d628
ML
3412 if (amdgpu_sriov_vf(adev))
3413 return true;
3414
8bc04c29
AD
3415 if (amdgpu_asic_need_full_reset(adev))
3416 return true;
3417
63fbf42f 3418 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 3419 if (!adev->ip_blocks[i].status.valid)
63fbf42f 3420 continue;
a1255107
AD
3421 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
3422 adev->ip_blocks[i].status.hang =
3423 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
3424 if (adev->ip_blocks[i].status.hang) {
3425 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
63fbf42f
CZ
3426 asic_hang = true;
3427 }
3428 }
3429 return asic_hang;
3430}
3431
e3ecdffa
AD
3432/**
3433 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
3434 *
3435 * @adev: amdgpu_device pointer
3436 *
3437 * The list of all the hardware IPs that make up the asic is walked and the
3438 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
3439 * handles any IP specific hardware or software state changes that are
3440 * necessary for a soft reset to succeed.
3441 * Returns 0 on success, negative error code on failure.
3442 */
06ec9070 3443static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
d31a501e
CZ
3444{
3445 int i, r = 0;
3446
3447 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 3448 if (!adev->ip_blocks[i].status.valid)
d31a501e 3449 continue;
a1255107
AD
3450 if (adev->ip_blocks[i].status.hang &&
3451 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
3452 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
d31a501e
CZ
3453 if (r)
3454 return r;
3455 }
3456 }
3457
3458 return 0;
3459}
3460
e3ecdffa
AD
3461/**
3462 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
3463 *
3464 * @adev: amdgpu_device pointer
3465 *
3466 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
3467 * reset is necessary to recover.
3468 * Returns true if a full asic reset is required, false if not.
3469 */
06ec9070 3470static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
35d782fe 3471{
da146d3b
AD
3472 int i;
3473
8bc04c29
AD
3474 if (amdgpu_asic_need_full_reset(adev))
3475 return true;
3476
da146d3b 3477 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 3478 if (!adev->ip_blocks[i].status.valid)
da146d3b 3479 continue;
a1255107
AD
3480 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
3481 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
3482 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
98512bb8
KW
3483 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
3484 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
a1255107 3485 if (adev->ip_blocks[i].status.hang) {
da146d3b
AD
 3486 DRM_INFO("Some blocks need a full reset!\n");
3487 return true;
3488 }
3489 }
35d782fe
CZ
3490 }
3491 return false;
3492}
3493
e3ecdffa
AD
3494/**
3495 * amdgpu_device_ip_soft_reset - do a soft reset
3496 *
3497 * @adev: amdgpu_device pointer
3498 *
3499 * The list of all the hardware IPs that make up the asic is walked and the
3500 * soft_reset callbacks are run if the block is hung. soft_reset handles any
3501 * IP specific hardware or software state changes that are necessary to soft
3502 * reset the IP.
3503 * Returns 0 on success, negative error code on failure.
3504 */
06ec9070 3505static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
35d782fe
CZ
3506{
3507 int i, r = 0;
3508
3509 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 3510 if (!adev->ip_blocks[i].status.valid)
35d782fe 3511 continue;
a1255107
AD
3512 if (adev->ip_blocks[i].status.hang &&
3513 adev->ip_blocks[i].version->funcs->soft_reset) {
3514 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
35d782fe
CZ
3515 if (r)
3516 return r;
3517 }
3518 }
3519
3520 return 0;
3521}
3522
e3ecdffa
AD
3523/**
3524 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
3525 *
3526 * @adev: amdgpu_device pointer
3527 *
3528 * The list of all the hardware IPs that make up the asic is walked and the
3529 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
3530 * handles any IP specific hardware or software state changes that are
3531 * necessary after the IP has been soft reset.
3532 * Returns 0 on success, negative error code on failure.
3533 */
06ec9070 3534static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
35d782fe
CZ
3535{
3536 int i, r = 0;
3537
3538 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 3539 if (!adev->ip_blocks[i].status.valid)
35d782fe 3540 continue;
a1255107
AD
3541 if (adev->ip_blocks[i].status.hang &&
3542 adev->ip_blocks[i].version->funcs->post_soft_reset)
3543 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
35d782fe
CZ
3544 if (r)
3545 return r;
3546 }
3547
3548 return 0;
3549}
3550
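/*
 * Editor's note, illustrative only: the four soft-reset helpers above share
 * one shape -- walk every IP block, skip the ones that are not valid, and
 * run an optional per-block callback for blocks marked as hung, bailing out
 * on the first failure.  example_ip_block and example_walk_hung_blocks are
 * hypothetical names used for this sketch, not driver API.
 */
struct example_ip_block {
	bool valid;
	bool hang;
	int (*reset_cb)(void *ctx);	/* optional, may be NULL */
};

static int example_walk_hung_blocks(struct example_ip_block *blocks,
				    int count, void *ctx)
{
	int i, r;

	for (i = 0; i < count; i++) {
		if (!blocks[i].valid)
			continue;
		if (blocks[i].hang && blocks[i].reset_cb) {
			r = blocks[i].reset_cb(ctx);
			if (r)
				return r;	/* abort on first failure */
		}
	}

	return 0;
}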
e3ecdffa 3551/**
c33adbc7 3552 * amdgpu_device_recover_vram - Recover some VRAM contents
e3ecdffa
AD
3553 *
3554 * @adev: amdgpu_device pointer
3555 *
3556 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
3557 * restore things like GPUVM page tables after a GPU reset where
3558 * the contents of VRAM might be lost.
403009bf
CK
3559 *
3560 * Returns:
3561 * 0 on success, negative error code on failure.
e3ecdffa 3562 */
c33adbc7 3563static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
c41d1cf6 3564{
c41d1cf6 3565 struct dma_fence *fence = NULL, *next = NULL;
403009bf
CK
3566 struct amdgpu_bo *shadow;
3567 long r = 1, tmo;
c41d1cf6
ML
3568
3569 if (amdgpu_sriov_runtime(adev))
b045d3af 3570 tmo = msecs_to_jiffies(8000);
c41d1cf6
ML
3571 else
3572 tmo = msecs_to_jiffies(100);
3573
3574 DRM_INFO("recover vram bo from shadow start\n");
3575 mutex_lock(&adev->shadow_list_lock);
403009bf
CK
3576 list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
3577
3578 /* No need to recover an evicted BO */
3579 if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
b575f10d 3580 shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
403009bf
CK
3581 shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
3582 continue;
3583
3584 r = amdgpu_bo_restore_shadow(shadow, &next);
3585 if (r)
3586 break;
3587
c41d1cf6 3588 if (fence) {
1712fb1a 3589 tmo = dma_fence_wait_timeout(fence, false, tmo);
403009bf
CK
3590 dma_fence_put(fence);
3591 fence = next;
1712fb1a 3592 if (tmo == 0) {
3593 r = -ETIMEDOUT;
c41d1cf6 3594 break;
1712fb1a 3595 } else if (tmo < 0) {
3596 r = tmo;
3597 break;
3598 }
403009bf
CK
3599 } else {
3600 fence = next;
c41d1cf6 3601 }
c41d1cf6
ML
3602 }
3603 mutex_unlock(&adev->shadow_list_lock);
3604
403009bf
CK
3605 if (fence)
3606 tmo = dma_fence_wait_timeout(fence, false, tmo);
c41d1cf6
ML
3607 dma_fence_put(fence);
3608
1712fb1a 3609 if (r < 0 || tmo <= 0) {
3610 DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
403009bf
CK
3611 return -EIO;
3612 }
c41d1cf6 3613
403009bf
CK
3614 DRM_INFO("recover vram bo from shadow done\n");
3615 return 0;
c41d1cf6
ML
3616}
3617
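/*
 * Editor's illustrative sketch of the shared timeout budget used above:
 * dma_fence_wait_timeout() returns the time that was left when the fence
 * signaled, and that remainder is reused as the budget for the next wait,
 * so the total time spent waiting across all shadow restores stays bounded.
 * example_wait() is a hypothetical stand-in and the numbers are arbitrary.
 */
static long example_wait(long budget, long cost)
{
	/* returns remaining budget, 0 on timeout (like the real helper) */
	return (cost > budget) ? 0 : budget - cost;
}

static long example_budgeted_waits(void)
{
	long tmo = 100;				/* total budget */
	long costs[3] = { 30, 40, 50 };		/* per-fence wait durations */
	int i;

	for (i = 0; i < 3; i++) {
		tmo = example_wait(tmo, costs[i]);
		if (tmo == 0)
			return -1;	/* third wait exceeds what is left */
	}

	return tmo;
}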
a90ad3c2 3618
e3ecdffa 3619/**
06ec9070 3620 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
5740682e
ML
3621 *
3622 * @adev: amdgpu device pointer
87e3f136 3623 * @from_hypervisor: request from hypervisor
5740682e
ML
3624 *
3625 * do VF FLR and reinitialize Asic
3f48c681 3626 * return 0 means succeeded otherwise failed
e3ecdffa
AD
3627 */
3628static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3629 bool from_hypervisor)
5740682e
ML
3630{
3631 int r;
3632
3633 if (from_hypervisor)
3634 r = amdgpu_virt_request_full_gpu(adev, true);
3635 else
3636 r = amdgpu_virt_reset_gpu(adev);
3637 if (r)
3638 return r;
a90ad3c2 3639
f81e8d53
WL
3640 amdgpu_amdkfd_pre_reset(adev);
3641
a90ad3c2 3642 /* Resume IP prior to SMC */
06ec9070 3643 r = amdgpu_device_ip_reinit_early_sriov(adev);
5740682e
ML
3644 if (r)
3645 goto error;
a90ad3c2
ML
3646
 3647 /* we need to recover the gart prior to running SMC/CP/SDMA resume */
c1c7ce8f 3648 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
a90ad3c2 3649
7a3e0bb2
RZ
3650 r = amdgpu_device_fw_loading(adev);
3651 if (r)
3652 return r;
3653
a90ad3c2 3654 /* now we are okay to resume SMC/CP/SDMA */
06ec9070 3655 r = amdgpu_device_ip_reinit_late_sriov(adev);
5740682e
ML
3656 if (r)
3657 goto error;
a90ad3c2
ML
3658
3659 amdgpu_irq_gpu_reset_resume_helper(adev);
5740682e 3660 r = amdgpu_ib_ring_tests(adev);
f81e8d53 3661 amdgpu_amdkfd_post_reset(adev);
a90ad3c2 3662
abc34253 3663error:
d3c117e5 3664 amdgpu_virt_init_data_exchange(adev);
abc34253 3665 amdgpu_virt_release_full_gpu(adev, true);
c41d1cf6 3666 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
e3526257 3667 amdgpu_inc_vram_lost(adev);
c33adbc7 3668 r = amdgpu_device_recover_vram(adev);
a90ad3c2
ML
3669 }
3670
3671 return r;
3672}
3673
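/*
 * Editor's usage sketch (illustrative only): under SR-IOV every reset is
 * bracketed by requesting and then releasing full GPU access, so the host
 * knows when the VF owns the hardware.  The body below is a skeleton; the
 * real reinit sequence is the function above.
 */
static int example_vf_reset_bracket(struct amdgpu_device *adev)
{
	int r = amdgpu_virt_request_full_gpu(adev, true);

	if (r)
		return r;

	/* ... early reinit, GART recovery, FW loading, late reinit ... */

	amdgpu_virt_release_full_gpu(adev, true);

	return 0;
}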
12938fad
CK
3674/**
3675 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
3676 *
3677 * @adev: amdgpu device pointer
3678 *
3679 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
3680 * a hung GPU.
3681 */
3682bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
3683{
3684 if (!amdgpu_device_ip_check_soft_reset(adev)) {
3685 DRM_INFO("Timeout, but no hardware hang detected.\n");
3686 return false;
3687 }
3688
3ba7b418
AG
3689 if (amdgpu_gpu_recovery == 0)
3690 goto disabled;
3691
3692 if (amdgpu_sriov_vf(adev))
3693 return true;
3694
3695 if (amdgpu_gpu_recovery == -1) {
3696 switch (adev->asic_type) {
fc42d47c
AG
3697 case CHIP_BONAIRE:
3698 case CHIP_HAWAII:
3ba7b418
AG
3699 case CHIP_TOPAZ:
3700 case CHIP_TONGA:
3701 case CHIP_FIJI:
3702 case CHIP_POLARIS10:
3703 case CHIP_POLARIS11:
3704 case CHIP_POLARIS12:
3705 case CHIP_VEGAM:
3706 case CHIP_VEGA20:
3707 case CHIP_VEGA10:
3708 case CHIP_VEGA12:
c43b849f 3709 case CHIP_RAVEN:
3ba7b418
AG
3710 break;
3711 default:
3712 goto disabled;
3713 }
12938fad
CK
3714 }
3715
3716 return true;
3ba7b418
AG
3717
3718disabled:
3719 DRM_INFO("GPU recovery disabled.\n");
3720 return false;
12938fad
CK
3721}
3722
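/*
 * Editor's note, illustrative only: amdgpu_gpu_recovery is effectively a
 * tri-state module parameter (-1 = auto with a per-ASIC list, 0 = disabled,
 * 1 = forced on).  The stand-alone sketch below mirrors the decision made
 * above; the function and parameter names are hypothetical.
 */
static bool example_should_recover(int recovery_param, bool is_sriov_vf,
				   bool asic_supported)
{
	if (recovery_param == 0)
		return false;		/* recovery explicitly disabled */
	if (is_sriov_vf)
		return true;		/* VFs always attempt recovery */
	if (recovery_param == -1 && !asic_supported)
		return false;		/* auto mode: unlisted ASICs opt out */

	return true;
}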
5c6dd71e 3723
26bc5340
AG
3724static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
3725 struct amdgpu_job *job,
3726 bool *need_full_reset_arg)
3727{
3728 int i, r = 0;
3729 bool need_full_reset = *need_full_reset_arg;
71182665 3730
71182665 3731 /* block all schedulers and reset given job's ring */
0875dc9e
CZ
3732 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3733 struct amdgpu_ring *ring = adev->rings[i];
3734
51687759 3735 if (!ring || !ring->sched.thread)
0875dc9e 3736 continue;
5740682e 3737
2f9d4084
ML
3738 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3739 amdgpu_fence_driver_force_completion(ring);
0875dc9e 3740 }
d38ceaf9 3741
222b5f04
AG
3742 if(job)
3743 drm_sched_increase_karma(&job->base);
3744
1d721ed6 3745 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
26bc5340
AG
3746 if (!amdgpu_sriov_vf(adev)) {
3747
3748 if (!need_full_reset)
3749 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
3750
3751 if (!need_full_reset) {
3752 amdgpu_device_ip_pre_soft_reset(adev);
3753 r = amdgpu_device_ip_soft_reset(adev);
3754 amdgpu_device_ip_post_soft_reset(adev);
3755 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
3756 DRM_INFO("soft reset failed, will fallback to full reset!\n");
3757 need_full_reset = true;
3758 }
3759 }
3760
3761 if (need_full_reset)
3762 r = amdgpu_device_ip_suspend(adev);
3763
3764 *need_full_reset_arg = need_full_reset;
3765 }
3766
3767 return r;
3768}
3769
3770static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
3771 struct list_head *device_list_handle,
3772 bool *need_full_reset_arg)
3773{
3774 struct amdgpu_device *tmp_adev = NULL;
3775 bool need_full_reset = *need_full_reset_arg, vram_lost = false;
3776 int r = 0;
3777
3778 /*
 3779 * ASIC reset has to be done on all XGMI hive nodes ASAP
3780 * to allow proper links negotiation in FW (within 1 sec)
3781 */
3782 if (need_full_reset) {
3783 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
d4535e2c
AG
3784 /* For XGMI run all resets in parallel to speed up the process */
3785 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
3786 if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
3787 r = -EALREADY;
3788 } else
3789 r = amdgpu_asic_reset(tmp_adev);
3790
3791 if (r) {
fed184e9 3792 DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
26bc5340 3793 r, tmp_adev->ddev->unique);
d4535e2c
AG
3794 break;
3795 }
3796 }
3797
3798 /* For XGMI wait for all PSP resets to complete before proceed */
3799 if (!r) {
3800 list_for_each_entry(tmp_adev, device_list_handle,
3801 gmc.xgmi.head) {
3802 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
3803 flush_work(&tmp_adev->xgmi_reset_work);
3804 r = tmp_adev->asic_reset_res;
3805 if (r)
3806 break;
3807 }
3808 }
26bc5340
AG
3809 }
3810 }
3811
3812
3813 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3814 if (need_full_reset) {
3815 /* post card */
3816 if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
3817 DRM_WARN("asic atom init failed!");
3818
3819 if (!r) {
3820 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
3821 r = amdgpu_device_ip_resume_phase1(tmp_adev);
3822 if (r)
3823 goto out;
3824
3825 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
3826 if (vram_lost) {
77e7f829 3827 DRM_INFO("VRAM is lost due to GPU reset!\n");
e3526257 3828 amdgpu_inc_vram_lost(tmp_adev);
26bc5340
AG
3829 }
3830
3831 r = amdgpu_gtt_mgr_recover(
3832 &tmp_adev->mman.bdev.man[TTM_PL_TT]);
3833 if (r)
3834 goto out;
3835
3836 r = amdgpu_device_fw_loading(tmp_adev);
3837 if (r)
3838 return r;
3839
3840 r = amdgpu_device_ip_resume_phase2(tmp_adev);
3841 if (r)
3842 goto out;
3843
3844 if (vram_lost)
3845 amdgpu_device_fill_reset_magic(tmp_adev);
3846
fdafb359
EQ
3847 /*
 3848 * Add this ASIC as tracked again, since the reset was
 3849 * already completed successfully.
3850 */
3851 amdgpu_register_gpu_instance(tmp_adev);
3852
7c04ca50 3853 r = amdgpu_device_ip_late_init(tmp_adev);
3854 if (r)
3855 goto out;
3856
e79a04d5 3857 /* must succeed. */
511fdbc3 3858 amdgpu_ras_resume(tmp_adev);
e79a04d5 3859
26bc5340
AG
3860 /* Update PSP FW topology after reset */
3861 if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
3862 r = amdgpu_xgmi_update_topology(hive, tmp_adev);
3863 }
3864 }
3865
3866
3867out:
3868 if (!r) {
3869 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
3870 r = amdgpu_ib_ring_tests(tmp_adev);
3871 if (r) {
3872 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
3873 r = amdgpu_device_ip_suspend(tmp_adev);
3874 need_full_reset = true;
3875 r = -EAGAIN;
3876 goto end;
3877 }
3878 }
3879
3880 if (!r)
3881 r = amdgpu_device_recover_vram(tmp_adev);
3882 else
3883 tmp_adev->asic_reset_res = r;
3884 }
3885
3886end:
3887 *need_full_reset_arg = need_full_reset;
3888 return r;
3889}
3890
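/*
 * Editor's illustrative sketch of the retry contract above: -EAGAIN means
 * the IB test failed after the reset, the IPs were suspended again and
 * *need_full_reset is now true, so the caller should re-run the reset pass.
 * This simplified loop omits the pre-reset pass the real caller repeats.
 */
static int example_reset_with_retry(struct amdgpu_hive_info *hive,
				    struct list_head *device_list_handle)
{
	bool need_full_reset = false;
	int r;

	do {
		r = amdgpu_do_asic_reset(hive, device_list_handle,
					 &need_full_reset);
	} while (r == -EAGAIN);

	return r;
}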
1d721ed6 3891static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
26bc5340 3892{
1d721ed6
AG
3893 if (trylock) {
3894 if (!mutex_trylock(&adev->lock_reset))
3895 return false;
3896 } else
3897 mutex_lock(&adev->lock_reset);
5740682e 3898
26bc5340
AG
3899 atomic_inc(&adev->gpu_reset_counter);
3900 adev->in_gpu_reset = 1;
a3a09142
AD
3901 switch (amdgpu_asic_reset_method(adev)) {
3902 case AMD_RESET_METHOD_MODE1:
3903 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
3904 break;
3905 case AMD_RESET_METHOD_MODE2:
3906 adev->mp1_state = PP_MP1_STATE_RESET;
3907 break;
3908 default:
3909 adev->mp1_state = PP_MP1_STATE_NONE;
3910 break;
3911 }
1d721ed6
AG
3912
3913 return true;
26bc5340 3914}
d38ceaf9 3915
26bc5340
AG
3916static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
3917{
89041940 3918 amdgpu_vf_error_trans_all(adev);
a3a09142 3919 adev->mp1_state = PP_MP1_STATE_NONE;
13a752e3
ML
3920 adev->in_gpu_reset = 0;
3921 mutex_unlock(&adev->lock_reset);
26bc5340
AG
3922}
3923
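/*
 * Editor's usage sketch (illustrative only): the lock/unlock helpers above
 * bracket a reset attempt.  Trylock is used when another reset may already
 * own the device, in which case the caller simply bails out.
 */
static void example_locked_reset(struct amdgpu_device *adev)
{
	if (!amdgpu_device_lock_adev(adev, true))
		return;		/* someone else is already resetting */

	/* ... perform the reset while mp1_state is configured ... */

	amdgpu_device_unlock_adev(adev);
}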
26bc5340
AG
3924/**
3925 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
3926 *
3927 * @adev: amdgpu device pointer
3928 * @job: which job trigger hang
3929 *
3930 * Attempt to reset the GPU if it has hung (all asics).
3931 * Attempt to do soft-reset or full-reset and reinitialize Asic
3932 * Returns 0 for success or an error on failure.
3933 */
3934
3935int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
3936 struct amdgpu_job *job)
3937{
1d721ed6
AG
3938 struct list_head device_list, *device_list_handle = NULL;
3939 bool need_full_reset, job_signaled;
26bc5340 3940 struct amdgpu_hive_info *hive = NULL;
26bc5340 3941 struct amdgpu_device *tmp_adev = NULL;
1d721ed6 3942 int i, r = 0;
7c6e68c7 3943 bool in_ras_intr = amdgpu_ras_intr_triggered();
26bc5340 3944
d5ea093e
AG
3945 /*
3946 * Flush RAM to disk so that after reboot
 3947 * the user can read the log and see why the system rebooted.
3948 */
3949 if (in_ras_intr && amdgpu_ras_get_context(adev)->reboot) {
3950
3951 DRM_WARN("Emergency reboot.");
3952
3953 ksys_sync_helper();
3954 emergency_restart();
3955 }
3956
1d721ed6 3957 need_full_reset = job_signaled = false;
26bc5340
AG
3958 INIT_LIST_HEAD(&device_list);
3959
7c6e68c7 3960 dev_info(adev->dev, "GPU %s begin!\n", in_ras_intr ? "jobs stop":"reset");
26bc5340 3961
beff74bc 3962 cancel_delayed_work_sync(&adev->delayed_init_work);
c53e4db7 3963
1d721ed6
AG
3964 hive = amdgpu_get_xgmi_hive(adev, false);
3965
26bc5340 3966 /*
1d721ed6
AG
 3967 * Here we trylock to avoid a chain of resets executing from
 3968 * either a trigger by jobs on different adevs in an XGMI hive or jobs on
 3969 * different schedulers for the same device while this TO handler is running.
 3970 * We always reset all schedulers for a device and all devices in an XGMI
 3971 * hive, so that should take care of them too.
26bc5340 3972 */
1d721ed6
AG
3973
3974 if (hive && !mutex_trylock(&hive->reset_lock)) {
3975 DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
0b2d2c2e 3976 job ? job->base.id : -1, hive->hive_id);
26bc5340 3977 return 0;
1d721ed6 3978 }
26bc5340
AG
3979
3980 /* Start with adev pre asic reset first for soft reset check.*/
1d721ed6
AG
3981 if (!amdgpu_device_lock_adev(adev, !hive)) {
3982 DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
0b2d2c2e 3983 job ? job->base.id : -1);
1d721ed6 3984 return 0;
26bc5340
AG
3985 }
3986
7c6e68c7
AG
3987 /* Block kfd: SRIOV would do it separately */
3988 if (!amdgpu_sriov_vf(adev))
3989 amdgpu_amdkfd_pre_reset(adev);
3990
26bc5340 3991 /* Build list of devices to reset */
1d721ed6 3992 if (adev->gmc.xgmi.num_physical_nodes > 1) {
26bc5340 3993 if (!hive) {
7c6e68c7
AG
3994 /*unlock kfd: SRIOV would do it separately */
3995 if (!amdgpu_sriov_vf(adev))
3996 amdgpu_amdkfd_post_reset(adev);
26bc5340
AG
3997 amdgpu_device_unlock_adev(adev);
3998 return -ENODEV;
3999 }
4000
4001 /*
 4002 * In case we are in XGMI hive mode, device reset is done for all the
 4003 * nodes in the hive to retrain all XGMI links, and hence the reset
 4004 * sequence is executed in a loop on all nodes.
4005 */
4006 device_list_handle = &hive->device_list;
4007 } else {
4008 list_add_tail(&adev->gmc.xgmi.head, &device_list);
4009 device_list_handle = &device_list;
4010 }
4011
1d721ed6
AG
4012 /* block all schedulers and reset given job's ring */
4013 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
7c6e68c7 4014 if (tmp_adev != adev) {
12ffa55d 4015 amdgpu_device_lock_adev(tmp_adev, false);
7c6e68c7
AG
4016 if (!amdgpu_sriov_vf(tmp_adev))
4017 amdgpu_amdkfd_pre_reset(tmp_adev);
4018 }
4019
12ffa55d
AG
4020 /*
 4021 * Mark these ASICs, which are about to be reset, as untracked first
 4022 * and add them back after the reset completes.
4023 */
4024 amdgpu_unregister_gpu_instance(tmp_adev);
4025
f1c1314b 4026 /* disable ras on ALL IPs */
7c6e68c7 4027 if (!in_ras_intr && amdgpu_device_ip_need_full_reset(tmp_adev))
f1c1314b 4028 amdgpu_ras_suspend(tmp_adev);
4029
1d721ed6
AG
4030 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4031 struct amdgpu_ring *ring = tmp_adev->rings[i];
4032
4033 if (!ring || !ring->sched.thread)
4034 continue;
4035
0b2d2c2e 4036 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
7c6e68c7
AG
4037
4038 if (in_ras_intr)
4039 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
1d721ed6
AG
4040 }
4041 }
4042
4043
7c6e68c7
AG
4044 if (in_ras_intr)
4045 goto skip_sched_resume;
4046
1d721ed6
AG
4047 /*
4048 * Must check guilty signal here since after this point all old
4049 * HW fences are force signaled.
4050 *
4051 * job->base holds a reference to parent fence
4052 */
4053 if (job && job->base.s_fence->parent &&
4054 dma_fence_is_signaled(job->base.s_fence->parent))
4055 job_signaled = true;
4056
1d721ed6
AG
4057 if (job_signaled) {
4058 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
4059 goto skip_hw_reset;
4060 }
4061
4062
4063 /* Guilty job will be freed after this*/
0b2d2c2e 4064 r = amdgpu_device_pre_asic_reset(adev, job, &need_full_reset);
1d721ed6
AG
4065 if (r) {
4066 /*TODO Should we stop ?*/
4067 DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
4068 r, adev->ddev->unique);
4069 adev->asic_reset_res = r;
4070 }
4071
26bc5340
AG
4072retry: /* Rest of adevs pre asic reset from XGMI hive. */
4073 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4074
4075 if (tmp_adev == adev)
4076 continue;
4077
26bc5340
AG
4078 r = amdgpu_device_pre_asic_reset(tmp_adev,
4079 NULL,
4080 &need_full_reset);
4081 /*TODO Should we stop ?*/
4082 if (r) {
4083 DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
4084 r, tmp_adev->ddev->unique);
4085 tmp_adev->asic_reset_res = r;
4086 }
4087 }
4088
4089 /* Actual ASIC resets if needed.*/
4090 /* TODO Implement XGMI hive reset logic for SRIOV */
4091 if (amdgpu_sriov_vf(adev)) {
4092 r = amdgpu_device_reset_sriov(adev, job ? false : true);
4093 if (r)
4094 adev->asic_reset_res = r;
4095 } else {
4096 r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
4097 if (r && r == -EAGAIN)
4098 goto retry;
4099 }
4100
1d721ed6
AG
4101skip_hw_reset:
4102
26bc5340
AG
4103 /* Post ASIC reset for all devs .*/
4104 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
7c6e68c7 4105
1d721ed6
AG
4106 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4107 struct amdgpu_ring *ring = tmp_adev->rings[i];
4108
4109 if (!ring || !ring->sched.thread)
4110 continue;
4111
 4113 /* No point in resubmitting jobs if we didn't HW reset */
4113 if (!tmp_adev->asic_reset_res && !job_signaled)
4114 drm_sched_resubmit_jobs(&ring->sched);
4115
4116 drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
4117 }
4118
4119 if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
4120 drm_helper_resume_force_mode(tmp_adev->ddev);
4121 }
4122
4123 tmp_adev->asic_reset_res = 0;
26bc5340
AG
4124
4125 if (r) {
4126 /* bad news, how to tell it to userspace ? */
12ffa55d 4127 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
26bc5340
AG
4128 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
4129 } else {
12ffa55d 4130 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
26bc5340 4131 }
7c6e68c7 4132 }
26bc5340 4133
7c6e68c7
AG
4134skip_sched_resume:
4135 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4136 /*unlock kfd: SRIOV would do it separately */
4137 if (!in_ras_intr && !amdgpu_sriov_vf(tmp_adev))
4138 amdgpu_amdkfd_post_reset(tmp_adev);
26bc5340
AG
4139 amdgpu_device_unlock_adev(tmp_adev);
4140 }
4141
1d721ed6 4142 if (hive)
22d6575b 4143 mutex_unlock(&hive->reset_lock);
26bc5340
AG
4144
4145 if (r)
4146 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
d38ceaf9
AD
4147 return r;
4148}
4149
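/*
 * Editor's note: the typical caller of amdgpu_device_gpu_recover() is the
 * DRM scheduler's timedout_job callback, which passes the hung job so its
 * karma can be increased.  The handler below is an illustrative sketch, not
 * the driver's actual timeout handler.
 */
static void example_job_timedout(struct amdgpu_ring *ring,
				 struct amdgpu_job *job)
{
	if (amdgpu_device_should_recover_gpu(ring->adev))
		amdgpu_device_gpu_recover(ring->adev, job);
	else
		DRM_INFO("GPU recovery disabled, not resetting\n");
}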
e3ecdffa
AD
4150/**
 4151 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
4152 *
4153 * @adev: amdgpu_device pointer
4154 *
 4155 * Fetches and stores in the driver the PCIE capabilities (gen speed
4156 * and lanes) of the slot the device is in. Handles APUs and
4157 * virtualized environments where PCIE config space may not be available.
4158 */
5494d864 4159static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
d0dd7f0c 4160{
5d9a6330 4161 struct pci_dev *pdev;
c5313457
HK
4162 enum pci_bus_speed speed_cap, platform_speed_cap;
4163 enum pcie_link_width platform_link_width;
d0dd7f0c 4164
cd474ba0
AD
4165 if (amdgpu_pcie_gen_cap)
4166 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 4167
cd474ba0
AD
4168 if (amdgpu_pcie_lane_cap)
4169 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 4170
cd474ba0
AD
4171 /* covers APUs as well */
4172 if (pci_is_root_bus(adev->pdev->bus)) {
4173 if (adev->pm.pcie_gen_mask == 0)
4174 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
4175 if (adev->pm.pcie_mlw_mask == 0)
4176 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 4177 return;
cd474ba0 4178 }
d0dd7f0c 4179
c5313457
HK
4180 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
4181 return;
4182
dbaa922b
AD
4183 pcie_bandwidth_available(adev->pdev, NULL,
4184 &platform_speed_cap, &platform_link_width);
c5313457 4185
cd474ba0 4186 if (adev->pm.pcie_gen_mask == 0) {
5d9a6330
AD
4187 /* asic caps */
4188 pdev = adev->pdev;
4189 speed_cap = pcie_get_speed_cap(pdev);
4190 if (speed_cap == PCI_SPEED_UNKNOWN) {
4191 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
cd474ba0
AD
4192 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4193 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
cd474ba0 4194 } else {
5d9a6330
AD
4195 if (speed_cap == PCIE_SPEED_16_0GT)
4196 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4197 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4198 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4199 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
4200 else if (speed_cap == PCIE_SPEED_8_0GT)
4201 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4202 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4203 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4204 else if (speed_cap == PCIE_SPEED_5_0GT)
4205 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4206 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
4207 else
4208 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
4209 }
4210 /* platform caps */
c5313457 4211 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5d9a6330
AD
4212 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4213 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4214 } else {
c5313457 4215 if (platform_speed_cap == PCIE_SPEED_16_0GT)
5d9a6330
AD
4216 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4217 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4218 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4219 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
c5313457 4220 else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5d9a6330
AD
4221 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4222 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4223 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
c5313457 4224 else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5d9a6330
AD
4225 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4226 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4227 else
4228 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
4229
cd474ba0
AD
4230 }
4231 }
4232 if (adev->pm.pcie_mlw_mask == 0) {
c5313457 4233 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5d9a6330
AD
4234 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
4235 } else {
c5313457 4236 switch (platform_link_width) {
5d9a6330 4237 case PCIE_LNK_X32:
cd474ba0
AD
4238 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
4239 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4240 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4241 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4242 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4243 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4244 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4245 break;
5d9a6330 4246 case PCIE_LNK_X16:
cd474ba0
AD
4247 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4248 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4249 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4250 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4251 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4252 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4253 break;
5d9a6330 4254 case PCIE_LNK_X12:
cd474ba0
AD
4255 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4256 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4257 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4258 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4259 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4260 break;
5d9a6330 4261 case PCIE_LNK_X8:
cd474ba0
AD
4262 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4263 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4264 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4265 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4266 break;
5d9a6330 4267 case PCIE_LNK_X4:
cd474ba0
AD
4268 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4269 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4270 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4271 break;
5d9a6330 4272 case PCIE_LNK_X2:
cd474ba0
AD
4273 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4274 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4275 break;
5d9a6330 4276 case PCIE_LNK_X1:
cd474ba0
AD
4277 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
4278 break;
4279 default:
4280 break;
4281 }
d0dd7f0c
AD
4282 }
4283 }
4284}
d38ceaf9 4285
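/*
 * Editor's illustrative sketch of the cumulative mask built above: a link
 * capable of gen N also advertises every lower gen, which is why each
 * branch ORs in all of the lower CAIL_* speed bits.  The helper below is
 * hypothetical and uses plain bit positions instead of the CAIL_* defines.
 */
static unsigned int example_pcie_gen_mask(int max_gen)
{
	unsigned int mask = 0;
	int gen;

	for (gen = 1; gen <= max_gen; gen++)
		mask |= 1u << (gen - 1);

	return mask;	/* e.g. max_gen = 3 -> gen1 | gen2 | gen3 bits set */
}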