/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGA10",
	"RAVEN",
	"LAST",
};

bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_rreg(adev, reg);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_wreg(adev, reg, v);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

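/*
 * Illustrative sketch (not part of the driver): registers whose byte offset
 * fits inside the mapped MMIO BAR are accessed directly; anything beyond it
 * goes through the mmMM_INDEX/mmMM_DATA indirection above.  A hypothetical
 * caller would look like:
 *
 *	u32 val = amdgpu_mm_rreg(adev, reg_offset, 0);
 *	amdgpu_mm_wreg(adev, reg_offset, val | some_bit, 0);
 *
 * In practice the driver wraps these in the RREG32()/WREG32() family of
 * macros.
 */
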
/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

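/*
 * Illustrative sketch (not part of the driver): ring backends typically kick
 * the GPU by writing the updated ring write pointer to the ring's doorbell
 * slot, along the lines of:
 *
 *	if (ring->use_doorbell)
 *		amdgpu_mm_wdoorbell(adev, ring->doorbell_index,
 *				    lower_32_bits(ring->wptr));
 *
 * with the 64-bit variants used on VEGA10 and newer.
 */
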
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

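/*
 * Illustrative sketch (not part of the driver): a golden-register table is a
 * flat array of (reg, and_mask, or_mask) triples; an and_mask of 0xffffffff
 * means "write or_mask verbatim", otherwise the and_mask bits are cleared
 * before or_mask is ORed in.  With hypothetical register names:
 *
 *	static const u32 example_golden_settings[] = {
 *		mmSOME_REG,  0xffffffff, 0x00000003,	// full overwrite
 *		mmOTHER_REG, 0x0000ff00, 0x00001200,	// RMW of bits 8-15
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *				ARRAY_SIZE(example_golden_settings));
 */
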
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics).
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

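/*
 * Illustrative sketch (not part of the driver): each allocation covers one
 * 256-bit slot and the returned index is in dwords, hence the << 3 / >> 3
 * conversions above.  A hypothetical consumer:
 *
 *	u32 wb;
 *	int r = amdgpu_device_wb_get(adev, &wb);
 *	if (r)
 *		return r;
 *	// CPU and GPU views of the same slot:
 *	volatile u32 *cpu_ptr = &adev->wb.wb[wb];
 *	u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *	...
 *	amdgpu_device_wb_free(adev, wb);
 */
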
/**
 * amdgpu_device_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter.
 */
void amdgpu_device_vram_location(struct amdgpu_device *adev,
				 struct amdgpu_gmc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_device_gart_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left, then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_device_gart_location(struct amdgpu_device *adev,
				 struct amdgpu_gmc *mc)
{
	u64 size_af, size_bf;

	size_af = adev->gmc.mc_mask - mc->vram_end;
	size_bf = mc->vram_start;
	if (size_bf > size_af) {
		if (mc->gart_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_bf;
		}
		mc->gart_start = 0;
	} else {
		if (mc->gart_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_af;
		}
		/* VCE doesn't like it when BOs cross a 4GB segment, so align
		 * the GART base on a 4GB boundary as well.
		 */
		mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
	}
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}

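/*
 * Worked example with illustrative numbers: 8 GiB of VRAM placed at 0 and a
 * large mc_mask make size_bf = 0 and size_af huge, so the GART lands after
 * VRAM, aligned up to the next 4 GiB boundary:
 *
 *	vram_start = 0x0, vram_end = 0x1FFFFFFFF (8 GiB - 1)
 *	gart_start = ALIGN(0x200000000, 4 GiB)   = 0x200000000
 *	gart_end   = gart_start + gart_size - 1
 */
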
/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw need post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic requires posting: either it has not been initialized
 * at driver startup (all asics), or a hw reset was performed.
 * Returns true if post is needed, false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: in the whole-GPU pass-through virtualization case,
		 * after VM reboot some old SMC firmware still needs the driver
		 * to do a vPost, otherwise the GPU hangs. SMC firmware versions
		 * above 22.15 don't have this flaw, so force a vPost for SMC
		 * versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}

static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !is_power_of_2(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}

	if (amdgpu_lockup_timeout == 0) {
		dev_warn(adev->dev, "lockup_timeout must be > 0, adjusting to 10000\n");
		amdgpu_lockup_timeout = 10000;
	}
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

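/*
 * Illustrative sketch (not part of the driver): callers select one IP block
 * by type and ask it to gate or ungate its clocks, e.g.:
 *
 *	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
 *						   AMD_CG_STATE_UNGATE);
 *
 * which is the pattern amdgpu_device_ip_suspend() uses further down to
 * ungate the SMC before shutting the blocks down.
 */
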
int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}

bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}

struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the IP block's version is equal or greater,
 * 1 if it is smaller or the ip_block doesn't exist.
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

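/*
 * Illustrative sketch (not part of the driver): a typical guard for code
 * paths that need at least a given IP revision, e.g. "GFX 8.0 or newer":
 *
 *	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       8, 0) == 0) {
 *		// GFX IP is >= 8.0
 *	}
 */
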
/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
		 ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

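/*
 * Note derived from the parsing above: the virtual_display module parameter
 * is a semicolon-separated list of "pci_address,num_crtc" entries, with
 * "all" matching every device, e.g.:
 *
 *	modprobe amdgpu virtual_display=0000:01:00.0,2
 *	modprobe amdgpu virtual_display=all,1
 *
 * num_crtc is clamped to the 1..6 range and defaults to 1 when omitted.
 */
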
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}

static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		if (adev->asic_type == CHIP_RAVEN)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	amdgpu_amdkfd_device_probe(adev);

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return -EAGAIN;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}

1299
06ec9070 1300static int amdgpu_device_ip_init(struct amdgpu_device *adev)
d38ceaf9
AD
1301{
1302 int i, r;
1303
1304 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1305 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1306 continue;
a1255107 1307 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2c1a2784 1308 if (r) {
a1255107
AD
1309 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1310 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1311 return r;
2c1a2784 1312 }
a1255107 1313 adev->ip_blocks[i].status.sw = true;
bfca0289 1314
d38ceaf9 1315 /* need to do gmc hw init early so we can allocate gpu mem */
a1255107 1316 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
06ec9070 1317 r = amdgpu_device_vram_scratch_init(adev);
2c1a2784
AD
1318 if (r) {
1319 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
d38ceaf9 1320 return r;
2c1a2784 1321 }
a1255107 1322 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784
AD
1323 if (r) {
1324 DRM_ERROR("hw_init %d failed %d\n", i, r);
d38ceaf9 1325 return r;
2c1a2784 1326 }
06ec9070 1327 r = amdgpu_device_wb_init(adev);
2c1a2784 1328 if (r) {
06ec9070 1329 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
d38ceaf9 1330 return r;
2c1a2784 1331 }
a1255107 1332 adev->ip_blocks[i].status.hw = true;
2493664f
ML
1333
1334 /* right after GMC hw init, we create CSA */
1335 if (amdgpu_sriov_vf(adev)) {
1336 r = amdgpu_allocate_static_csa(adev);
1337 if (r) {
1338 DRM_ERROR("allocate CSA failed %d\n", r);
1339 return r;
1340 }
1341 }
d38ceaf9
AD
1342 }
1343 }
1344
1345 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1346 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 1347 continue;
bfca0289 1348 if (adev->ip_blocks[i].status.hw)
d38ceaf9 1349 continue;
a1255107 1350 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784 1351 if (r) {
a1255107
AD
1352 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1353 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1354 return r;
2c1a2784 1355 }
a1255107 1356 adev->ip_blocks[i].status.hw = true;
d38ceaf9
AD
1357 }
1358
1884734a 1359 amdgpu_amdkfd_device_init(adev);
c6332b97 1360
1361 if (amdgpu_sriov_vf(adev))
1362 amdgpu_virt_release_full_gpu(adev, true);
1363
d38ceaf9
AD
1364 return 0;
1365}
1366
static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}

static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
	return !!memcmp(adev->gart.ptr, adev->reset_magic,
			AMDGPU_RESET_MAGIC_NUM);
}

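/*
 * Note (an interpretation of the two helpers above): the first
 * AMDGPU_RESET_MAGIC_NUM bytes of the GART table, which lives in VRAM, act
 * as a canary.  They are snapshotted by amdgpu_device_fill_reset_magic() at
 * late init; if they no longer match after a GPU reset,
 * amdgpu_device_check_vram_lost() reports that VRAM contents were lost.
 */
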
static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
{
	int i = 0, r;

	if (amdgpu_emu_mode == 1)
		return 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_GATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}
	return 0;
}

static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.late_initialized = true;
		}
	}

	mod_delayed_work(system_wq, &adev->late_init_work,
			 msecs_to_jiffies(AMDGPU_RESUME_MS));

	amdgpu_device_fill_reset_magic(adev);

	return 0;
}

1429
06ec9070 1430static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
d38ceaf9
AD
1431{
1432 int i, r;
1433
1884734a 1434 amdgpu_amdkfd_device_fini(adev);
3e96dbfd
AD
1435 /* need to disable SMC first */
1436 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1437 if (!adev->ip_blocks[i].status.hw)
3e96dbfd 1438 continue;
a1255107 1439 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3e96dbfd 1440 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
a1255107
AD
1441 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1442 AMD_CG_STATE_UNGATE);
3e96dbfd
AD
1443 if (r) {
1444 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
a1255107 1445 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd
AD
1446 return r;
1447 }
a1255107 1448 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3e96dbfd
AD
1449 /* XXX handle errors */
1450 if (r) {
1451 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
a1255107 1452 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd 1453 }
a1255107 1454 adev->ip_blocks[i].status.hw = false;
3e96dbfd
AD
1455 break;
1456 }
1457 }
1458
d38ceaf9 1459 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1460 if (!adev->ip_blocks[i].status.hw)
d38ceaf9 1461 continue;
a1255107 1462 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
84e5b516 1463 amdgpu_free_static_csa(adev);
06ec9070
AD
1464 amdgpu_device_wb_fini(adev);
1465 amdgpu_device_vram_scratch_fini(adev);
d38ceaf9 1466 }
8201a67a
RZ
1467
1468 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1469 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1470 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1471 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1472 AMD_CG_STATE_UNGATE);
1473 if (r) {
1474 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1475 adev->ip_blocks[i].version->funcs->name, r);
1476 return r;
1477 }
2c1a2784 1478 }
8201a67a 1479
a1255107 1480 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
d38ceaf9 1481 /* XXX handle errors */
2c1a2784 1482 if (r) {
a1255107
AD
1483 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1484 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1485 }
8201a67a 1486
a1255107 1487 adev->ip_blocks[i].status.hw = false;
d38ceaf9
AD
1488 }
1489
9950cda2
AD
1490 /* disable all interrupts */
1491 amdgpu_irq_disable_all(adev);
1492
d38ceaf9 1493 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1494 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 1495 continue;
a1255107 1496 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
d38ceaf9 1497 /* XXX handle errors */
2c1a2784 1498 if (r) {
a1255107
AD
1499 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1500 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1501 }
a1255107
AD
1502 adev->ip_blocks[i].status.sw = false;
1503 adev->ip_blocks[i].status.valid = false;
d38ceaf9
AD
1504 }
1505
a6dcfd9c 1506 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1507 if (!adev->ip_blocks[i].status.late_initialized)
8a2eef1d 1508 continue;
a1255107
AD
1509 if (adev->ip_blocks[i].version->funcs->late_fini)
1510 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1511 adev->ip_blocks[i].status.late_initialized = false;
a6dcfd9c
ML
1512 }
1513
030308fc 1514 if (amdgpu_sriov_vf(adev))
24136135
ML
1515 if (amdgpu_virt_release_full_gpu(adev, false))
1516 DRM_ERROR("failed to release exclusive mode on fini\n");
2493664f 1517
d38ceaf9
AD
1518 return 0;
1519}
1520
static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, late_init_work.work);

	amdgpu_device_ip_late_set_cg_state(adev);
}

int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
	int i, r;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	/* ungate SMC block first */
	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
						   AMD_CG_STATE_UNGATE);
	if (r) {
		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* ungate blocks so that suspend can properly shut them down */
		if (i != AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
		}
		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, false);

	return 0;
}

static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_GMC,
		AMD_IP_BLOCK_TYPE_COMMON,
		AMD_IP_BLOCK_TYPE_IH,
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
		}
	}

	return 0;
}

static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_SMC,
		AMD_IP_BLOCK_TYPE_PSP,
		AMD_IP_BLOCK_TYPE_DCE,
		AMD_IP_BLOCK_TYPE_GFX,
		AMD_IP_BLOCK_TYPE_SDMA,
		AMD_IP_BLOCK_TYPE_UVD,
		AMD_IP_BLOCK_TYPE_VCE
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
		}
	}

	return 0;
}

static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
			r = adev->ip_blocks[i].version->funcs->resume(adev);
			if (r) {
				DRM_ERROR("resume of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}

static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
	}

	return 0;
}

static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_device_ip_resume_phase1(adev);
	if (r)
		return r;
	r = amdgpu_device_ip_resume_phase2(adev);

	return r;
}

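/*
 * Note summarizing the two phases above: phase 1 resumes the blocks
 * everything else depends on (COMMON, GMC, IH); phase 2 resumes the
 * remaining blocks.  amdgpu_device_ip_resume() simply runs them in order.
 */
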
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		if (adev->is_atom_fw) {
			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		} else {
			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		}

		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
	}
}

4562236b
HW
1704bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
1705{
1706 switch (asic_type) {
1707#if defined(CONFIG_DRM_AMD_DC)
1708 case CHIP_BONAIRE:
1709 case CHIP_HAWAII:
0d6fbccb 1710 case CHIP_KAVERI:
367e6687
AD
1711 case CHIP_KABINI:
1712 case CHIP_MULLINS:
4562236b
HW
1713 case CHIP_CARRIZO:
1714 case CHIP_STONEY:
1715 case CHIP_POLARIS11:
1716 case CHIP_POLARIS10:
2c8ad2d5 1717 case CHIP_POLARIS12:
4562236b
HW
1718 case CHIP_TONGA:
1719 case CHIP_FIJI:
1720#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
1721 return amdgpu_dc != 0;
4562236b 1722#endif
42f8ffa1
HW
1723 case CHIP_VEGA10:
1724#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
fd187853 1725 case CHIP_RAVEN:
42f8ffa1 1726#endif
fd187853 1727 return amdgpu_dc != 0;
4562236b
HW
1728#endif
1729 default:
1730 return false;
1731 }
1732}
1733
1734/**
1735 * amdgpu_device_has_dc_support - check if dc is supported
1736 *
1737 * @adev: amdgpu_device pointer
1738 *
1739 * Returns true for supported, false for not supported
1740 */
1741bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
1742{
2555039d
XY
1743 if (amdgpu_sriov_vf(adev))
1744 return false;
1745
4562236b
HW
1746 return amdgpu_device_asic_has_dc_support(adev->asic_type);
1747}
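/*
 * Illustrative sketch, not part of this file: the typical call pattern for
 * the two DC-support helpers above, mirroring the suspend/resume and
 * hotplug paths later in this file. The function name is hypothetical.
 */
static void example_send_hotplug_event(struct amdgpu_device *adev)
{
	if (amdgpu_device_has_dc_support(adev))
		drm_kms_helper_hotplug_event(adev->ddev);	/* DC / atomic path */
	else
		drm_helper_hpd_irq_event(adev->ddev);		/* legacy path */
}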
1748
d38ceaf9
AD
1749/**
1750 * amdgpu_device_init - initialize the driver
1751 *
1752 * @adev: amdgpu_device pointer
1753 * @ddev: drm dev pointer
1754 * @pdev: pci dev pointer
1755 * @flags: driver flags
1756 *
1757 * Initializes the driver info and hw (all asics).
1758 * Returns 0 for success or an error on failure.
1759 * Called at driver startup.
1760 */
1761int amdgpu_device_init(struct amdgpu_device *adev,
1762 struct drm_device *ddev,
1763 struct pci_dev *pdev,
1764 uint32_t flags)
1765{
1766 int r, i;
1767 bool runtime = false;
95844d20 1768 u32 max_MBps;
d38ceaf9
AD
1769
1770 adev->shutdown = false;
1771 adev->dev = &pdev->dev;
1772 adev->ddev = ddev;
1773 adev->pdev = pdev;
1774 adev->flags = flags;
2f7d10b3 1775 adev->asic_type = flags & AMD_ASIC_MASK;
d38ceaf9 1776 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
593aa2d2
SL
1777 if (amdgpu_emu_mode == 1)
1778 adev->usec_timeout *= 2;
770d13b1 1779 adev->gmc.gart_size = 512 * 1024 * 1024;
d38ceaf9
AD
1780 adev->accel_working = false;
1781 adev->num_rings = 0;
1782 adev->mman.buffer_funcs = NULL;
1783 adev->mman.buffer_funcs_ring = NULL;
1784 adev->vm_manager.vm_pte_funcs = NULL;
2d55e45a 1785 adev->vm_manager.vm_pte_num_rings = 0;
132f34e4 1786 adev->gmc.gmc_funcs = NULL;
f54d1867 1787 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
b8866c26 1788 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
d38ceaf9
AD
1789
1790 adev->smc_rreg = &amdgpu_invalid_rreg;
1791 adev->smc_wreg = &amdgpu_invalid_wreg;
1792 adev->pcie_rreg = &amdgpu_invalid_rreg;
1793 adev->pcie_wreg = &amdgpu_invalid_wreg;
36b9a952
HR
1794 adev->pciep_rreg = &amdgpu_invalid_rreg;
1795 adev->pciep_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
1796 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
1797 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
1798 adev->didt_rreg = &amdgpu_invalid_rreg;
1799 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
1800 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
1801 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
1802 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
1803 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1804
3e39ab90
AD
1805 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1806 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
1807 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
1808
1809 /* mutex initializations are all done here so we
1810 * can recall the function without having locking issues */
d38ceaf9 1811 atomic_set(&adev->irq.ih.lock, 0);
0e5ca0d1 1812 mutex_init(&adev->firmware.mutex);
d38ceaf9
AD
1813 mutex_init(&adev->pm.mutex);
1814 mutex_init(&adev->gfx.gpu_clock_mutex);
1815 mutex_init(&adev->srbm_mutex);
b8866c26 1816 mutex_init(&adev->gfx.pipe_reserve_mutex);
d38ceaf9 1817 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9 1818 mutex_init(&adev->mn_lock);
e23b74aa 1819 mutex_init(&adev->virt.vf_errors.lock);
d38ceaf9 1820 hash_init(adev->mn_hash);
13a752e3 1821 mutex_init(&adev->lock_reset);
d38ceaf9 1822
06ec9070 1823 amdgpu_device_check_arguments(adev);
d38ceaf9 1824
d38ceaf9
AD
1825 spin_lock_init(&adev->mmio_idx_lock);
1826 spin_lock_init(&adev->smc_idx_lock);
1827 spin_lock_init(&adev->pcie_idx_lock);
1828 spin_lock_init(&adev->uvd_ctx_idx_lock);
1829 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 1830 spin_lock_init(&adev->gc_cac_idx_lock);
16abb5d2 1831 spin_lock_init(&adev->se_cac_idx_lock);
d38ceaf9 1832 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 1833 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 1834
0c4e7fa5
CZ
1835 INIT_LIST_HEAD(&adev->shadow_list);
1836 mutex_init(&adev->shadow_list_lock);
1837
795f2813
AR
1838 INIT_LIST_HEAD(&adev->ring_lru_list);
1839 spin_lock_init(&adev->ring_lru_list_lock);
1840
06ec9070
AD
1841 INIT_DELAYED_WORK(&adev->late_init_work,
1842 amdgpu_device_ip_late_init_func_handler);
2dc80b00 1843
0fa49558
AX
1844 /* Registers mapping */
1845 /* TODO: block userspace mapping of io register */
da69c161
KW
1846 if (adev->asic_type >= CHIP_BONAIRE) {
1847 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
1848 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
1849 } else {
1850 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
1851 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
1852 }
d38ceaf9 1853
d38ceaf9
AD
1854 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
1855 if (adev->rmmio == NULL) {
1856 return -ENOMEM;
1857 }
1858 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
1859 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
1860
705e519e 1861 /* doorbell bar mapping */
06ec9070 1862 amdgpu_device_doorbell_init(adev);
d38ceaf9
AD
1863
1864 /* io port mapping */
1865 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1866 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
1867 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
1868 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
1869 break;
1870 }
1871 }
1872 if (adev->rio_mem == NULL)
b64a18c5 1873 DRM_INFO("PCI I/O BAR is not found.\n");
d38ceaf9
AD
1874
1875 /* early init functions */
06ec9070 1876 r = amdgpu_device_ip_early_init(adev);
d38ceaf9
AD
1877 if (r)
1878 return r;
1879
1880 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
1881 /* this will fail for cards that aren't VGA class devices, just
1882 * ignore it */
06ec9070 1883 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
d38ceaf9 1884
e9bef455 1885 if (amdgpu_device_is_px(ddev))
d38ceaf9 1886 runtime = true;
84c8b22e
LW
1887 if (!pci_is_thunderbolt_attached(adev->pdev))
1888 vga_switcheroo_register_client(adev->pdev,
1889 &amdgpu_switcheroo_ops, runtime);
d38ceaf9
AD
1890 if (runtime)
1891 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
1892
9475a943
SL
1893 if (amdgpu_emu_mode == 1) {
1894 /* post the asic on emulation mode */
1895 emu_soc_asic_init(adev);
bfca0289 1896 goto fence_driver_init;
9475a943 1897 }
bfca0289 1898
d38ceaf9 1899 /* Read BIOS */
83ba126a
AD
1900 if (!amdgpu_get_bios(adev)) {
1901 r = -EINVAL;
1902 goto failed;
1903 }
f7e9e9fe 1904
d38ceaf9 1905 r = amdgpu_atombios_init(adev);
2c1a2784
AD
1906 if (r) {
1907 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
e23b74aa 1908 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
83ba126a 1909 goto failed;
2c1a2784 1910 }
d38ceaf9 1911
4e99a44e
ML
1912 /* detect whether we are running with an SR-IOV vBIOS */
1913 amdgpu_device_detect_sriov_bios(adev);
048765ad 1914
d38ceaf9 1915 /* Post card if necessary */
39c640c0 1916 if (amdgpu_device_need_post(adev)) {
d38ceaf9 1917 if (!adev->bios) {
bec86378 1918 dev_err(adev->dev, "no vBIOS found\n");
83ba126a
AD
1919 r = -EINVAL;
1920 goto failed;
d38ceaf9 1921 }
bec86378 1922 DRM_INFO("GPU posting now...\n");
4e99a44e
ML
1923 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
1924 if (r) {
1925 dev_err(adev->dev, "gpu post error!\n");
1926 goto failed;
1927 }
d38ceaf9
AD
1928 }
1929
88b64e95
AD
1930 if (adev->is_atom_fw) {
1931 /* Initialize clocks */
1932 r = amdgpu_atomfirmware_get_clock_info(adev);
1933 if (r) {
1934 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
e23b74aa 1935 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
88b64e95
AD
1936 goto failed;
1937 }
1938 } else {
a5bde2f9
AD
1939 /* Initialize clocks */
1940 r = amdgpu_atombios_get_clock_info(adev);
1941 if (r) {
1942 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
e23b74aa 1943 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
89041940 1944 goto failed;
a5bde2f9
AD
1945 }
1946 /* init i2c buses */
4562236b
HW
1947 if (!amdgpu_device_has_dc_support(adev))
1948 amdgpu_atombios_i2c_init(adev);
2c1a2784 1949 }
d38ceaf9 1950
bfca0289 1951fence_driver_init:
d38ceaf9
AD
1952 /* Fence driver */
1953 r = amdgpu_fence_driver_init(adev);
2c1a2784
AD
1954 if (r) {
1955 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
e23b74aa 1956 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
83ba126a 1957 goto failed;
2c1a2784 1958 }
d38ceaf9
AD
1959
1960 /* init the mode config */
1961 drm_mode_config_init(adev->ddev);
1962
06ec9070 1963 r = amdgpu_device_ip_init(adev);
d38ceaf9 1964 if (r) {
8840a387 1965 /* failed in exclusive mode due to timeout */
1966 if (amdgpu_sriov_vf(adev) &&
1967 !amdgpu_sriov_runtime(adev) &&
1968 amdgpu_virt_mmio_blocked(adev) &&
1969 !amdgpu_virt_wait_reset(adev)) {
1970 dev_err(adev->dev, "VF exclusive mode timeout\n");
1daee8b4
PD
1971 /* Don't send request since VF is inactive. */
1972 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
1973 adev->virt.ops = NULL;
8840a387 1974 r = -EAGAIN;
1975 goto failed;
1976 }
06ec9070 1977 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
e23b74aa 1978 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
06ec9070 1979 amdgpu_device_ip_fini(adev);
83ba126a 1980 goto failed;
d38ceaf9
AD
1981 }
1982
1983 adev->accel_working = true;
1984
e59c0205
AX
1985 amdgpu_vm_check_compute_bug(adev);
1986
95844d20
MO
1987 /* Initialize the buffer migration limit. */
1988 if (amdgpu_moverate >= 0)
1989 max_MBps = amdgpu_moverate;
1990 else
1991 max_MBps = 8; /* Allow 8 MB/s. */
1992 /* Get a log2 for easy divisions. */
1993 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
1994
d38ceaf9
AD
1995 r = amdgpu_ib_pool_init(adev);
1996 if (r) {
1997 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
e23b74aa 1998 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
83ba126a 1999 goto failed;
d38ceaf9
AD
2000 }
2001
2002 r = amdgpu_ib_ring_tests(adev);
2003 if (r)
2004 DRM_ERROR("ib ring test failed (%d).\n", r);
2005
2dc8f81e
HC
2006 if (amdgpu_sriov_vf(adev))
2007 amdgpu_virt_init_data_exchange(adev);
2008
9bc92b9c
ML
2009 amdgpu_fbdev_init(adev);
2010
d2f52ac8
RZ
2011 r = amdgpu_pm_sysfs_init(adev);
2012 if (r)
2013 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2014
75758255 2015 r = amdgpu_debugfs_gem_init(adev);
3f14e623 2016 if (r)
d38ceaf9 2017 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
d38ceaf9
AD
2018
2019 r = amdgpu_debugfs_regs_init(adev);
3f14e623 2020 if (r)
d38ceaf9 2021 DRM_ERROR("registering register debugfs failed (%d).\n", r);
d38ceaf9 2022
50ab2533 2023 r = amdgpu_debugfs_firmware_init(adev);
3f14e623 2024 if (r)
50ab2533 2025 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
50ab2533 2026
763efb6c 2027 r = amdgpu_debugfs_init(adev);
db95e218 2028 if (r)
763efb6c 2029 DRM_ERROR("Creating debugfs files failed (%d).\n", r);
db95e218 2030
d38ceaf9
AD
2031 if ((amdgpu_testing & 1)) {
2032 if (adev->accel_working)
2033 amdgpu_test_moves(adev);
2034 else
2035 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2036 }
d38ceaf9
AD
2037 if (amdgpu_benchmarking) {
2038 if (adev->accel_working)
2039 amdgpu_benchmark(adev, amdgpu_benchmarking);
2040 else
2041 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2042 }
2043
2044 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2045 * explicit gating rather than handling it automatically.
2046 */
06ec9070 2047 r = amdgpu_device_ip_late_init(adev);
2c1a2784 2048 if (r) {
06ec9070 2049 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
e23b74aa 2050 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
83ba126a 2051 goto failed;
2c1a2784 2052 }
d38ceaf9
AD
2053
2054 return 0;
83ba126a
AD
2055
2056failed:
89041940 2057 amdgpu_vf_error_trans_all(adev);
83ba126a
AD
2058 if (runtime)
2059 vga_switcheroo_fini_domain_pm_ops(adev->dev);
8840a387 2060
83ba126a 2061 return r;
d38ceaf9
AD
2062}
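/*
 * Illustrative sketch, not part of this file: a driver-load path calling
 * amdgpu_device_init(); the real caller is amdgpu_driver_load_kms() in
 * amdgpu_kms.c. Error handling is reduced to the essentials and the
 * function name is hypothetical.
 */
static int example_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r;

	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;

	dev->dev_private = adev;
	/* flags carries the ASIC type plus the AMD_IS_* bits checked above */
	r = amdgpu_device_init(adev, dev, dev->pdev, (uint32_t)flags);
	if (r) {
		dev->dev_private = NULL;
		kfree(adev);
	}
	return r;
}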
2063
d38ceaf9
AD
2064/**
2065 * amdgpu_device_fini - tear down the driver
2066 *
2067 * @adev: amdgpu_device pointer
2068 *
2069 * Tear down the driver info (all asics).
2070 * Called at driver shutdown.
2071 */
2072void amdgpu_device_fini(struct amdgpu_device *adev)
2073{
2074 int r;
2075
2076 DRM_INFO("amdgpu: finishing device.\n");
2077 adev->shutdown = true;
db2c2a97
PD
2078 if (adev->mode_info.mode_config_initialized)
2079 drm_crtc_force_disable_all(adev->ddev);
b9141cd3 2080
d38ceaf9
AD
2081 amdgpu_ib_pool_fini(adev);
2082 amdgpu_fence_driver_fini(adev);
2083 amdgpu_fbdev_fini(adev);
06ec9070 2084 r = amdgpu_device_ip_fini(adev);
ab4fe3e1
HR
2085 if (adev->firmware.gpu_info_fw) {
2086 release_firmware(adev->firmware.gpu_info_fw);
2087 adev->firmware.gpu_info_fw = NULL;
2088 }
d38ceaf9 2089 adev->accel_working = false;
2dc80b00 2090 cancel_delayed_work_sync(&adev->late_init_work);
d38ceaf9 2091 /* free i2c buses */
4562236b
HW
2092 if (!amdgpu_device_has_dc_support(adev))
2093 amdgpu_i2c_fini(adev);
bfca0289
SL
2094
2095 if (amdgpu_emu_mode != 1)
2096 amdgpu_atombios_fini(adev);
2097
d38ceaf9
AD
2098 kfree(adev->bios);
2099 adev->bios = NULL;
84c8b22e
LW
2100 if (!pci_is_thunderbolt_attached(adev->pdev))
2101 vga_switcheroo_unregister_client(adev->pdev);
83ba126a
AD
2102 if (adev->flags & AMD_IS_PX)
2103 vga_switcheroo_fini_domain_pm_ops(adev->dev);
d38ceaf9
AD
2104 vga_client_register(adev->pdev, NULL, NULL, NULL);
2105 if (adev->rio_mem)
2106 pci_iounmap(adev->pdev, adev->rio_mem);
2107 adev->rio_mem = NULL;
2108 iounmap(adev->rmmio);
2109 adev->rmmio = NULL;
06ec9070 2110 amdgpu_device_doorbell_fini(adev);
d2f52ac8 2111 amdgpu_pm_sysfs_fini(adev);
d38ceaf9 2112 amdgpu_debugfs_regs_cleanup(adev);
d38ceaf9
AD
2113}
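/*
 * Illustrative sketch, not part of this file: driver unload mirrors load,
 * tearing the device down before freeing the per-device structure. The
 * function name is hypothetical.
 */
static void example_driver_unload(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_device_fini(adev);
	kfree(adev);
	dev->dev_private = NULL;
}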
2114
2115
2116/*
2117 * Suspend & resume.
2118 */
2119/**
810ddc3a 2120 * amdgpu_device_suspend - initiate device suspend
d38ceaf9
AD
2121 *
2122 * @dev: drm dev pointer
2123 * @suspend: whether to power the PCI device down to D3hot
2124 * @fbcon: whether to suspend the fbdev console
2125 * Puts the hw in the suspend state (all asics).
2126 * Returns 0 for success or an error on failure.
2127 * Called at driver suspend.
2128 */
810ddc3a 2129int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
d38ceaf9
AD
2130{
2131 struct amdgpu_device *adev;
2132 struct drm_crtc *crtc;
2133 struct drm_connector *connector;
5ceb54c6 2134 int r;
d38ceaf9
AD
2135
2136 if (dev == NULL || dev->dev_private == NULL) {
2137 return -ENODEV;
2138 }
2139
2140 adev = dev->dev_private;
2141
2142 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2143 return 0;
2144
2145 drm_kms_helper_poll_disable(dev);
2146
4562236b
HW
2147 if (!amdgpu_device_has_dc_support(adev)) {
2148 /* turn off display hw */
2149 drm_modeset_lock_all(dev);
2150 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2151 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2152 }
2153 drm_modeset_unlock_all(dev);
d38ceaf9
AD
2154 }
2155
ba997709
YZ
2156 amdgpu_amdkfd_suspend(adev);
2157
756e6880 2158 /* unpin the front buffers and cursors */
d38ceaf9 2159 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
756e6880 2160 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
d38ceaf9
AD
2161 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2162 struct amdgpu_bo *robj;
2163
756e6880
AD
2164 if (amdgpu_crtc->cursor_bo) {
2165 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2166 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2167 if (r == 0) {
2168 amdgpu_bo_unpin(aobj);
2169 amdgpu_bo_unreserve(aobj);
2170 }
2171 }
2172
d38ceaf9
AD
2173 if (rfb == NULL || rfb->obj == NULL) {
2174 continue;
2175 }
2176 robj = gem_to_amdgpu_bo(rfb->obj);
2177 /* don't unpin kernel fb objects */
2178 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
7a6901d7 2179 r = amdgpu_bo_reserve(robj, true);
d38ceaf9
AD
2180 if (r == 0) {
2181 amdgpu_bo_unpin(robj);
2182 amdgpu_bo_unreserve(robj);
2183 }
2184 }
2185 }
2186 /* evict vram memory */
2187 amdgpu_bo_evict_vram(adev);
2188
5ceb54c6 2189 amdgpu_fence_driver_suspend(adev);
d38ceaf9 2190
cdd61df6 2191 r = amdgpu_device_ip_suspend(adev);
d38ceaf9 2192
a0a71e49
AD
2193 /* evict remaining vram memory
2194 * This second call to evict vram is to evict the gart page table
2195 * using the CPU.
2196 */
d38ceaf9
AD
2197 amdgpu_bo_evict_vram(adev);
2198
2199 pci_save_state(dev->pdev);
2200 if (suspend) {
2201 /* Shut down the device */
2202 pci_disable_device(dev->pdev);
2203 pci_set_power_state(dev->pdev, PCI_D3hot);
74b0b157 2204 } else {
2205 r = amdgpu_asic_reset(adev);
2206 if (r)
2207 DRM_ERROR("amdgpu asic reset failed\n");
d38ceaf9
AD
2208 }
2209
2210 if (fbcon) {
2211 console_lock();
2212 amdgpu_fbdev_set_suspend(adev, 1);
2213 console_unlock();
2214 }
2215 return 0;
2216}
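/*
 * Illustrative sketch, not part of this file: a system-sleep PM callback
 * built on amdgpu_device_suspend(); the real dev_pm_ops live in
 * amdgpu_drv.c. The callback name is hypothetical.
 */
static int example_pmops_suspend(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	/* suspend=true: put the PCI device into D3hot;
	 * fbcon=true: also suspend the fbdev console */
	return amdgpu_device_suspend(drm_dev, true, true);
}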
2217
2218/**
810ddc3a 2219 * amdgpu_device_resume - initiate device resume
d38ceaf9
AD
2220 *
2221 * @dev: drm dev pointer
2222 * @resume: re-enable the PCI device; @fbcon: resume the fbdev console
2223 * Bring the hw back to operating state (all asics).
2224 * Returns 0 for success or an error on failure.
2225 * Called at driver resume.
2226 */
810ddc3a 2227int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
d38ceaf9
AD
2228{
2229 struct drm_connector *connector;
2230 struct amdgpu_device *adev = dev->dev_private;
756e6880 2231 struct drm_crtc *crtc;
03161a6e 2232 int r = 0;
d38ceaf9
AD
2233
2234 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2235 return 0;
2236
74b0b157 2237 if (fbcon)
d38ceaf9 2238 console_lock();
74b0b157 2239
d38ceaf9
AD
2240 if (resume) {
2241 pci_set_power_state(dev->pdev, PCI_D0);
2242 pci_restore_state(dev->pdev);
74b0b157 2243 r = pci_enable_device(dev->pdev);
03161a6e
HR
2244 if (r)
2245 goto unlock;
d38ceaf9
AD
2246 }
2247
2248 /* post card */
39c640c0 2249 if (amdgpu_device_need_post(adev)) {
74b0b157 2250 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2251 if (r)
2252 DRM_ERROR("amdgpu asic init failed\n");
2253 }
d38ceaf9 2254
06ec9070 2255 r = amdgpu_device_ip_resume(adev);
e6707218 2256 if (r) {
06ec9070 2257 DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
03161a6e 2258 goto unlock;
e6707218 2259 }
5ceb54c6
AD
2260 amdgpu_fence_driver_resume(adev);
2261
ca198528
FC
2262 if (resume) {
2263 r = amdgpu_ib_ring_tests(adev);
2264 if (r)
2265 DRM_ERROR("ib ring test failed (%d).\n", r);
2266 }
d38ceaf9 2267
06ec9070 2268 r = amdgpu_device_ip_late_init(adev);
03161a6e
HR
2269 if (r)
2270 goto unlock;
d38ceaf9 2271
756e6880
AD
2272 /* pin cursors */
2273 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2274 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2275
2276 if (amdgpu_crtc->cursor_bo) {
2277 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2278 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2279 if (r == 0) {
2280 r = amdgpu_bo_pin(aobj,
2281 AMDGPU_GEM_DOMAIN_VRAM,
2282 &amdgpu_crtc->cursor_addr);
2283 if (r != 0)
2284 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2285 amdgpu_bo_unreserve(aobj);
2286 }
2287 }
2288 }
ba997709
YZ
2289 r = amdgpu_amdkfd_resume(adev);
2290 if (r)
2291 return r;
756e6880 2292
d38ceaf9
AD
2293 /* blat the mode back in */
2294 if (fbcon) {
4562236b
HW
2295 if (!amdgpu_device_has_dc_support(adev)) {
2296 /* pre DCE11 */
2297 drm_helper_resume_force_mode(dev);
2298
2299 /* turn on display hw */
2300 drm_modeset_lock_all(dev);
2301 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2302 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2303 }
2304 drm_modeset_unlock_all(dev);
d38ceaf9
AD
2305 }
2306 }
2307
2308 drm_kms_helper_poll_enable(dev);
23a1a9e5
L
2309
2310 /*
2311 * Most of the connector probing functions try to acquire runtime pm
2312 * refs to ensure that the GPU is powered on when connector polling is
2313 * performed. Since we're calling this from a runtime PM callback,
2314 * trying to acquire rpm refs will cause us to deadlock.
2315 *
2316 * Since we're guaranteed to be holding the rpm lock, it's safe to
2317 * temporarily disable the rpm helpers so this doesn't deadlock us.
2318 */
2319#ifdef CONFIG_PM
2320 dev->dev->power.disable_depth++;
2321#endif
4562236b
HW
2322 if (!amdgpu_device_has_dc_support(adev))
2323 drm_helper_hpd_irq_event(dev);
2324 else
2325 drm_kms_helper_hotplug_event(dev);
23a1a9e5
L
2326#ifdef CONFIG_PM
2327 dev->dev->power.disable_depth--;
2328#endif
d38ceaf9 2329
03161a6e 2330 if (fbcon)
d38ceaf9 2331 amdgpu_fbdev_set_suspend(adev, 0);
03161a6e
HR
2332
2333unlock:
2334 if (fbcon)
d38ceaf9 2335 console_unlock();
d38ceaf9 2336
03161a6e 2337 return r;
d38ceaf9
AD
2338}
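/*
 * Illustrative sketch, not part of this file: the matching resume-side PM
 * callback; again, the real dev_pm_ops live in amdgpu_drv.c and the name
 * is hypothetical.
 */
static int example_pmops_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	/* resume=true: re-enable the PCI device and restore its state */
	return amdgpu_device_resume(drm_dev, true, true);
}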
2339
06ec9070 2340static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
63fbf42f
CZ
2341{
2342 int i;
2343 bool asic_hang = false;
2344
f993d628
ML
2345 if (amdgpu_sriov_vf(adev))
2346 return true;
2347
63fbf42f 2348 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2349 if (!adev->ip_blocks[i].status.valid)
63fbf42f 2350 continue;
a1255107
AD
2351 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2352 adev->ip_blocks[i].status.hang =
2353 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2354 if (adev->ip_blocks[i].status.hang) {
2355 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
63fbf42f
CZ
2356 asic_hang = true;
2357 }
2358 }
2359 return asic_hang;
2360}
2361
06ec9070 2362static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
d31a501e
CZ
2363{
2364 int i, r = 0;
2365
2366 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2367 if (!adev->ip_blocks[i].status.valid)
d31a501e 2368 continue;
a1255107
AD
2369 if (adev->ip_blocks[i].status.hang &&
2370 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2371 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
d31a501e
CZ
2372 if (r)
2373 return r;
2374 }
2375 }
2376
2377 return 0;
2378}
2379
06ec9070 2380static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
35d782fe 2381{
da146d3b
AD
2382 int i;
2383
2384 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2385 if (!adev->ip_blocks[i].status.valid)
da146d3b 2386 continue;
a1255107
AD
2387 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2388 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2389 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
98512bb8
KW
2390 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
2391 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
a1255107 2392 if (adev->ip_blocks[i].status.hang) {
da146d3b
AD
2393 DRM_INFO("Some block need full reset!\n");
2394 return true;
2395 }
2396 }
35d782fe
CZ
2397 }
2398 return false;
2399}
2400
06ec9070 2401static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
35d782fe
CZ
2402{
2403 int i, r = 0;
2404
2405 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2406 if (!adev->ip_blocks[i].status.valid)
35d782fe 2407 continue;
a1255107
AD
2408 if (adev->ip_blocks[i].status.hang &&
2409 adev->ip_blocks[i].version->funcs->soft_reset) {
2410 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
35d782fe
CZ
2411 if (r)
2412 return r;
2413 }
2414 }
2415
2416 return 0;
2417}
2418
06ec9070 2419static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
35d782fe
CZ
2420{
2421 int i, r = 0;
2422
2423 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2424 if (!adev->ip_blocks[i].status.valid)
35d782fe 2425 continue;
a1255107
AD
2426 if (adev->ip_blocks[i].status.hang &&
2427 adev->ip_blocks[i].version->funcs->post_soft_reset)
2428 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
35d782fe
CZ
2429 if (r)
2430 return r;
2431 }
2432
2433 return 0;
2434}
2435
06ec9070
AD
2436static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
2437 struct amdgpu_ring *ring,
2438 struct amdgpu_bo *bo,
2439 struct dma_fence **fence)
53cdccd5
CZ
2440{
2441 uint32_t domain;
2442 int r;
2443
23d2e504
RH
2444 if (!bo->shadow)
2445 return 0;
2446
1d284797 2447 r = amdgpu_bo_reserve(bo, true);
23d2e504
RH
2448 if (r)
2449 return r;
2450 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2451 /* if bo has been evicted, then no need to recover */
2452 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
82521316
RH
2453 r = amdgpu_bo_validate(bo->shadow);
2454 if (r) {
2455 DRM_ERROR("bo validate failed!\n");
2456 goto err;
2457 }
2458
23d2e504 2459 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
53cdccd5 2460 NULL, fence, true);
23d2e504
RH
2461 if (r) {
2462 DRM_ERROR("recover page table failed!\n");
2463 goto err;
2464 }
2465 }
53cdccd5 2466err:
23d2e504
RH
2467 amdgpu_bo_unreserve(bo);
2468 return r;
53cdccd5
CZ
2469}
2470
5740682e 2471/*
06ec9070 2472 * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
a90ad3c2
ML
2473 *
2474 * @adev: amdgpu device pointer
5740682e 2475 * @reset_flags: output param tells caller the reset result
a90ad3c2 2476 *
5740682e
ML
2477 * Attempt a soft reset first, falling back to a full reset if that fails,
2478 * then reinitialize the ASIC. Returns 0 on success, negative error code on failure.
2479*/
06ec9070
AD
2480static int amdgpu_device_reset(struct amdgpu_device *adev,
2481 uint64_t* reset_flags)
a90ad3c2 2482{
5740682e
ML
2483 bool need_full_reset, vram_lost = 0;
2484 int r;
a90ad3c2 2485
06ec9070 2486 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
a90ad3c2 2487
5740682e 2488 if (!need_full_reset) {
06ec9070
AD
2489 amdgpu_device_ip_pre_soft_reset(adev);
2490 r = amdgpu_device_ip_soft_reset(adev);
2491 amdgpu_device_ip_post_soft_reset(adev);
2492 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
5740682e
ML
2493 DRM_INFO("soft reset failed, will fallback to full reset!\n");
2494 need_full_reset = true;
2495 }
a90ad3c2 2496
5740682e 2497 }
a90ad3c2 2498
5740682e 2499 if (need_full_reset) {
cdd61df6 2500 r = amdgpu_device_ip_suspend(adev);
a90ad3c2 2501
5740682e 2502retry:
5740682e 2503 r = amdgpu_asic_reset(adev);
5740682e
ML
2504 /* post card */
2505 amdgpu_atom_asic_init(adev->mode_info.atom_context);
65781c78 2506
5740682e
ML
2507 if (!r) {
2508 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
06ec9070 2509 r = amdgpu_device_ip_resume_phase1(adev);
5740682e
ML
2510 if (r)
2511 goto out;
65781c78 2512
06ec9070 2513 vram_lost = amdgpu_device_check_vram_lost(adev);
5740682e
ML
2514 if (vram_lost) {
2515 DRM_ERROR("VRAM is lost!\n");
2516 atomic_inc(&adev->vram_lost_counter);
2517 }
2518
c1c7ce8f
CK
2519 r = amdgpu_gtt_mgr_recover(
2520 &adev->mman.bdev.man[TTM_PL_TT]);
5740682e
ML
2521 if (r)
2522 goto out;
2523
06ec9070 2524 r = amdgpu_device_ip_resume_phase2(adev);
5740682e
ML
2525 if (r)
2526 goto out;
2527
2528 if (vram_lost)
06ec9070 2529 amdgpu_device_fill_reset_magic(adev);
65781c78 2530 }
5740682e 2531 }
65781c78 2532
5740682e
ML
2533out:
2534 if (!r) {
2535 amdgpu_irq_gpu_reset_resume_helper(adev);
2536 r = amdgpu_ib_ring_tests(adev);
2537 if (r) {
2538 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
cdd61df6 2539 r = amdgpu_device_ip_suspend(adev);
5740682e
ML
2540 need_full_reset = true;
2541 goto retry;
2542 }
2543 }
65781c78 2544
5740682e
ML
2545 if (reset_flags) {
2546 if (vram_lost)
2547 (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
a90ad3c2 2548
5740682e
ML
2549 if (need_full_reset)
2550 (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
65781c78 2551 }
a90ad3c2 2552
5740682e
ML
2553 return r;
2554}
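/*
 * Illustrative sketch, not part of this file: decoding the reset_flags
 * value filled in by amdgpu_device_reset(). The helper is hypothetical.
 */
static void example_report_reset(struct amdgpu_device *adev, uint64_t flags)
{
	if (flags & AMDGPU_RESET_INFO_FULLRESET)
		dev_info(adev->dev, "a full ASIC reset was performed\n");
	if (flags & AMDGPU_RESET_INFO_VRAM_LOST)
		dev_info(adev->dev, "VRAM was lost; contents must be restored\n");
}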
a90ad3c2 2555
5740682e 2556/*
06ec9070 2557 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
5740682e
ML
2558 *
2559 * @adev: amdgpu device pointer
2560 * @reset_flags: output param tells caller the reset result
2561 *
2562 * Perform a VF FLR and reinitialize the ASIC.
2563 * Returns 0 on success, negative error code on failure.
2564*/
06ec9070
AD
2565static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
2566 uint64_t *reset_flags,
2567 bool from_hypervisor)
5740682e
ML
2568{
2569 int r;
2570
2571 if (from_hypervisor)
2572 r = amdgpu_virt_request_full_gpu(adev, true);
2573 else
2574 r = amdgpu_virt_reset_gpu(adev);
2575 if (r)
2576 return r;
a90ad3c2
ML
2577
2578 /* Resume IP prior to SMC */
06ec9070 2579 r = amdgpu_device_ip_reinit_early_sriov(adev);
5740682e
ML
2580 if (r)
2581 goto error;
a90ad3c2
ML
2582
2583 /* we need to recover the GART prior to resuming SMC/CP/SDMA */
c1c7ce8f 2584 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
a90ad3c2
ML
2585
2586 /* now we are okay to resume SMC/CP/SDMA */
06ec9070 2587 r = amdgpu_device_ip_reinit_late_sriov(adev);
5740682e
ML
2588 if (r)
2589 goto error;
a90ad3c2
ML
2590
2591 amdgpu_irq_gpu_reset_resume_helper(adev);
5740682e
ML
2592 r = amdgpu_ib_ring_tests(adev);
2593 if (r)
a90ad3c2
ML
2594 dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
2595
5740682e 2596error:
a90ad3c2
ML
2597 /* release full control of GPU after ib test */
2598 amdgpu_virt_release_full_gpu(adev, true);
2599
5740682e 2600 if (reset_flags) {
75bc6099
ML
2601 if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
2602 (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
2603 atomic_inc(&adev->vram_lost_counter);
2604 }
a90ad3c2 2605
5740682e
ML
2606 /* VF FLR or hotlink reset is always full-reset */
2607 (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
a90ad3c2
ML
2608 }
2609
2610 return r;
2611}
2612
d38ceaf9 2613/**
5f152b5e 2614 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
d38ceaf9
AD
2615 *
2616 * @adev: amdgpu device pointer
5740682e 2617 * @job: which job triggered the hang
dcebf026 2618 * @force: forces reset regardless of amdgpu_gpu_recovery
d38ceaf9 2619 *
5740682e 2620 * Attempt to reset the GPU if it has hung (all asics).
d38ceaf9
AD
2621 * Returns 0 for success or an error on failure.
2622 */
5f152b5e
AD
2623int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
2624 struct amdgpu_job *job, bool force)
d38ceaf9 2625{
4562236b 2626 struct drm_atomic_state *state = NULL;
5740682e
ML
2627 uint64_t reset_flags = 0;
2628 int i, r, resched;
fb140b29 2629
54bc1398 2630 if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
63fbf42f
CZ
2631 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2632 return 0;
2633 }
d38ceaf9 2634
dcebf026
AG
2635 if (!force && (amdgpu_gpu_recovery == 0 ||
2636 (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) {
2637 DRM_INFO("GPU recovery disabled.\n");
2638 return 0;
2639 }
2640
5740682e
ML
2641 dev_info(adev->dev, "GPU reset begin!\n");
2642
13a752e3 2643 mutex_lock(&adev->lock_reset);
d94aed5a 2644 atomic_inc(&adev->gpu_reset_counter);
13a752e3 2645 adev->in_gpu_reset = 1;
d38ceaf9 2646
a3c47d6b
CZ
2647 /* block TTM */
2648 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
4562236b
HW
2649 /* store modesetting */
2650 if (amdgpu_device_has_dc_support(adev))
2651 state = drm_atomic_helper_suspend(adev->ddev);
a3c47d6b 2652
0875dc9e
CZ
2653 /* block scheduler */
2654 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2655 struct amdgpu_ring *ring = adev->rings[i];
2656
51687759 2657 if (!ring || !ring->sched.thread)
0875dc9e 2658 continue;
5740682e
ML
2659
2660 /* only focus on the ring that hit the timeout if @job is not NULL */
2661 if (job && job->ring->idx != i)
2662 continue;
2663
0875dc9e 2664 kthread_park(ring->sched.thread);
1b1f42d8 2665 drm_sched_hw_job_reset(&ring->sched, &job->base);
5740682e 2666
2f9d4084
ML
2667 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2668 amdgpu_fence_driver_force_completion(ring);
0875dc9e 2669 }
d38ceaf9 2670
5740682e 2671 if (amdgpu_sriov_vf(adev))
06ec9070 2672 r = amdgpu_device_reset_sriov(adev, &reset_flags, job ? false : true);
5740682e 2673 else
06ec9070 2674 r = amdgpu_device_reset(adev, &reset_flags);
35d782fe 2675
d38ceaf9 2676 if (!r) {
5740682e
ML
2677 if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) ||
2678 (reset_flags & AMDGPU_RESET_INFO_VRAM_LOST)) {
53cdccd5
CZ
2679 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2680 struct amdgpu_bo *bo, *tmp;
f54d1867 2681 struct dma_fence *fence = NULL, *next = NULL;
53cdccd5
CZ
2682
2683 DRM_INFO("recover vram bo from shadow\n");
2684 mutex_lock(&adev->shadow_list_lock);
2685 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
236763d3 2686 next = NULL;
06ec9070 2687 amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
53cdccd5 2688 if (fence) {
f54d1867 2689 r = dma_fence_wait(fence, false);
53cdccd5 2690 if (r) {
1d7b17b0 2691 WARN(r, "recovery from shadow isn't complete\n");
53cdccd5
CZ
2692 break;
2693 }
2694 }
1f465087 2695
f54d1867 2696 dma_fence_put(fence);
53cdccd5
CZ
2697 fence = next;
2698 }
2699 mutex_unlock(&adev->shadow_list_lock);
2700 if (fence) {
f54d1867 2701 r = dma_fence_wait(fence, false);
53cdccd5 2702 if (r)
1d7b17b0 2703 WARN(r, "recovery from shadow isn't complete\n");
53cdccd5 2704 }
f54d1867 2705 dma_fence_put(fence);
53cdccd5 2706 }
5740682e 2707
d38ceaf9
AD
2708 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2709 struct amdgpu_ring *ring = adev->rings[i];
51687759
CZ
2710
2711 if (!ring || !ring->sched.thread)
d38ceaf9 2712 continue;
53cdccd5 2713
5740682e
ML
2714 /* only focus on the ring that hit the timeout if @job is not NULL */
2715 if (job && job->ring->idx != i)
2716 continue;
2717
1b1f42d8 2718 drm_sched_job_recovery(&ring->sched);
0875dc9e 2719 kthread_unpark(ring->sched.thread);
d38ceaf9 2720 }
d38ceaf9 2721 } else {
d38ceaf9 2722 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5740682e
ML
2723 struct amdgpu_ring *ring = adev->rings[i];
2724
2725 if (!ring || !ring->sched.thread)
2726 continue;
2727
2728 /* only focus on the ring that hit the timeout if @job is not NULL */
2729 if (job && job->ring->idx != i)
2730 continue;
2731
2732 kthread_unpark(adev->rings[i]->sched.thread);
d38ceaf9
AD
2733 }
2734 }
2735
4562236b 2736 if (amdgpu_device_has_dc_support(adev)) {
5740682e
ML
2737 if (drm_atomic_helper_resume(adev->ddev, state))
2738 dev_info(adev->dev, "drm resume failed\n");
5740682e 2739 } else {
4562236b 2740 drm_helper_resume_force_mode(adev->ddev);
5740682e 2741 }
d38ceaf9
AD
2742
2743 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
5740682e 2744
89041940 2745 if (r) {
d38ceaf9 2746 /* bad news, how to tell it to userspace ? */
5740682e
ML
2747 dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
2748 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
2749 } else {
2750 dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
89041940 2751 }
d38ceaf9 2752
89041940 2753 amdgpu_vf_error_trans_all(adev);
13a752e3
ML
2754 adev->in_gpu_reset = 0;
2755 mutex_unlock(&adev->lock_reset);
d38ceaf9
AD
2756 return r;
2757}
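/*
 * Illustrative sketch, not part of this file: the GPU scheduler's timeout
 * handler in amdgpu_job.c funnels into amdgpu_device_gpu_recover() roughly
 * like this. The function name is hypothetical, and the adev back-pointer
 * is assumed to be the one stored in struct amdgpu_job.
 */
static void example_job_timedout(struct amdgpu_job *job)
{
	/* force=false honours the amdgpu_gpu_recovery module parameter */
	amdgpu_device_gpu_recover(job->adev, job, false);
}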
2758
041d9d93 2759void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
d0dd7f0c
AD
2760{
2761 u32 mask;
2762 int ret;
2763
cd474ba0
AD
2764 if (amdgpu_pcie_gen_cap)
2765 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 2766
cd474ba0
AD
2767 if (amdgpu_pcie_lane_cap)
2768 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 2769
cd474ba0
AD
2770 /* covers APUs as well */
2771 if (pci_is_root_bus(adev->pdev->bus)) {
2772 if (adev->pm.pcie_gen_mask == 0)
2773 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2774 if (adev->pm.pcie_mlw_mask == 0)
2775 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 2776 return;
cd474ba0 2777 }
d0dd7f0c 2778
cd474ba0
AD
2779 if (adev->pm.pcie_gen_mask == 0) {
2780 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
2781 if (!ret) {
2782 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
2783 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
2784 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
2785
2786 if (mask & DRM_PCIE_SPEED_25)
2787 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
2788 if (mask & DRM_PCIE_SPEED_50)
2789 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
2790 if (mask & DRM_PCIE_SPEED_80)
2791 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
2792 } else {
2793 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2794 }
2795 }
2796 if (adev->pm.pcie_mlw_mask == 0) {
2797 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
2798 if (!ret) {
2799 switch (mask) {
2800 case 32:
2801 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
2802 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2803 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2804 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2805 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2806 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2807 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2808 break;
2809 case 16:
2810 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2811 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2812 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2813 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2814 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2815 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2816 break;
2817 case 12:
2818 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2819 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2820 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2821 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2822 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2823 break;
2824 case 8:
2825 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2826 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2827 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2828 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2829 break;
2830 case 4:
2831 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2832 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2833 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2834 break;
2835 case 2:
2836 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2837 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2838 break;
2839 case 1:
2840 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
2841 break;
2842 default:
2843 break;
2844 }
2845 } else {
2846 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c
AD
2847 }
2848 }
2849}
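/*
 * Illustrative sketch, not part of this file: a consumer mapping the PCIe
 * gen mask filled in above to the highest supported generation. The helper
 * name is hypothetical.
 */
static int example_max_pcie_gen(struct amdgpu_device *adev)
{
	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		return 3;
	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		return 2;
	return 1;
}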
d38ceaf9 2850