drm/amdgpu: Avoid get vram info from atom bios on emulation mode
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
0875dc9e 28#include <linux/kthread.h>
d38ceaf9
AD
29#include <linux/console.h>
30#include <linux/slab.h>
d38ceaf9
AD
31#include <drm/drmP.h>
32#include <drm/drm_crtc_helper.h>
4562236b 33#include <drm/drm_atomic_helper.h>
d38ceaf9
AD
34#include <drm/amdgpu_drm.h>
35#include <linux/vgaarb.h>
36#include <linux/vga_switcheroo.h>
37#include <linux/efi.h>
38#include "amdgpu.h"
f4b373f4 39#include "amdgpu_trace.h"
d38ceaf9
AD
40#include "amdgpu_i2c.h"
41#include "atom.h"
42#include "amdgpu_atombios.h"
a5bde2f9 43#include "amdgpu_atomfirmware.h"
d0dd7f0c 44#include "amd_pcie.h"
33f34802
KW
45#ifdef CONFIG_DRM_AMDGPU_SI
46#include "si.h"
47#endif
a2e73f56
AD
48#ifdef CONFIG_DRM_AMDGPU_CIK
49#include "cik.h"
50#endif
aaa36a97 51#include "vi.h"
460826e6 52#include "soc15.h"
d38ceaf9 53#include "bif/bif_4_1_d.h"
9accf2fd 54#include <linux/pci.h>
bec86378 55#include <linux/firmware.h>
89041940 56#include "amdgpu_vf_error.h"
d38ceaf9 57
ba997709 58#include "amdgpu_amdkfd.h"
d2f52ac8 59#include "amdgpu_pm.h"
d38ceaf9 60
e2a75f88 61MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
2d2e5e7e 62MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
e2a75f88 63
2dc80b00
S
64#define AMDGPU_RESUME_MS 2000
65
d38ceaf9 66static const char *amdgpu_asic_name[] = {
da69c161
KW
67 "TAHITI",
68 "PITCAIRN",
69 "VERDE",
70 "OLAND",
71 "HAINAN",
d38ceaf9
AD
72 "BONAIRE",
73 "KAVERI",
74 "KABINI",
75 "HAWAII",
76 "MULLINS",
77 "TOPAZ",
78 "TONGA",
48299f95 79 "FIJI",
d38ceaf9 80 "CARRIZO",
139f4917 81 "STONEY",
2cc0c0b5
FC
82 "POLARIS10",
83 "POLARIS11",
c4642a47 84 "POLARIS12",
d4196f01 85 "VEGA10",
2ca8a5d2 86 "RAVEN",
d38ceaf9
AD
87 "LAST",
88};
89
90bool amdgpu_device_is_px(struct drm_device *dev)
91{
92 struct amdgpu_device *adev = dev->dev_private;
93
2f7d10b3 94 if (adev->flags & AMD_IS_PX)
d38ceaf9
AD
95 return true;
96 return false;
97}
98
99/*
100 * MMIO register access helper functions.
101 */
102uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
15d72fd7 103 uint32_t acc_flags)
d38ceaf9 104{
f4b373f4
TSD
105 uint32_t ret;
106
43ca8efa 107 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
bc992ba5 108 return amdgpu_virt_kiq_rreg(adev, reg);
bc992ba5 109
15d72fd7 110 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
f4b373f4 111 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
d38ceaf9
AD
112 else {
113 unsigned long flags;
d38ceaf9
AD
114
115 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
116 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
117 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
118 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
d38ceaf9 119 }
f4b373f4
TSD
120 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
121 return ret;
d38ceaf9
AD
122}
123
124void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
15d72fd7 125 uint32_t acc_flags)
d38ceaf9 126{
f4b373f4 127 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
4e99a44e 128
47ed4e1c
KW
129 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
130 adev->last_mm_index = v;
131 }
132
43ca8efa 133 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
bc992ba5 134 return amdgpu_virt_kiq_wreg(adev, reg, v);
bc992ba5 135
15d72fd7 136 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
d38ceaf9
AD
137 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
138 else {
139 unsigned long flags;
140
141 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
142 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
143 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
144 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
145 }
47ed4e1c
KW
146
147 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
148 udelay(500);
149 }
d38ceaf9
AD
150}
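/*
 * Illustrative sketch (not part of amdgpu_device.c): the two helpers above use
 * a direct readl()/writel() path for registers inside the mapped MMIO window
 * and fall back to the mmMM_INDEX/mmMM_DATA pair, held under mmio_idx_lock,
 * for everything else (the index+data sequence is not atomic, hence the
 * spinlock).  The toy below models only that control flow; FAKE_MM_INDEX,
 * DIRECT_WINDOW and regs[] are invented names.  It compiles standalone.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_MM_INDEX 0u   /* plays the role of mmMM_INDEX */
#define FAKE_MM_DATA  1u   /* plays the role of mmMM_DATA */
#define DIRECT_WINDOW 16u  /* pretend only the first 16 dwords are mapped */

static uint32_t regs[4096]; /* stands in for the device register file */

static uint32_t sketch_rreg(uint32_t reg)
{
	if (reg < DIRECT_WINDOW)
		return regs[reg];                  /* direct readl() path */
	regs[FAKE_MM_INDEX] = reg * 4;             /* program the byte offset */
	return regs[regs[FAKE_MM_INDEX] / 4];      /* MM_DATA mirrors the target */
}

static void sketch_wreg(uint32_t reg, uint32_t v)
{
	if (reg < DIRECT_WINDOW) {
		regs[reg] = v;                     /* direct writel() path */
	} else {
		regs[FAKE_MM_INDEX] = reg * 4;
		regs[regs[FAKE_MM_INDEX] / 4] = v;
	}
}

int main(void)
{
	sketch_wreg(100, 0xdeadbeef);              /* takes the indexed path */
	printf("0x%08x\n", sketch_rreg(100));      /* prints 0xdeadbeef */
	return 0;
}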
151
152u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
153{
154 if ((reg * 4) < adev->rio_mem_size)
155 return ioread32(adev->rio_mem + (reg * 4));
156 else {
157 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
158 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
159 }
160}
161
162void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
163{
47ed4e1c
KW
164 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
165 adev->last_mm_index = v;
166 }
d38ceaf9
AD
167
168 if ((reg * 4) < adev->rio_mem_size)
169 iowrite32(v, adev->rio_mem + (reg * 4));
170 else {
171 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
172 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
173 }
47ed4e1c
KW
174
175 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
176 udelay(500);
177 }
d38ceaf9
AD
178}
179
180/**
181 * amdgpu_mm_rdoorbell - read a doorbell dword
182 *
183 * @adev: amdgpu_device pointer
184 * @index: doorbell index
185 *
186 * Returns the value in the doorbell aperture at the
187 * requested doorbell index (CIK).
188 */
189u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
190{
191 if (index < adev->doorbell.num_doorbells) {
192 return readl(adev->doorbell.ptr + index);
193 } else {
194 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
195 return 0;
196 }
197}
198
199/**
200 * amdgpu_mm_wdoorbell - write a doorbell dword
201 *
202 * @adev: amdgpu_device pointer
203 * @index: doorbell index
204 * @v: value to write
205 *
206 * Writes @v to the doorbell aperture at the
207 * requested doorbell index (CIK).
208 */
209void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
210{
211 if (index < adev->doorbell.num_doorbells) {
212 writel(v, adev->doorbell.ptr + index);
213 } else {
214 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
215 }
216}
217
832be404
KW
218/**
219 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
220 *
221 * @adev: amdgpu_device pointer
222 * @index: doorbell index
223 *
224 * Returns the value in the doorbell aperture at the
225 * requested doorbell index (VEGA10+).
226 */
227u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
228{
229 if (index < adev->doorbell.num_doorbells) {
230 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
231 } else {
232 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
233 return 0;
234 }
235}
236
237/**
238 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
239 *
240 * @adev: amdgpu_device pointer
241 * @index: doorbell index
242 * @v: value to write
243 *
244 * Writes @v to the doorbell aperture at the
245 * requested doorbell index (VEGA10+).
246 */
247void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
248{
249 if (index < adev->doorbell.num_doorbells) {
250 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
251 } else {
252 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
253 }
254}
255
d38ceaf9
AD
256/**
257 * amdgpu_invalid_rreg - dummy reg read function
258 *
259 * @adev: amdgpu device pointer
260 * @reg: offset of register
261 *
262 * Dummy register read function. Used for register blocks
263 * that certain asics don't have (all asics).
264 * Returns the value in the register.
265 */
266static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
267{
268 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
269 BUG();
270 return 0;
271}
272
273/**
274 * amdgpu_invalid_wreg - dummy reg write function
275 *
276 * @adev: amdgpu device pointer
277 * @reg: offset of register
278 * @v: value to write to the register
279 *
280 * Dummy register write function. Used for register blocks
281 * that certain asics don't have (all asics).
282 */
283static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
284{
285 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
286 reg, v);
287 BUG();
288}
289
290/**
291 * amdgpu_block_invalid_rreg - dummy reg read function
292 *
293 * @adev: amdgpu device pointer
294 * @block: offset of instance
295 * @reg: offset of register
296 *
297 * Dummy register read function. Used for register blocks
298 * that certain asics don't have (all asics).
299 * Returns the value in the register.
300 */
301static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
302 uint32_t block, uint32_t reg)
303{
304 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
305 reg, block);
306 BUG();
307 return 0;
308}
309
310/**
311 * amdgpu_block_invalid_wreg - dummy reg write function
312 *
313 * @adev: amdgpu device pointer
314 * @block: offset of instance
315 * @reg: offset of register
316 * @v: value to write to the register
317 *
318 * Dummy register write function. Used for register blocks
319 * that certain asics don't have (all asics).
320 */
321static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
322 uint32_t block,
323 uint32_t reg, uint32_t v)
324{
325 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
326 reg, block, v);
327 BUG();
328}
329
06ec9070 330static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
d38ceaf9 331{
a4a02777
CK
332 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
333 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
334 &adev->vram_scratch.robj,
335 &adev->vram_scratch.gpu_addr,
336 (void **)&adev->vram_scratch.ptr);
d38ceaf9
AD
337}
338
06ec9070 339static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
d38ceaf9 340{
078af1a3 341 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
d38ceaf9
AD
342}
343
344/**
9c3f2b54 345 * amdgpu_device_program_register_sequence - program an array of registers.
d38ceaf9
AD
346 *
347 * @adev: amdgpu_device pointer
348 * @registers: pointer to the register array
349 * @array_size: size of the register array
350 *
351 * Programs an array of registers with AND and OR masks.
352 * This is a helper for setting golden registers.
353 */
9c3f2b54
AD
354void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
355 const u32 *registers,
356 const u32 array_size)
d38ceaf9
AD
357{
358 u32 tmp, reg, and_mask, or_mask;
359 int i;
360
361 if (array_size % 3)
362 return;
363
364 for (i = 0; i < array_size; i +=3) {
365 reg = registers[i + 0];
366 and_mask = registers[i + 1];
367 or_mask = registers[i + 2];
368
369 if (and_mask == 0xffffffff) {
370 tmp = or_mask;
371 } else {
372 tmp = RREG32(reg);
373 tmp &= ~and_mask;
374 tmp |= or_mask;
375 }
376 WREG32(reg, tmp);
377 }
378}
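/*
 * Illustrative sketch (not part of amdgpu_device.c): the golden-register
 * helper above consumes (reg, and_mask, or_mask) triplets; and_mask ==
 * 0xffffffff means "write or_mask outright", anything else is a
 * read-modify-write.  apply_golden() and regs[] below are invented names for
 * a standalone version of that loop.
 */
#include <stdint.h>
#include <stddef.h>

static uint32_t regs[16];   /* toy register file, indices assumed in range */

static void apply_golden(const uint32_t *tbl, size_t n)
{
	size_t i;

	if (n % 3)                              /* triplets only */
		return;

	for (i = 0; i < n; i += 3) {
		uint32_t reg = tbl[i];
		uint32_t and_mask = tbl[i + 1];
		uint32_t or_mask = tbl[i + 2];
		uint32_t tmp;

		if (and_mask == 0xffffffff) {
			tmp = or_mask;          /* plain write */
		} else {
			tmp = regs[reg];        /* read-modify-write */
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		regs[reg] = tmp;
	}
}

int main(void)
{
	static const uint32_t golden[] = {
		2, 0xffffffff, 0x00001234,      /* write reg 2 directly */
		3, 0x0000000f, 0x00000005,      /* clear low nibble of reg 3, set 0x5 */
	};

	apply_golden(golden, sizeof(golden) / sizeof(golden[0]));
	return 0;
}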
379
8111c387 380void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
d38ceaf9
AD
381{
382 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
383}
384
385/*
386 * GPU doorbell aperture helpers function.
387 */
388/**
06ec9070 389 * amdgpu_device_doorbell_init - Init doorbell driver information.
d38ceaf9
AD
390 *
391 * @adev: amdgpu_device pointer
392 *
393 * Init doorbell driver information (CIK)
394 * Returns 0 on success, error on failure.
395 */
06ec9070 396static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
d38ceaf9 397{
705e519e
CK
398 /* No doorbell on SI hardware generation */
399 if (adev->asic_type < CHIP_BONAIRE) {
400 adev->doorbell.base = 0;
401 adev->doorbell.size = 0;
402 adev->doorbell.num_doorbells = 0;
403 adev->doorbell.ptr = NULL;
404 return 0;
405 }
406
d6895ad3
CK
407 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
408 return -EINVAL;
409
d38ceaf9
AD
410 /* doorbell bar mapping */
411 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
412 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
413
edf600da 414 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
d38ceaf9
AD
415 AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
416 if (adev->doorbell.num_doorbells == 0)
417 return -EINVAL;
418
8972e5d2
CK
419 adev->doorbell.ptr = ioremap(adev->doorbell.base,
420 adev->doorbell.num_doorbells *
421 sizeof(u32));
422 if (adev->doorbell.ptr == NULL)
d38ceaf9 423 return -ENOMEM;
d38ceaf9
AD
424
425 return 0;
426}
427
428/**
06ec9070 429 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
d38ceaf9
AD
430 *
431 * @adev: amdgpu_device pointer
432 *
433 * Tear down doorbell driver information (CIK)
434 */
06ec9070 435static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
d38ceaf9
AD
436{
437 iounmap(adev->doorbell.ptr);
438 adev->doorbell.ptr = NULL;
439}
440
22cb0164 441
d38ceaf9
AD
442
443/*
06ec9070 444 * amdgpu_device_wb_*()
455a7bc2 445 * Writeback is the method by which the GPU updates special pages in memory
ea81a173 446 * with the status of certain GPU events (fences, ring pointers, etc.).
d38ceaf9
AD
447 */
448
449/**
06ec9070 450 * amdgpu_device_wb_fini - Disable Writeback and free memory
d38ceaf9
AD
451 *
452 * @adev: amdgpu_device pointer
453 *
454 * Disables Writeback and frees the Writeback memory (all asics).
455 * Used at driver shutdown.
456 */
06ec9070 457static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
d38ceaf9
AD
458{
459 if (adev->wb.wb_obj) {
a76ed485
AD
460 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
461 &adev->wb.gpu_addr,
462 (void **)&adev->wb.wb);
d38ceaf9
AD
463 adev->wb.wb_obj = NULL;
464 }
465}
466
467/**
06ec9070 468 * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
d38ceaf9
AD
469 *
470 * @adev: amdgpu_device pointer
471 *
455a7bc2 472 * Initializes writeback and allocates writeback memory (all asics).
d38ceaf9
AD
473 * Used at driver startup.
474 * Returns 0 on success or a negative error code on failure.
475 */
06ec9070 476static int amdgpu_device_wb_init(struct amdgpu_device *adev)
d38ceaf9
AD
477{
478 int r;
479
480 if (adev->wb.wb_obj == NULL) {
97407b63
AD
481 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
482 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
a76ed485
AD
483 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
484 &adev->wb.wb_obj, &adev->wb.gpu_addr,
485 (void **)&adev->wb.wb);
d38ceaf9
AD
486 if (r) {
487 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
488 return r;
489 }
d38ceaf9
AD
490
491 adev->wb.num_wb = AMDGPU_MAX_WB;
492 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
493
494 /* clear wb memory */
60a970a6 495 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
d38ceaf9
AD
496 }
497
498 return 0;
499}
500
501/**
131b4b36 502 * amdgpu_device_wb_get - Allocate a wb entry
d38ceaf9
AD
503 *
504 * @adev: amdgpu_device pointer
505 * @wb: wb index
506 *
507 * Allocate a wb slot for use by the driver (all asics).
508 * Returns 0 on success or -EINVAL on failure.
509 */
131b4b36 510int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
d38ceaf9
AD
511{
512 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
d38ceaf9 513
97407b63 514 if (offset < adev->wb.num_wb) {
7014285a 515 __set_bit(offset, adev->wb.used);
63ae07ca 516 *wb = offset << 3; /* convert to dw offset */
0915fdbc
ML
517 return 0;
518 } else {
519 return -EINVAL;
520 }
521}
522
d38ceaf9 523/**
131b4b36 524 * amdgpu_device_wb_free - Free a wb entry
d38ceaf9
AD
525 *
526 * @adev: amdgpu_device pointer
527 * @wb: wb index
528 *
529 * Free a wb slot allocated for use by the driver (all asics)
530 */
131b4b36 531void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
d38ceaf9
AD
532{
533 if (wb < adev->wb.num_wb)
63ae07ca 534 __clear_bit(wb >> 3, adev->wb.used);
d38ceaf9
AD
535}
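/*
 * Illustrative sketch (not part of amdgpu_device.c): wb_get()/wb_free() above
 * hand out 256-bit (8-dword) writeback slots from a bitmap and return them as
 * dword offsets, which is why allocation shifts left by 3 and free shifts
 * right by 3.  The toy below uses a plain 32-bit mask instead of the kernel
 * bitmap helpers; all names are invented.
 */
#include <stdint.h>

#define TOY_NUM_WB 32u
static uint32_t toy_used;                    /* one bit per 8-dword slot */

static int toy_wb_get(uint32_t *wb)
{
	uint32_t slot;

	for (slot = 0; slot < TOY_NUM_WB; slot++) {
		if (!(toy_used & (1u << slot))) {
			toy_used |= 1u << slot;
			*wb = slot << 3;     /* slot index -> dword offset */
			return 0;
		}
	}
	return -1;                           /* the driver returns -EINVAL */
}

static void toy_wb_free(uint32_t wb)
{
	toy_used &= ~(1u << (wb >> 3));      /* dword offset -> slot index */
}

int main(void)
{
	uint32_t wb;

	if (!toy_wb_get(&wb))                /* wb == 0, the first free slot */
		toy_wb_free(wb);
	return 0;
}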
536
537/**
2543e28a 538 * amdgpu_device_vram_location - try to find VRAM location
d38ceaf9
AD
539 * @adev: amdgpu device structure holding all necessary information
540 * @mc: memory controller structure holding memory information
541 * @base: base address at which to put VRAM
542 *
455a7bc2 543 * Function will try to place VRAM at base address provided
3d647c8f 544 * as parameter.
d38ceaf9 545 */
2543e28a 546void amdgpu_device_vram_location(struct amdgpu_device *adev,
770d13b1 547 struct amdgpu_gmc *mc, u64 base)
d38ceaf9
AD
548{
549 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
550
551 mc->vram_start = base;
d38ceaf9
AD
552 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
553 if (limit && limit < mc->real_vram_size)
554 mc->real_vram_size = limit;
555 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
556 mc->mc_vram_size >> 20, mc->vram_start,
557 mc->vram_end, mc->real_vram_size >> 20);
558}
559
560/**
2543e28a 561 * amdgpu_device_gart_location - try to find GTT location
d38ceaf9
AD
562 * @adev: amdgpu device structure holding all necessary information
563 * @mc: memory controller structure holding memory information
564 *
565 * Function will try to place GTT before or after VRAM.
566 *
567 * If the GTT size is bigger than the space left, we adjust the GTT size.
568 * Thus this function will never fail.
569 *
570 * FIXME: when reducing GTT size align new size on power of 2.
571 */
2543e28a 572void amdgpu_device_gart_location(struct amdgpu_device *adev,
770d13b1 573 struct amdgpu_gmc *mc)
d38ceaf9
AD
574{
575 u64 size_af, size_bf;
576
770d13b1 577 size_af = adev->gmc.mc_mask - mc->vram_end;
ed21c047 578 size_bf = mc->vram_start;
d38ceaf9 579 if (size_bf > size_af) {
6f02a696 580 if (mc->gart_size > size_bf) {
d38ceaf9 581 dev_warn(adev->dev, "limiting GTT\n");
6f02a696 582 mc->gart_size = size_bf;
d38ceaf9 583 }
6f02a696 584 mc->gart_start = 0;
d38ceaf9 585 } else {
6f02a696 586 if (mc->gart_size > size_af) {
d38ceaf9 587 dev_warn(adev->dev, "limiting GTT\n");
6f02a696 588 mc->gart_size = size_af;
d38ceaf9 589 }
b98f1b9e
CK
590 /* VCE doesn't like it when BOs cross a 4GB segment, so align
591 * the GART base on a 4GB boundary as well.
592 */
593 mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
d38ceaf9 594 }
6f02a696 595 mc->gart_end = mc->gart_start + mc->gart_size - 1;
d38ceaf9 596 dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
6f02a696 597 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
d38ceaf9
AD
598}
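/*
 * Illustrative sketch (not part of amdgpu_device.c): the placement logic above
 * compares the hole below VRAM with the hole above it, clamps the GART size to
 * whichever hole is chosen, and aligns an "above VRAM" GART start to a 4GB
 * boundary so VCE buffers never straddle a 4GB segment.  The values below are
 * made-up example inputs; it compiles standalone.
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t mc_mask = (1ULL << 48) - 1;              /* 48-bit MC address space */
	uint64_t vram_start = 0;
	uint64_t vram_end = (8ULL << 30) - 1;             /* 8GB of VRAM at offset 0 */
	uint64_t gart_size = 512ULL << 20;
	uint64_t size_af = mc_mask - vram_end;            /* space above VRAM */
	uint64_t size_bf = vram_start;                    /* space below VRAM */
	uint64_t gart_start;

	if (size_bf > size_af) {
		if (gart_size > size_bf)
			gart_size = size_bf;              /* "limiting GTT" */
		gart_start = 0;
	} else {
		if (gart_size > size_af)
			gart_size = size_af;
		gart_start = ALIGN_UP(vram_end + 1, 0x100000000ULL);
	}

	printf("GTT: %lluM at 0x%016llx\n",
	       (unsigned long long)(gart_size >> 20),
	       (unsigned long long)gart_start);           /* 512M at 0x200000000 */
	return 0;
}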
599
d6895ad3
CK
600/**
601 * amdgpu_device_resize_fb_bar - try to resize FB BAR
602 *
603 * @adev: amdgpu_device pointer
604 *
605 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
606 * to fail, but if any of the BARs is not accessible after the resize we abort
607 * driver loading by returning -ENODEV.
608 */
609int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
610{
770d13b1 611 u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
d6895ad3 612 u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
31b8adab
CK
613 struct pci_bus *root;
614 struct resource *res;
615 unsigned i;
d6895ad3
CK
616 u16 cmd;
617 int r;
618
0c03b912 619 /* Bypass for VF */
620 if (amdgpu_sriov_vf(adev))
621 return 0;
622
31b8adab
CK
623 /* Check if the root BUS has 64bit memory resources */
624 root = adev->pdev->bus;
625 while (root->parent)
626 root = root->parent;
627
628 pci_bus_for_each_resource(root, res, i) {
0ebb7c54 629 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
31b8adab
CK
630 res->start > 0x100000000ull)
631 break;
632 }
633
634 /* Trying to resize is pointless without a root hub window above 4GB */
635 if (!res)
636 return 0;
637
d6895ad3
CK
638 /* Disable memory decoding while we change the BAR addresses and size */
639 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
640 pci_write_config_word(adev->pdev, PCI_COMMAND,
641 cmd & ~PCI_COMMAND_MEMORY);
642
643 /* Free the VRAM and doorbell BAR, we most likely need to move both. */
06ec9070 644 amdgpu_device_doorbell_fini(adev);
d6895ad3
CK
645 if (adev->asic_type >= CHIP_BONAIRE)
646 pci_release_resource(adev->pdev, 2);
647
648 pci_release_resource(adev->pdev, 0);
649
650 r = pci_resize_resource(adev->pdev, 0, rbar_size);
651 if (r == -ENOSPC)
652 DRM_INFO("Not enough PCI address space for a large BAR.");
653 else if (r && r != -ENOTSUPP)
654 DRM_ERROR("Problem resizing BAR0 (%d).", r);
655
656 pci_assign_unassigned_bus_resources(adev->pdev->bus);
657
658 /* When the doorbell or fb BAR isn't available we have no chance of
659 * using the device.
660 */
06ec9070 661 r = amdgpu_device_doorbell_init(adev);
d6895ad3
CK
662 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
663 return -ENODEV;
664
665 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
666
667 return 0;
668}
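/*
 * Illustrative sketch (not part of amdgpu_device.c): the rbar_size value above
 * is the PCIe Resizable BAR size encoding, log2 of the BAR size in MB, so
 * 0 = 1MB and 13 = 8GB.  order_base_2() and roundup_pow_of_two() are
 * open-coded below (toy_order_base_2) so the computation can be checked
 * standalone.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int toy_order_base_2(uint64_t n)   /* ceil(log2(n)), n >= 1 */
{
	unsigned int order = 0;

	while ((1ULL << order) < n)
		order++;
	return order;
}

int main(void)
{
	uint64_t real_vram_size = 8ULL << 30;                          /* 8GB */
	uint64_t space_needed = 1ULL << toy_order_base_2(real_vram_size);
	uint32_t rbar_size = toy_order_base_2((space_needed >> 20) | 1) - 1;

	printf("rbar_size encoding: %u\n", rbar_size);                 /* 13 */
	return 0;
}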
a05502e5 669
d38ceaf9
AD
670/*
671 * GPU helpers function.
672 */
673/**
39c640c0 674 * amdgpu_device_need_post - check if the hw need post or not
d38ceaf9
AD
675 *
676 * @adev: amdgpu_device pointer
677 *
c836fec5
JQ
678 * Check if the asic has been initialized (all asics) at driver startup
679 * or post is needed if hw reset is performed.
680 * Returns true if post is needed or false if not.
d38ceaf9 681 */
39c640c0 682bool amdgpu_device_need_post(struct amdgpu_device *adev)
d38ceaf9
AD
683{
684 uint32_t reg;
685
bec86378
ML
686 if (amdgpu_sriov_vf(adev))
687 return false;
688
689 if (amdgpu_passthrough(adev)) {
1da2c326
ML
690 /* for FIJI: In the whole-GPU pass-through virtualization case, after a VM reboot
691 * some old SMC firmware still needs the driver to do a vPost, otherwise the GPU hangs.
692 * SMC firmware versions above 22.15 don't have this flaw, so we only force
693 * a vPost for SMC versions below 22.15
bec86378
ML
694 */
695 if (adev->asic_type == CHIP_FIJI) {
696 int err;
697 uint32_t fw_ver;
698 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
699 /* force vPost if an error occurred */
700 if (err)
701 return true;
702
703 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1da2c326
ML
704 if (fw_ver < 0x00160e00)
705 return true;
bec86378 706 }
bec86378 707 }
91fe77eb 708
709 if (adev->has_hw_reset) {
710 adev->has_hw_reset = false;
711 return true;
712 }
713
714 /* bios scratch used on CIK+ */
715 if (adev->asic_type >= CHIP_BONAIRE)
716 return amdgpu_atombios_scratch_need_asic_init(adev);
717
718 /* check MEM_SIZE for older asics */
719 reg = amdgpu_asic_get_config_memsize(adev);
720
721 if ((reg != 0) && (reg != 0xffffffff))
722 return false;
723
724 return true;
bec86378
ML
725}
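/*
 * Illustrative sketch (not part of amdgpu_device.c): need_post() above is a
 * short decision chain: never post on an SR-IOV VF, force a post for the FIJI
 * pass-through/old-SMC case or after a hardware reset, use the bios scratch
 * registers on CIK and newer, and fall back to the MEM_SIZE register on older
 * asics.  The struct and values below are invented to show the ordering of
 * the checks.
 */
#include <stdbool.h>
#include <stdint.h>

struct toy_post_state {
	bool sriov_vf;                     /* VFs never post */
	bool passthrough_fiji_old_smc;     /* FIJI pass-through, SMC fw < 22.15 */
	bool has_hw_reset;                 /* a hw reset was just performed */
	bool cik_or_newer;
	bool bios_scratch_needs_init;      /* CIK+ scratch register check */
	uint32_t config_memsize;           /* older asics: MEM_SIZE register */
};

static bool toy_need_post(struct toy_post_state *s)
{
	if (s->sriov_vf)
		return false;
	if (s->passthrough_fiji_old_smc)
		return true;
	if (s->has_hw_reset) {
		s->has_hw_reset = false;   /* one-shot flag, as in the driver */
		return true;
	}
	if (s->cik_or_newer)
		return s->bios_scratch_needs_init;
	/* older asics: a sane MEM_SIZE means the vbios already posted the card */
	return s->config_memsize == 0 || s->config_memsize == 0xffffffff;
}

int main(void)
{
	struct toy_post_state s = { .cik_or_newer = true };

	return toy_need_post(&s) ? 1 : 0;
}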
726
d38ceaf9
AD
727/* if we get transitioned to only one device, take VGA back */
728/**
06ec9070 729 * amdgpu_device_vga_set_decode - enable/disable vga decode
d38ceaf9
AD
730 *
731 * @cookie: amdgpu_device pointer
732 * @state: enable/disable vga decode
733 *
734 * Enable/disable vga decode (all asics).
735 * Returns VGA resource flags.
736 */
06ec9070 737static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
d38ceaf9
AD
738{
739 struct amdgpu_device *adev = cookie;
740 amdgpu_asic_set_vga_state(adev, state);
741 if (state)
742 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
743 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
744 else
745 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
746}
747
06ec9070 748static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
a1adf8be
CZ
749{
750 /* defines number of bits in page table versus page directory,
751 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
752 * page table and the remaining bits are in the page directory */
bab4fee7
JZ
753 if (amdgpu_vm_block_size == -1)
754 return;
a1adf8be 755
bab4fee7 756 if (amdgpu_vm_block_size < 9) {
a1adf8be
CZ
757 dev_warn(adev->dev, "VM page table size (%d) too small\n",
758 amdgpu_vm_block_size);
97489129 759 amdgpu_vm_block_size = -1;
a1adf8be 760 }
a1adf8be
CZ
761}
762
06ec9070 763static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
83ca145d 764{
64dab074
AD
765 /* no need to check the default value */
766 if (amdgpu_vm_size == -1)
767 return;
768
83ca145d
ZJ
769 if (amdgpu_vm_size < 1) {
770 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
771 amdgpu_vm_size);
f3368128 772 amdgpu_vm_size = -1;
83ca145d 773 }
83ca145d
ZJ
774}
775
d38ceaf9 776/**
06ec9070 777 * amdgpu_device_check_arguments - validate module params
d38ceaf9
AD
778 *
779 * @adev: amdgpu_device pointer
780 *
781 * Validates certain module parameters and updates
782 * the associated values used by the driver (all asics).
783 */
06ec9070 784static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
d38ceaf9 785{
5b011235
CZ
786 if (amdgpu_sched_jobs < 4) {
787 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
788 amdgpu_sched_jobs);
789 amdgpu_sched_jobs = 4;
76117507 790 } else if (!is_power_of_2(amdgpu_sched_jobs)){
5b011235
CZ
791 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
792 amdgpu_sched_jobs);
793 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
794 }
d38ceaf9 795
83e74db6 796 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
f9321cc4
CK
797 /* gart size must be greater or equal to 32M */
798 dev_warn(adev->dev, "gart size (%d) too small\n",
799 amdgpu_gart_size);
83e74db6 800 amdgpu_gart_size = -1;
d38ceaf9
AD
801 }
802
36d38372 803 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
c4e1a13a 804 /* gtt size must be greater or equal to 32M */
36d38372
CK
805 dev_warn(adev->dev, "gtt size (%d) too small\n",
806 amdgpu_gtt_size);
807 amdgpu_gtt_size = -1;
d38ceaf9
AD
808 }
809
d07f14be
RH
810 /* valid range is between 4 and 9 inclusive */
811 if (amdgpu_vm_fragment_size != -1 &&
812 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
813 dev_warn(adev->dev, "valid range is between 4 and 9\n");
814 amdgpu_vm_fragment_size = -1;
815 }
816
06ec9070 817 amdgpu_device_check_vm_size(adev);
d38ceaf9 818
06ec9070 819 amdgpu_device_check_block_size(adev);
6a7f76e7 820
526bae37 821 if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
76117507 822 !is_power_of_2(amdgpu_vram_page_split))) {
6a7f76e7
CK
823 dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
824 amdgpu_vram_page_split);
825 amdgpu_vram_page_split = 1024;
826 }
8854695a
AG
827
828 if (amdgpu_lockup_timeout == 0) {
829 dev_warn(adev->dev, "lockup_timeout must be > 0, adjusting to 10000\n");
830 amdgpu_lockup_timeout = 10000;
831 }
d38ceaf9
AD
832}
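/*
 * Illustrative sketch (not part of amdgpu_device.c): every check above follows
 * the same pattern: warn, then coerce the module parameter to a sane value (a
 * minimum, a power of two, or -1 meaning "pick the default").  A standalone
 * version of the sched_jobs rule; sanitize_sched_jobs() is an invented name.
 */
#include <stdio.h>

static int is_power_of_2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static int sanitize_sched_jobs(int sched_jobs)
{
	if (sched_jobs < 4) {
		fprintf(stderr, "sched jobs (%d) must be at least 4\n", sched_jobs);
		return 4;
	}
	if (!is_power_of_2((unsigned int)sched_jobs)) {
		fprintf(stderr, "sched jobs (%d) must be a power of 2\n", sched_jobs);
		return (int)roundup_pow_of_two((unsigned int)sched_jobs);
	}
	return sched_jobs;
}

int main(void)
{
	printf("%d %d %d\n", sanitize_sched_jobs(1),
	       sanitize_sched_jobs(6), sanitize_sched_jobs(8));   /* 4 8 8 */
	return 0;
}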
833
834/**
835 * amdgpu_switcheroo_set_state - set switcheroo state
836 *
837 * @pdev: pci dev pointer
1694467b 838 * @state: vga_switcheroo state
d38ceaf9
AD
839 *
840 * Callback for the switcheroo driver. Suspends or resumes
841 * the asics before or after they are powered up using ACPI methods.
842 */
843static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
844{
845 struct drm_device *dev = pci_get_drvdata(pdev);
846
847 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
848 return;
849
850 if (state == VGA_SWITCHEROO_ON) {
7ca85295 851 pr_info("amdgpu: switched on\n");
d38ceaf9
AD
852 /* don't suspend or resume card normally */
853 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
854
810ddc3a 855 amdgpu_device_resume(dev, true, true);
d38ceaf9 856
d38ceaf9
AD
857 dev->switch_power_state = DRM_SWITCH_POWER_ON;
858 drm_kms_helper_poll_enable(dev);
859 } else {
7ca85295 860 pr_info("amdgpu: switched off\n");
d38ceaf9
AD
861 drm_kms_helper_poll_disable(dev);
862 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
810ddc3a 863 amdgpu_device_suspend(dev, true, true);
d38ceaf9
AD
864 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
865 }
866}
867
868/**
869 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
870 *
871 * @pdev: pci dev pointer
872 *
873 * Callback for the switcheroo driver. Check if the switcheroo
874 * state can be changed.
875 * Returns true if the state can be changed, false if not.
876 */
877static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
878{
879 struct drm_device *dev = pci_get_drvdata(pdev);
880
881 /*
882 * FIXME: open_count is protected by drm_global_mutex but that would lead to
883 * locking inversion with the driver load path. And the access here is
884 * completely racy anyway. So don't bother with locking for now.
885 */
886 return dev->open_count == 0;
887}
888
889static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
890 .set_gpu_state = amdgpu_switcheroo_set_state,
891 .reprobe = NULL,
892 .can_switch = amdgpu_switcheroo_can_switch,
893};
894
2990a1fc
AD
895int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
896 enum amd_ip_block_type block_type,
897 enum amd_clockgating_state state)
d38ceaf9
AD
898{
899 int i, r = 0;
900
901 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 902 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 903 continue;
c722865a
RZ
904 if (adev->ip_blocks[i].version->type != block_type)
905 continue;
906 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
907 continue;
908 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
909 (void *)adev, state);
910 if (r)
911 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
912 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
913 }
914 return r;
915}
916
2990a1fc
AD
917int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
918 enum amd_ip_block_type block_type,
919 enum amd_powergating_state state)
d38ceaf9
AD
920{
921 int i, r = 0;
922
923 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 924 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 925 continue;
c722865a
RZ
926 if (adev->ip_blocks[i].version->type != block_type)
927 continue;
928 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
929 continue;
930 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
931 (void *)adev, state);
932 if (r)
933 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
934 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
935 }
936 return r;
937}
938
2990a1fc
AD
939void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
940 u32 *flags)
6cb2d4e4
HR
941{
942 int i;
943
944 for (i = 0; i < adev->num_ip_blocks; i++) {
945 if (!adev->ip_blocks[i].status.valid)
946 continue;
947 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
948 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
949 }
950}
951
2990a1fc
AD
952int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
953 enum amd_ip_block_type block_type)
5dbbb60b
AD
954{
955 int i, r;
956
957 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 958 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 959 continue;
a1255107
AD
960 if (adev->ip_blocks[i].version->type == block_type) {
961 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
5dbbb60b
AD
962 if (r)
963 return r;
964 break;
965 }
966 }
967 return 0;
968
969}
970
2990a1fc
AD
971bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
972 enum amd_ip_block_type block_type)
5dbbb60b
AD
973{
974 int i;
975
976 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 977 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 978 continue;
a1255107
AD
979 if (adev->ip_blocks[i].version->type == block_type)
980 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
5dbbb60b
AD
981 }
982 return true;
983
984}
985
2990a1fc
AD
986struct amdgpu_ip_block *
987amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
988 enum amd_ip_block_type type)
d38ceaf9
AD
989{
990 int i;
991
992 for (i = 0; i < adev->num_ip_blocks; i++)
a1255107 993 if (adev->ip_blocks[i].version->type == type)
d38ceaf9
AD
994 return &adev->ip_blocks[i];
995
996 return NULL;
997}
998
999/**
2990a1fc 1000 * amdgpu_device_ip_block_version_cmp
d38ceaf9
AD
1001 *
1002 * @adev: amdgpu_device pointer
5fc3aeeb 1003 * @type: enum amd_ip_block_type
d38ceaf9
AD
1004 * @major: major version
1005 * @minor: minor version
1006 *
1007 * return 0 if equal or greater
1008 * return 1 if smaller or the ip_block doesn't exist
1009 */
2990a1fc
AD
1010int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1011 enum amd_ip_block_type type,
1012 u32 major, u32 minor)
d38ceaf9 1013{
2990a1fc 1014 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
d38ceaf9 1015
a1255107
AD
1016 if (ip_block && ((ip_block->version->major > major) ||
1017 ((ip_block->version->major == major) &&
1018 (ip_block->version->minor >= minor))))
d38ceaf9
AD
1019 return 0;
1020
1021 return 1;
1022}
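/*
 * Illustrative sketch (not part of amdgpu_device.c): the comparison above
 * returns 0 when the installed IP block is at least (major, minor) and 1 when
 * it is older or missing.  The same rule in isolation, with invented names:
 */
#include <stdio.h>

struct toy_ip_version {
	int present;
	int major;
	int minor;
};

static int toy_ip_version_cmp(struct toy_ip_version v, int major, int minor)
{
	if (v.present && (v.major > major ||
			  (v.major == major && v.minor >= minor)))
		return 0;                  /* equal or newer */
	return 1;                          /* older, or the block doesn't exist */
}

int main(void)
{
	struct toy_ip_version gmc = { 1, 8, 1 };

	printf("%d %d\n", toy_ip_version_cmp(gmc, 8, 0),   /* 0: 8.1 >= 8.0 */
	       toy_ip_version_cmp(gmc, 9, 0));             /* 1: 8.1 <  9.0 */
	return 0;
}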
1023
a1255107 1024/**
2990a1fc 1025 * amdgpu_device_ip_block_add
a1255107
AD
1026 *
1027 * @adev: amdgpu_device pointer
1028 * @ip_block_version: pointer to the IP to add
1029 *
1030 * Adds the IP block driver information to the collection of IPs
1031 * on the asic.
1032 */
2990a1fc
AD
1033int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1034 const struct amdgpu_ip_block_version *ip_block_version)
a1255107
AD
1035{
1036 if (!ip_block_version)
1037 return -EINVAL;
1038
e966a725 1039 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
a0bae357
HR
1040 ip_block_version->funcs->name);
1041
a1255107
AD
1042 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1043
1044 return 0;
1045}
1046
483ef985 1047static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
9accf2fd
ED
1048{
1049 adev->enable_virtual_display = false;
1050
1051 if (amdgpu_virtual_display) {
1052 struct drm_device *ddev = adev->ddev;
1053 const char *pci_address_name = pci_name(ddev->pdev);
0f66356d 1054 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
9accf2fd
ED
1055
1056 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1057 pciaddstr_tmp = pciaddstr;
0f66356d
ED
1058 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1059 pciaddname = strsep(&pciaddname_tmp, ",");
967de2a9
YT
1060 if (!strcmp("all", pciaddname)
1061 || !strcmp(pci_address_name, pciaddname)) {
0f66356d
ED
1062 long num_crtc;
1063 int res = -1;
1064
9accf2fd 1065 adev->enable_virtual_display = true;
0f66356d
ED
1066
1067 if (pciaddname_tmp)
1068 res = kstrtol(pciaddname_tmp, 10,
1069 &num_crtc);
1070
1071 if (!res) {
1072 if (num_crtc < 1)
1073 num_crtc = 1;
1074 if (num_crtc > 6)
1075 num_crtc = 6;
1076 adev->mode_info.num_crtc = num_crtc;
1077 } else {
1078 adev->mode_info.num_crtc = 1;
1079 }
9accf2fd
ED
1080 break;
1081 }
1082 }
1083
0f66356d
ED
1084 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1085 amdgpu_virtual_display, pci_address_name,
1086 adev->enable_virtual_display, adev->mode_info.num_crtc);
9accf2fd
ED
1087
1088 kfree(pciaddstr);
1089 }
1090}
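/*
 * Illustrative sketch (not part of amdgpu_device.c): the amdgpu_virtual_display
 * option parsed above is a ';'-separated list of "<pci address>,<num_crtc>"
 * entries, with "all" matching any device and num_crtc clamped to 1..6.  A
 * standalone parser for the same format, using strtok()/strchr() instead of
 * the kernel's strsep(); the option string and device name are made up.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *this_device = "0000:01:00.0";
	char option[] = "all,2;0000:02:00.0,1";
	char *entry;

	for (entry = strtok(option, ";"); entry; entry = strtok(NULL, ";")) {
		char *comma = strchr(entry, ',');
		long num_crtc = 1;

		if (comma) {
			*comma = '\0';                /* split "<addr>,<num_crtc>" */
			num_crtc = strtol(comma + 1, NULL, 10);
			if (num_crtc < 1)
				num_crtc = 1;
			if (num_crtc > 6)
				num_crtc = 6;
		}
		if (!strcmp(entry, "all") || !strcmp(entry, this_device)) {
			printf("virtual display enabled for %s, num_crtc=%ld\n",
			       this_device, num_crtc);
			break;
		}
	}
	return 0;
}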
1091
e2a75f88
AD
1092static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1093{
e2a75f88
AD
1094 const char *chip_name;
1095 char fw_name[30];
1096 int err;
1097 const struct gpu_info_firmware_header_v1_0 *hdr;
1098
ab4fe3e1
HR
1099 adev->firmware.gpu_info_fw = NULL;
1100
e2a75f88
AD
1101 switch (adev->asic_type) {
1102 case CHIP_TOPAZ:
1103 case CHIP_TONGA:
1104 case CHIP_FIJI:
1105 case CHIP_POLARIS11:
1106 case CHIP_POLARIS10:
1107 case CHIP_POLARIS12:
1108 case CHIP_CARRIZO:
1109 case CHIP_STONEY:
1110#ifdef CONFIG_DRM_AMDGPU_SI
1111 case CHIP_VERDE:
1112 case CHIP_TAHITI:
1113 case CHIP_PITCAIRN:
1114 case CHIP_OLAND:
1115 case CHIP_HAINAN:
1116#endif
1117#ifdef CONFIG_DRM_AMDGPU_CIK
1118 case CHIP_BONAIRE:
1119 case CHIP_HAWAII:
1120 case CHIP_KAVERI:
1121 case CHIP_KABINI:
1122 case CHIP_MULLINS:
1123#endif
1124 default:
1125 return 0;
1126 case CHIP_VEGA10:
1127 chip_name = "vega10";
1128 break;
2d2e5e7e
AD
1129 case CHIP_RAVEN:
1130 chip_name = "raven";
1131 break;
e2a75f88
AD
1132 }
1133
1134 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
ab4fe3e1 1135 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
e2a75f88
AD
1136 if (err) {
1137 dev_err(adev->dev,
1138 "Failed to load gpu_info firmware \"%s\"\n",
1139 fw_name);
1140 goto out;
1141 }
ab4fe3e1 1142 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
e2a75f88
AD
1143 if (err) {
1144 dev_err(adev->dev,
1145 "Failed to validate gpu_info firmware \"%s\"\n",
1146 fw_name);
1147 goto out;
1148 }
1149
ab4fe3e1 1150 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
e2a75f88
AD
1151 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1152
1153 switch (hdr->version_major) {
1154 case 1:
1155 {
1156 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
ab4fe3e1 1157 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
e2a75f88
AD
1158 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1159
b5ab16bf
AD
1160 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1161 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1162 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1163 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
e2a75f88 1164 adev->gfx.config.max_texture_channel_caches =
b5ab16bf
AD
1165 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1166 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1167 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1168 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1169 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
e2a75f88 1170 adev->gfx.config.double_offchip_lds_buf =
b5ab16bf
AD
1171 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1172 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
51fd0370
HZ
1173 adev->gfx.cu_info.max_waves_per_simd =
1174 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1175 adev->gfx.cu_info.max_scratch_slots_per_cu =
1176 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1177 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
e2a75f88
AD
1178 break;
1179 }
1180 default:
1181 dev_err(adev->dev,
1182 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1183 err = -EINVAL;
1184 goto out;
1185 }
1186out:
e2a75f88
AD
1187 return err;
1188}
1189
06ec9070 1190static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
d38ceaf9 1191{
aaa36a97 1192 int i, r;
d38ceaf9 1193
483ef985 1194 amdgpu_device_enable_virtual_display(adev);
a6be7570 1195
d38ceaf9 1196 switch (adev->asic_type) {
aaa36a97
AD
1197 case CHIP_TOPAZ:
1198 case CHIP_TONGA:
48299f95 1199 case CHIP_FIJI:
2cc0c0b5
FC
1200 case CHIP_POLARIS11:
1201 case CHIP_POLARIS10:
c4642a47 1202 case CHIP_POLARIS12:
aaa36a97 1203 case CHIP_CARRIZO:
39bb0c92
SL
1204 case CHIP_STONEY:
1205 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
aaa36a97
AD
1206 adev->family = AMDGPU_FAMILY_CZ;
1207 else
1208 adev->family = AMDGPU_FAMILY_VI;
1209
1210 r = vi_set_ip_blocks(adev);
1211 if (r)
1212 return r;
1213 break;
33f34802
KW
1214#ifdef CONFIG_DRM_AMDGPU_SI
1215 case CHIP_VERDE:
1216 case CHIP_TAHITI:
1217 case CHIP_PITCAIRN:
1218 case CHIP_OLAND:
1219 case CHIP_HAINAN:
295d0daf 1220 adev->family = AMDGPU_FAMILY_SI;
33f34802
KW
1221 r = si_set_ip_blocks(adev);
1222 if (r)
1223 return r;
1224 break;
1225#endif
a2e73f56
AD
1226#ifdef CONFIG_DRM_AMDGPU_CIK
1227 case CHIP_BONAIRE:
1228 case CHIP_HAWAII:
1229 case CHIP_KAVERI:
1230 case CHIP_KABINI:
1231 case CHIP_MULLINS:
1232 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1233 adev->family = AMDGPU_FAMILY_CI;
1234 else
1235 adev->family = AMDGPU_FAMILY_KV;
1236
1237 r = cik_set_ip_blocks(adev);
1238 if (r)
1239 return r;
1240 break;
1241#endif
2ca8a5d2
CZ
1242 case CHIP_VEGA10:
1243 case CHIP_RAVEN:
1244 if (adev->asic_type == CHIP_RAVEN)
1245 adev->family = AMDGPU_FAMILY_RV;
1246 else
1247 adev->family = AMDGPU_FAMILY_AI;
460826e6
KW
1248
1249 r = soc15_set_ip_blocks(adev);
1250 if (r)
1251 return r;
1252 break;
d38ceaf9
AD
1253 default:
1254 /* FIXME: not supported yet */
1255 return -EINVAL;
1256 }
1257
e2a75f88
AD
1258 r = amdgpu_device_parse_gpu_info_fw(adev);
1259 if (r)
1260 return r;
1261
1884734a 1262 amdgpu_amdkfd_device_probe(adev);
1263
3149d9da
XY
1264 if (amdgpu_sriov_vf(adev)) {
1265 r = amdgpu_virt_request_full_gpu(adev, true);
1266 if (r)
5ffa61c1 1267 return -EAGAIN;
3149d9da
XY
1268 }
1269
d38ceaf9
AD
1270 for (i = 0; i < adev->num_ip_blocks; i++) {
1271 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
ed8cf00c
HR
1272 DRM_ERROR("disabled ip block: %d <%s>\n",
1273 i, adev->ip_blocks[i].version->funcs->name);
a1255107 1274 adev->ip_blocks[i].status.valid = false;
d38ceaf9 1275 } else {
a1255107
AD
1276 if (adev->ip_blocks[i].version->funcs->early_init) {
1277 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2c1a2784 1278 if (r == -ENOENT) {
a1255107 1279 adev->ip_blocks[i].status.valid = false;
2c1a2784 1280 } else if (r) {
a1255107
AD
1281 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1282 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1283 return r;
2c1a2784 1284 } else {
a1255107 1285 adev->ip_blocks[i].status.valid = true;
2c1a2784 1286 }
974e6b64 1287 } else {
a1255107 1288 adev->ip_blocks[i].status.valid = true;
d38ceaf9 1289 }
d38ceaf9
AD
1290 }
1291 }
1292
395d1fb9
NH
1293 adev->cg_flags &= amdgpu_cg_mask;
1294 adev->pg_flags &= amdgpu_pg_mask;
1295
d38ceaf9
AD
1296 return 0;
1297}
1298
06ec9070 1299static int amdgpu_device_ip_init(struct amdgpu_device *adev)
d38ceaf9
AD
1300{
1301 int i, r;
1302
1303 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1304 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1305 continue;
a1255107 1306 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2c1a2784 1307 if (r) {
a1255107
AD
1308 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1309 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1310 return r;
2c1a2784 1311 }
a1255107 1312 adev->ip_blocks[i].status.sw = true;
bfca0289
SL
1313
1314 if (amdgpu_emu_mode == 1) {
1315 /* Need to do common hw init first on emulation */
1316 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
1317 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1318 if (r) {
1319 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1320 adev->ip_blocks[i].version->funcs->name, r);
1321 return r;
1322 }
1323 adev->ip_blocks[i].status.hw = true;
1324 }
1325 }
1326
d38ceaf9 1327 /* need to do gmc hw init early so we can allocate gpu mem */
a1255107 1328 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
06ec9070 1329 r = amdgpu_device_vram_scratch_init(adev);
2c1a2784
AD
1330 if (r) {
1331 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
d38ceaf9 1332 return r;
2c1a2784 1333 }
a1255107 1334 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784
AD
1335 if (r) {
1336 DRM_ERROR("hw_init %d failed %d\n", i, r);
d38ceaf9 1337 return r;
2c1a2784 1338 }
06ec9070 1339 r = amdgpu_device_wb_init(adev);
2c1a2784 1340 if (r) {
06ec9070 1341 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
d38ceaf9 1342 return r;
2c1a2784 1343 }
a1255107 1344 adev->ip_blocks[i].status.hw = true;
2493664f
ML
1345
1346 /* right after GMC hw init, we create CSA */
1347 if (amdgpu_sriov_vf(adev)) {
1348 r = amdgpu_allocate_static_csa(adev);
1349 if (r) {
1350 DRM_ERROR("allocate CSA failed %d\n", r);
1351 return r;
1352 }
1353 }
d38ceaf9
AD
1354 }
1355 }
1356
1357 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1358 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 1359 continue;
bfca0289 1360 if (adev->ip_blocks[i].status.hw)
d38ceaf9 1361 continue;
a1255107 1362 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784 1363 if (r) {
a1255107
AD
1364 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1365 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1366 return r;
2c1a2784 1367 }
a1255107 1368 adev->ip_blocks[i].status.hw = true;
d38ceaf9
AD
1369 }
1370
1884734a 1371 amdgpu_amdkfd_device_init(adev);
c6332b97 1372
1373 if (amdgpu_sriov_vf(adev))
1374 amdgpu_virt_release_full_gpu(adev, true);
1375
d38ceaf9
AD
1376 return 0;
1377}
1378
06ec9070 1379static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
0c49e0b8
CZ
1380{
1381 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1382}
1383
06ec9070 1384static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
0c49e0b8
CZ
1385{
1386 return !!memcmp(adev->gart.ptr, adev->reset_magic,
1387 AMDGPU_RESET_MAGIC_NUM);
1388}
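/*
 * Illustrative sketch (not part of amdgpu_device.c): fill_reset_magic() and
 * check_vram_lost() above implement a canary: a few bytes of GART-visible
 * memory are copied aside once init is done, and a mismatch after a GPU reset
 * means VRAM contents were lost and buffers must be restored.  Standalone
 * version with invented names; call toy_fill_reset_magic() after init and
 * toy_check_vram_lost() after a reset.
 */
#include <stdbool.h>
#include <string.h>

#define TOY_RESET_MAGIC_NUM 64

static unsigned char vram_window[TOY_RESET_MAGIC_NUM]; /* stands in for adev->gart.ptr */
static unsigned char reset_magic[TOY_RESET_MAGIC_NUM];

static void toy_fill_reset_magic(void)
{
	memcpy(reset_magic, vram_window, TOY_RESET_MAGIC_NUM);
}

static bool toy_check_vram_lost(void)
{
	/* any difference means the memory behind the window was reinitialized */
	return memcmp(vram_window, reset_magic, TOY_RESET_MAGIC_NUM) != 0;
}

int main(void)
{
	toy_fill_reset_magic();
	return toy_check_vram_lost() ? 1 : 0;   /* 0 here: nothing changed */
}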
1389
06ec9070 1390static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
d38ceaf9
AD
1391{
1392 int i = 0, r;
1393
4a2ba394
SL
1394 if (amdgpu_emu_mode == 1)
1395 return 0;
1396
d38ceaf9 1397 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1398 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1399 continue;
4a446d55 1400 /* skip CG for VCE/UVD, it's handled specially */
a1255107
AD
1401 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1402 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
4a446d55 1403 /* enable clockgating to save power */
a1255107
AD
1404 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1405 AMD_CG_STATE_GATE);
4a446d55
AD
1406 if (r) {
1407 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
a1255107 1408 adev->ip_blocks[i].version->funcs->name, r);
4a446d55
AD
1409 return r;
1410 }
b0b00ff1 1411 }
d38ceaf9 1412 }
2dc80b00
S
1413 return 0;
1414}
1415
06ec9070 1416static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2dc80b00
S
1417{
1418 int i = 0, r;
1419
1420 for (i = 0; i < adev->num_ip_blocks; i++) {
1421 if (!adev->ip_blocks[i].status.valid)
1422 continue;
1423 if (adev->ip_blocks[i].version->funcs->late_init) {
1424 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
1425 if (r) {
1426 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1427 adev->ip_blocks[i].version->funcs->name, r);
1428 return r;
1429 }
1430 adev->ip_blocks[i].status.late_initialized = true;
1431 }
1432 }
1433
1434 mod_delayed_work(system_wq, &adev->late_init_work,
1435 msecs_to_jiffies(AMDGPU_RESUME_MS));
d38ceaf9 1436
06ec9070 1437 amdgpu_device_fill_reset_magic(adev);
d38ceaf9
AD
1438
1439 return 0;
1440}
1441
06ec9070 1442static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
d38ceaf9
AD
1443{
1444 int i, r;
1445
1884734a 1446 amdgpu_amdkfd_device_fini(adev);
3e96dbfd
AD
1447 /* need to disable SMC first */
1448 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1449 if (!adev->ip_blocks[i].status.hw)
3e96dbfd 1450 continue;
a1255107 1451 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3e96dbfd 1452 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
a1255107
AD
1453 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1454 AMD_CG_STATE_UNGATE);
3e96dbfd
AD
1455 if (r) {
1456 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
a1255107 1457 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd
AD
1458 return r;
1459 }
a1255107 1460 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3e96dbfd
AD
1461 /* XXX handle errors */
1462 if (r) {
1463 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
a1255107 1464 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd 1465 }
a1255107 1466 adev->ip_blocks[i].status.hw = false;
3e96dbfd
AD
1467 break;
1468 }
1469 }
1470
d38ceaf9 1471 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1472 if (!adev->ip_blocks[i].status.hw)
d38ceaf9 1473 continue;
a1255107 1474 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
84e5b516 1475 amdgpu_free_static_csa(adev);
06ec9070
AD
1476 amdgpu_device_wb_fini(adev);
1477 amdgpu_device_vram_scratch_fini(adev);
d38ceaf9 1478 }
8201a67a
RZ
1479
1480 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1481 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1482 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1483 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1484 AMD_CG_STATE_UNGATE);
1485 if (r) {
1486 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1487 adev->ip_blocks[i].version->funcs->name, r);
1488 return r;
1489 }
2c1a2784 1490 }
8201a67a 1491
a1255107 1492 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
d38ceaf9 1493 /* XXX handle errors */
2c1a2784 1494 if (r) {
a1255107
AD
1495 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1496 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1497 }
8201a67a 1498
a1255107 1499 adev->ip_blocks[i].status.hw = false;
d38ceaf9
AD
1500 }
1501
9950cda2
AD
1502 /* disable all interrupts */
1503 amdgpu_irq_disable_all(adev);
1504
d38ceaf9 1505 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1506 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 1507 continue;
a1255107 1508 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
d38ceaf9 1509 /* XXX handle errors */
2c1a2784 1510 if (r) {
a1255107
AD
1511 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1512 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1513 }
a1255107
AD
1514 adev->ip_blocks[i].status.sw = false;
1515 adev->ip_blocks[i].status.valid = false;
d38ceaf9
AD
1516 }
1517
a6dcfd9c 1518 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1519 if (!adev->ip_blocks[i].status.late_initialized)
8a2eef1d 1520 continue;
a1255107
AD
1521 if (adev->ip_blocks[i].version->funcs->late_fini)
1522 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1523 adev->ip_blocks[i].status.late_initialized = false;
a6dcfd9c
ML
1524 }
1525
030308fc 1526 if (amdgpu_sriov_vf(adev))
24136135
ML
1527 if (amdgpu_virt_release_full_gpu(adev, false))
1528 DRM_ERROR("failed to release exclusive mode on fini\n");
2493664f 1529
d38ceaf9
AD
1530 return 0;
1531}
1532
06ec9070 1533static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
2dc80b00
S
1534{
1535 struct amdgpu_device *adev =
1536 container_of(work, struct amdgpu_device, late_init_work.work);
06ec9070 1537 amdgpu_device_ip_late_set_cg_state(adev);
2dc80b00
S
1538}
1539
cdd61df6 1540int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
d38ceaf9
AD
1541{
1542 int i, r;
1543
e941ea99
XY
1544 if (amdgpu_sriov_vf(adev))
1545 amdgpu_virt_request_full_gpu(adev, false);
1546
c5a93a28 1547 /* ungate SMC block first */
2990a1fc
AD
1548 r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1549 AMD_CG_STATE_UNGATE);
c5a93a28 1550 if (r) {
2990a1fc 1551 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
c5a93a28
FC
1552 }
1553
d38ceaf9 1554 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1555 if (!adev->ip_blocks[i].status.valid)
d38ceaf9
AD
1556 continue;
1557 /* ungate blocks so that suspend can properly shut them down */
c5a93a28 1558 if (i != AMD_IP_BLOCK_TYPE_SMC) {
a1255107
AD
1559 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1560 AMD_CG_STATE_UNGATE);
c5a93a28 1561 if (r) {
a1255107
AD
1562 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1563 adev->ip_blocks[i].version->funcs->name, r);
c5a93a28 1564 }
2c1a2784 1565 }
d38ceaf9 1566 /* XXX handle errors */
a1255107 1567 r = adev->ip_blocks[i].version->funcs->suspend(adev);
d38ceaf9 1568 /* XXX handle errors */
2c1a2784 1569 if (r) {
a1255107
AD
1570 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1571 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1572 }
d38ceaf9
AD
1573 }
1574
e941ea99
XY
1575 if (amdgpu_sriov_vf(adev))
1576 amdgpu_virt_release_full_gpu(adev, false);
1577
d38ceaf9
AD
1578 return 0;
1579}
1580
06ec9070 1581static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
a90ad3c2
ML
1582{
1583 int i, r;
1584
2cb681b6
ML
1585 static enum amd_ip_block_type ip_order[] = {
1586 AMD_IP_BLOCK_TYPE_GMC,
1587 AMD_IP_BLOCK_TYPE_COMMON,
2cb681b6
ML
1588 AMD_IP_BLOCK_TYPE_IH,
1589 };
a90ad3c2 1590
2cb681b6
ML
1591 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1592 int j;
1593 struct amdgpu_ip_block *block;
a90ad3c2 1594
2cb681b6
ML
1595 for (j = 0; j < adev->num_ip_blocks; j++) {
1596 block = &adev->ip_blocks[j];
1597
1598 if (block->version->type != ip_order[i] ||
1599 !block->status.valid)
1600 continue;
1601
1602 r = block->version->funcs->hw_init(adev);
1603 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
a90ad3c2
ML
1604 }
1605 }
1606
1607 return 0;
1608}
1609
06ec9070 1610static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
a90ad3c2
ML
1611{
1612 int i, r;
1613
2cb681b6
ML
1614 static enum amd_ip_block_type ip_order[] = {
1615 AMD_IP_BLOCK_TYPE_SMC,
ef4c166d 1616 AMD_IP_BLOCK_TYPE_PSP,
2cb681b6
ML
1617 AMD_IP_BLOCK_TYPE_DCE,
1618 AMD_IP_BLOCK_TYPE_GFX,
1619 AMD_IP_BLOCK_TYPE_SDMA,
257deb8c
FM
1620 AMD_IP_BLOCK_TYPE_UVD,
1621 AMD_IP_BLOCK_TYPE_VCE
2cb681b6 1622 };
a90ad3c2 1623
2cb681b6
ML
1624 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1625 int j;
1626 struct amdgpu_ip_block *block;
a90ad3c2 1627
2cb681b6
ML
1628 for (j = 0; j < adev->num_ip_blocks; j++) {
1629 block = &adev->ip_blocks[j];
1630
1631 if (block->version->type != ip_order[i] ||
1632 !block->status.valid)
1633 continue;
1634
1635 r = block->version->funcs->hw_init(adev);
1636 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
a90ad3c2
ML
1637 }
1638 }
1639
1640 return 0;
1641}
1642
06ec9070 1643static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
d38ceaf9
AD
1644{
1645 int i, r;
1646
a90ad3c2
ML
1647 for (i = 0; i < adev->num_ip_blocks; i++) {
1648 if (!adev->ip_blocks[i].status.valid)
1649 continue;
a90ad3c2
ML
1650 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1651 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
fcf0649f
CZ
1652 adev->ip_blocks[i].version->type ==
1653 AMD_IP_BLOCK_TYPE_IH) {
1654 r = adev->ip_blocks[i].version->funcs->resume(adev);
1655 if (r) {
1656 DRM_ERROR("resume of IP block <%s> failed %d\n",
1657 adev->ip_blocks[i].version->funcs->name, r);
1658 return r;
1659 }
a90ad3c2
ML
1660 }
1661 }
1662
1663 return 0;
1664}
1665
06ec9070 1666static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
d38ceaf9
AD
1667{
1668 int i, r;
1669
1670 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1671 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1672 continue;
fcf0649f
CZ
1673 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1674 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1675 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH )
1676 continue;
a1255107 1677 r = adev->ip_blocks[i].version->funcs->resume(adev);
2c1a2784 1678 if (r) {
a1255107
AD
1679 DRM_ERROR("resume of IP block <%s> failed %d\n",
1680 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1681 return r;
2c1a2784 1682 }
d38ceaf9
AD
1683 }
1684
1685 return 0;
1686}
1687
06ec9070 1688static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
fcf0649f
CZ
1689{
1690 int r;
1691
06ec9070 1692 r = amdgpu_device_ip_resume_phase1(adev);
fcf0649f
CZ
1693 if (r)
1694 return r;
06ec9070 1695 r = amdgpu_device_ip_resume_phase2(adev);
fcf0649f
CZ
1696
1697 return r;
1698}
1699
4e99a44e 1700static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 1701{
6867e1b5
ML
1702 if (amdgpu_sriov_vf(adev)) {
1703 if (adev->is_atom_fw) {
1704 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
1705 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1706 } else {
1707 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
1708 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1709 }
1710
1711 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
1712 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
a5bde2f9 1713 }
048765ad
AR
1714}
1715
4562236b
HW
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
{
	switch (asic_type) {
#if defined(CONFIG_DRM_AMD_DC)
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_TONGA:
	case CHIP_FIJI:
#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
		return amdgpu_dc != 0;
#endif
	case CHIP_VEGA10:
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
#endif
		return amdgpu_dc != 0;
#endif
	default:
		return false;
	}
}

/**
 * amdgpu_device_has_dc_support - check if dc is supported
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true for supported, false for not supported
 */
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	return amdgpu_device_asic_has_dc_support(adev->asic_type);
}

/**
 * amdgpu_device_init - initialize the driver
 *
 * @adev: amdgpu_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	bool runtime = false;
	u32 max_MBps;

	adev->shutdown = false;
	adev->dev = &pdev->dev;
	adev->ddev = ddev;
	adev->pdev = pdev;
	adev->flags = flags;
	adev->asic_type = flags & AMD_ASIC_MASK;
	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
	adev->gmc.gart_size = 512 * 1024 * 1024;
	adev->accel_working = false;
	adev->num_rings = 0;
	adev->mman.buffer_funcs = NULL;
	adev->mman.buffer_funcs_ring = NULL;
	adev->vm_manager.vm_pte_funcs = NULL;
	adev->vm_manager.vm_pte_num_rings = 0;
	adev->gmc.gmc_funcs = NULL;
	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	adev->smc_rreg = &amdgpu_invalid_rreg;
	adev->smc_wreg = &amdgpu_invalid_wreg;
	adev->pcie_rreg = &amdgpu_invalid_rreg;
	adev->pcie_wreg = &amdgpu_invalid_wreg;
	adev->pciep_rreg = &amdgpu_invalid_rreg;
	adev->pciep_wreg = &amdgpu_invalid_wreg;
	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
	adev->didt_rreg = &amdgpu_invalid_rreg;
	adev->didt_wreg = &amdgpu_invalid_wreg;
	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization is all done here so we
	 * can call the functions below without locking issues */
	atomic_set(&adev->irq.ih.lock, 0);
	mutex_init(&adev->firmware.mutex);
	mutex_init(&adev->pm.mutex);
	mutex_init(&adev->gfx.gpu_clock_mutex);
	mutex_init(&adev->srbm_mutex);
	mutex_init(&adev->gfx.pipe_reserve_mutex);
	mutex_init(&adev->grbm_idx_mutex);
	mutex_init(&adev->mn_lock);
	mutex_init(&adev->virt.vf_errors.lock);
	hash_init(adev->mn_hash);
	mutex_init(&adev->lock_reset);

	amdgpu_device_check_arguments(adev);

	spin_lock_init(&adev->mmio_idx_lock);
	spin_lock_init(&adev->smc_idx_lock);
	spin_lock_init(&adev->pcie_idx_lock);
	spin_lock_init(&adev->uvd_ctx_idx_lock);
	spin_lock_init(&adev->didt_idx_lock);
	spin_lock_init(&adev->gc_cac_idx_lock);
	spin_lock_init(&adev->se_cac_idx_lock);
	spin_lock_init(&adev->audio_endpt_idx_lock);
	spin_lock_init(&adev->mm_stats.lock);

	INIT_LIST_HEAD(&adev->shadow_list);
	mutex_init(&adev->shadow_list_lock);

	INIT_LIST_HEAD(&adev->ring_lru_list);
	spin_lock_init(&adev->ring_lru_list_lock);

	INIT_DELAYED_WORK(&adev->late_init_work,
			  amdgpu_device_ip_late_init_func_handler);

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	if (adev->asic_type >= CHIP_BONAIRE) {
		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
	} else {
		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
	}

	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
	if (adev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);

	/* doorbell bar mapping */
	amdgpu_device_doorbell_init(adev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
			break;
		}
	}
	if (adev->rio_mem == NULL)
		DRM_INFO("PCI I/O BAR is not found.\n");

	/* early init functions */
	r = amdgpu_device_ip_early_init(adev);
	if (r)
		return r;

	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);

	if (amdgpu_device_is_px(ddev))
		runtime = true;
	if (!pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_register_client(adev->pdev,
					       &amdgpu_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);

	if (amdgpu_emu_mode == 1)
		goto fence_driver_init;

	/* Read BIOS */
	if (!amdgpu_get_bios(adev)) {
		r = -EINVAL;
		goto failed;
	}

	r = amdgpu_atombios_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
		goto failed;
	}

	/* detect if we are with an SRIOV vbios */
	amdgpu_device_detect_sriov_bios(adev);

	/* Post card if necessary */
	if (amdgpu_device_need_post(adev)) {
		if (!adev->bios) {
			dev_err(adev->dev, "no vBIOS found\n");
			r = -EINVAL;
			goto failed;
		}
		DRM_INFO("GPU posting now...\n");
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r) {
			dev_err(adev->dev, "gpu post error!\n");
			goto failed;
		}
	}

	if (adev->is_atom_fw) {
		/* Initialize clocks */
		r = amdgpu_atomfirmware_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
			goto failed;
		}
	} else {
		/* Initialize clocks */
		r = amdgpu_atombios_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
			goto failed;
		}
		/* init i2c buses */
		if (!amdgpu_device_has_dc_support(adev))
			amdgpu_atombios_i2c_init(adev);
	}

fence_driver_init:
	/* Fence driver */
	r = amdgpu_fence_driver_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
		goto failed;
	}

	/* init the mode config */
	drm_mode_config_init(adev->ddev);

	r = amdgpu_device_ip_init(adev);
	if (r) {
		/* failed in exclusive mode due to timeout */
		if (amdgpu_sriov_vf(adev) &&
		    !amdgpu_sriov_runtime(adev) &&
		    amdgpu_virt_mmio_blocked(adev) &&
		    !amdgpu_virt_wait_reset(adev)) {
			dev_err(adev->dev, "VF exclusive mode timeout\n");
			/* Don't send request since VF is inactive. */
			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
			adev->virt.ops = NULL;
			r = -EAGAIN;
			goto failed;
		}
		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
		amdgpu_device_ip_fini(adev);
		goto failed;
	}

	adev->accel_working = true;

	amdgpu_vm_check_compute_bug(adev);

	/* Initialize the buffer migration limit. */
	if (amdgpu_moverate >= 0)
		max_MBps = amdgpu_moverate;
	else
		max_MBps = 8; /* Allow 8 MB/s. */
	/* Get a log2 for easy divisions. */
	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));

	r = amdgpu_ib_pool_init(adev);
	if (r) {
		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
		goto failed;
	}

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_init_data_exchange(adev);

	amdgpu_fbdev_init(adev);

	r = amdgpu_pm_sysfs_init(adev);
	if (r)
		DRM_ERROR("registering pm debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_gem_init(adev);
	if (r)
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_firmware_init(adev);
	if (r)
		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_init(adev);
	if (r)
		DRM_ERROR("Creating debugfs files failed (%d).\n", r);

	if ((amdgpu_testing & 1)) {
		if (adev->accel_working)
			amdgpu_test_moves(adev);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
	}
	if (amdgpu_benchmarking) {
		if (adev->accel_working)
			amdgpu_benchmark(adev, amdgpu_benchmarking);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
	}

	/* enable clockgating, etc. after ib tests, etc. since some blocks require
	 * explicit gating rather than handling it automatically.
	 */
	r = amdgpu_device_ip_late_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
		goto failed;
	}

	return 0;

failed:
	amdgpu_vf_error_trans_all(adev);
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);

	return r;
}

/**
 * amdgpu_device_fini - tear down the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void amdgpu_device_fini(struct amdgpu_device *adev)
{
	int r;

	DRM_INFO("amdgpu: finishing device.\n");
	adev->shutdown = true;
	if (adev->mode_info.mode_config_initialized)
		drm_crtc_force_disable_all(adev->ddev);

	amdgpu_ib_pool_fini(adev);
	amdgpu_fence_driver_fini(adev);
	amdgpu_fbdev_fini(adev);
	r = amdgpu_device_ip_fini(adev);
	if (adev->firmware.gpu_info_fw) {
		release_firmware(adev->firmware.gpu_info_fw);
		adev->firmware.gpu_info_fw = NULL;
	}
	adev->accel_working = false;
	cancel_delayed_work_sync(&adev->late_init_work);
	/* free i2c buses */
	if (!amdgpu_device_has_dc_support(adev))
		amdgpu_i2c_fini(adev);

	if (amdgpu_emu_mode != 1)
		amdgpu_atombios_fini(adev);

	kfree(adev->bios);
	adev->bios = NULL;
	if (!pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_unregister_client(adev->pdev);
	if (adev->flags & AMD_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	vga_client_register(adev->pdev, NULL, NULL, NULL);
	if (adev->rio_mem)
		pci_iounmap(adev->pdev, adev->rio_mem);
	adev->rio_mem = NULL;
	iounmap(adev->rmmio);
	adev->rmmio = NULL;
	amdgpu_device_doorbell_fini(adev);
	amdgpu_pm_sysfs_fini(adev);
	amdgpu_debugfs_regs_cleanup(adev);
}


/*
 * Suspend & resume.
 */
/**
 * amdgpu_device_suspend - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: true to power the PCI device down (D3hot), false to only reset it
 * @fbcon: notify the fbdev console of the suspend
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
{
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	adev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	if (!amdgpu_device_has_dc_support(adev)) {
		/* turn off display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
		}
		drm_modeset_unlock_all(dev);
	}

	amdgpu_amdkfd_suspend(adev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
		struct amdgpu_bo *robj;

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				amdgpu_bo_unpin(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_amdgpu_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
			r = amdgpu_bo_reserve(robj, true);
			if (r == 0) {
				amdgpu_bo_unpin(robj);
				amdgpu_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);

	amdgpu_fence_driver_suspend(adev);

	r = amdgpu_device_ip_suspend(adev);

	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	amdgpu_bo_evict_vram(adev);

	pci_save_state(dev->pdev);
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	} else {
		r = amdgpu_asic_reset(adev);
		if (r)
			DRM_ERROR("amdgpu asic reset failed\n");
	}

	if (fbcon) {
		console_lock();
		amdgpu_fbdev_set_suspend(adev, 1);
		console_unlock();
	}
	return 0;
}

/**
 * amdgpu_device_resume - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: true to re-enable the PCI device (D0) before resuming
 * @fbcon: notify the fbdev console of the resume
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_crtc *crtc;
	int r = 0;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon)
		console_lock();

	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		r = pci_enable_device(dev->pdev);
		if (r)
			goto unlock;
	}

	/* post card */
	if (amdgpu_device_need_post(adev)) {
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r)
			DRM_ERROR("amdgpu asic init failed\n");
	}

	r = amdgpu_device_ip_resume(adev);
	if (r) {
		DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
		goto unlock;
	}
	amdgpu_fence_driver_resume(adev);

	if (resume) {
		r = amdgpu_ib_ring_tests(adev);
		if (r)
			DRM_ERROR("ib ring test failed (%d).\n", r);
	}

	r = amdgpu_device_ip_late_init(adev);
	if (r)
		goto unlock;

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				r = amdgpu_bo_pin(aobj,
						  AMDGPU_GEM_DOMAIN_VRAM,
						  &amdgpu_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				amdgpu_bo_unreserve(aobj);
			}
		}
	}
	r = amdgpu_amdkfd_resume(adev);
	if (r)
		return r;

	/* blat the mode back in */
	if (fbcon) {
		if (!amdgpu_device_has_dc_support(adev)) {
			/* pre DCE11 */
			drm_helper_resume_force_mode(dev);

			/* turn on display hw */
			drm_modeset_lock_all(dev);
			list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
			}
			drm_modeset_unlock_all(dev);
		} else {
			/*
			 * There is no equivalent atomic helper to turn on
			 * display, so we defined our own function for this,
			 * once suspend resume is supported by the atomic
			 * framework this will be reworked
			 */
			amdgpu_dm_display_resume(adev);
		}
	}

	drm_kms_helper_poll_enable(dev);

	/*
	 * Most of the connector probing functions try to acquire runtime pm
	 * refs to ensure that the GPU is powered on when connector polling is
	 * performed. Since we're calling this from a runtime PM callback,
	 * trying to acquire rpm refs will cause us to deadlock.
	 *
	 * Since we're guaranteed to be holding the rpm lock, it's safe to
	 * temporarily disable the rpm helpers so this doesn't deadlock us.
	 */
#ifdef CONFIG_PM
	dev->dev->power.disable_depth++;
#endif
	if (!amdgpu_device_has_dc_support(adev))
		drm_helper_hpd_irq_event(dev);
	else
		drm_kms_helper_hotplug_event(dev);
#ifdef CONFIG_PM
	dev->dev->power.disable_depth--;
#endif

	if (fbcon)
		amdgpu_fbdev_set_suspend(adev, 0);

unlock:
	if (fbcon)
		console_unlock();

	return r;
}

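/**
 * amdgpu_device_ip_check_soft_reset - did any IP block hang?
 *
 * @adev: amdgpu_device pointer
 *
 * Walks the hardware IP blocks and runs their check_soft_reset callbacks
 * (where implemented), recording the per-block hang status.
 * Returns true if any block is hung; always returns true for SR-IOV
 * virtual functions.
 */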
static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
{
	int i;
	bool asic_hang = false;

	if (amdgpu_sriov_vf(adev))
		return true;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
			adev->ip_blocks[i].status.hang =
				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
		if (adev->ip_blocks[i].status.hang) {
			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
			asic_hang = true;
		}
	}
	return asic_hang;
}

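/**
 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * Runs the pre_soft_reset callback of every hung IP block that provides
 * one, so the block can quiesce itself before the actual soft reset.
 * Returns 0 on success, negative error code on failure.
 */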
static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

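/**
 * amdgpu_device_ip_need_full_reset - does the asic need a full reset?
 *
 * @adev: amdgpu_device pointer
 *
 * Some blocks (GMC, SMC, ACP, DCE, PSP) cannot be recovered by a soft
 * reset; if any of them is hung, a full asic reset is required instead.
 * Returns true if a full reset is needed, false otherwise.
 */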
static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
			if (adev->ip_blocks[i].status.hang) {
				DRM_INFO("Some blocks need a full reset!\n");
				return true;
			}
		}
	}
	return false;
}

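/**
 * amdgpu_device_ip_soft_reset - run soft reset on hung IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Runs the soft_reset callback of every hung IP block that provides one.
 * Returns 0 on success, negative error code on failure.
 */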
static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->soft_reset) {
			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

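/**
 * amdgpu_device_ip_post_soft_reset - clean up after soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * Runs the post_soft_reset callback of every hung IP block that provides
 * one, after the soft reset has been performed.
 * Returns 0 on success, negative error code on failure.
 */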
static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->post_soft_reset)
			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
		if (r)
			return r;
	}

	return 0;
}

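/**
 * amdgpu_device_recover_vram_from_shadow - restore a BO from its shadow copy
 *
 * @adev: amdgpu_device pointer
 * @ring: ring to run the copy on
 * @bo: buffer object to restore
 * @fence: returned fence for the scheduled copy, if any
 *
 * If @bo has a shadow BO and is still resident in VRAM, schedule a copy
 * from the shadow back into VRAM.  Used after a GPU reset when the VRAM
 * contents may have been lost.
 * Returns 0 on success, negative error code on failure.
 */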
static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
						  struct amdgpu_ring *ring,
						  struct amdgpu_bo *bo,
						  struct dma_fence **fence)
{
	uint32_t domain;
	int r;

	if (!bo->shadow)
		return 0;

	r = amdgpu_bo_reserve(bo, true);
	if (r)
		return r;
	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	/* if bo has been evicted, then no need to recover */
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		r = amdgpu_bo_validate(bo->shadow);
		if (r) {
			DRM_ERROR("bo validate failed!\n");
			goto err;
		}

		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
						  NULL, fence, true);
		if (r) {
			DRM_ERROR("recover page table failed!\n");
			goto err;
		}
	}
err:
	amdgpu_bo_unreserve(bo);
	return r;
}

/*
 * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
 *
 * @adev: amdgpu device pointer
 * @reset_flags: output param that tells the caller the reset result
 *
 * Attempt a soft reset, falling back to a full reset if needed, and
 * reinitialize the ASIC.
 * Returns 0 on success, otherwise a negative error code.
 */
static int amdgpu_device_reset(struct amdgpu_device *adev,
			       uint64_t *reset_flags)
{
	bool need_full_reset, vram_lost = 0;
	int r;

	need_full_reset = amdgpu_device_ip_need_full_reset(adev);

	if (!need_full_reset) {
		amdgpu_device_ip_pre_soft_reset(adev);
		r = amdgpu_device_ip_soft_reset(adev);
		amdgpu_device_ip_post_soft_reset(adev);
		if (r || amdgpu_device_ip_check_soft_reset(adev)) {
			DRM_INFO("soft reset failed, will fallback to full reset!\n");
			need_full_reset = true;
		}
	}

	if (need_full_reset) {
		r = amdgpu_device_ip_suspend(adev);

retry:
		r = amdgpu_asic_reset(adev);
		/* post card */
		amdgpu_atom_asic_init(adev->mode_info.atom_context);

		if (!r) {
			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
			r = amdgpu_device_ip_resume_phase1(adev);
			if (r)
				goto out;

			vram_lost = amdgpu_device_check_vram_lost(adev);
			if (vram_lost) {
				DRM_ERROR("VRAM is lost!\n");
				atomic_inc(&adev->vram_lost_counter);
			}

			r = amdgpu_gtt_mgr_recover(
				&adev->mman.bdev.man[TTM_PL_TT]);
			if (r)
				goto out;

			r = amdgpu_device_ip_resume_phase2(adev);
			if (r)
				goto out;

			if (vram_lost)
				amdgpu_device_fill_reset_magic(adev);
		}
	}

out:
	if (!r) {
		amdgpu_irq_gpu_reset_resume_helper(adev);
		r = amdgpu_ib_ring_tests(adev);
		if (r) {
			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
			r = amdgpu_device_ip_suspend(adev);
			need_full_reset = true;
			goto retry;
		}
	}

	if (reset_flags) {
		if (vram_lost)
			(*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;

		if (need_full_reset)
			(*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
	}

	return r;
}

/*
 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
 *
 * @adev: amdgpu device pointer
 * @reset_flags: output param that tells the caller the reset result
 * @from_hypervisor: request a full GPU reset (VF FLR) from the hypervisor
 *
 * Perform a VF FLR and reinitialize the ASIC.
 * Returns 0 on success, otherwise a negative error code.
 */
static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
				     uint64_t *reset_flags,
				     bool from_hypervisor)
{
	int r;

	if (from_hypervisor)
		r = amdgpu_virt_request_full_gpu(adev, true);
	else
		r = amdgpu_virt_reset_gpu(adev);
	if (r)
		return r;

	/* Resume IP prior to SMC */
	r = amdgpu_device_ip_reinit_early_sriov(adev);
	if (r)
		goto error;

	/* we need to recover gart prior to running SMC/CP/SDMA resume */
	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);

	/* now we are okay to resume SMC/CP/SDMA */
	r = amdgpu_device_ip_reinit_late_sriov(adev);
	if (r)
		goto error;

	amdgpu_irq_gpu_reset_resume_helper(adev);
	r = amdgpu_ib_ring_tests(adev);
	if (r)
		dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);

error:
	/* release full control of GPU after ib test */
	amdgpu_virt_release_full_gpu(adev, true);

	if (reset_flags) {
		if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
			(*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
			atomic_inc(&adev->vram_lost_counter);
		}

		/* VF FLR or hotlink reset is always full-reset */
		(*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
	}

	return r;
}

/**
 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
 *
 * @adev: amdgpu device pointer
 * @job: which job triggered the hang
 * @force: forces reset regardless of amdgpu_gpu_recovery
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job, bool force)
{
	struct drm_atomic_state *state = NULL;
	uint64_t reset_flags = 0;
	int i, r, resched;

	if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
		return 0;
	}

	if (!force && (amdgpu_gpu_recovery == 0 ||
			(amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) {
		DRM_INFO("GPU recovery disabled.\n");
		return 0;
	}

	dev_info(adev->dev, "GPU reset begin!\n");

	mutex_lock(&adev->lock_reset);
	atomic_inc(&adev->gpu_reset_counter);
	adev->in_gpu_reset = 1;

	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
	/* store modesetting */
	if (amdgpu_device_has_dc_support(adev))
		state = drm_atomic_helper_suspend(adev->ddev);

	/* block scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		/* only focus on the ring that hit the timeout if @job is not NULL */
		if (job && job->ring->idx != i)
			continue;

		kthread_park(ring->sched.thread);
		drm_sched_hw_job_reset(&ring->sched, &job->base);

		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
		amdgpu_fence_driver_force_completion(ring);
	}

	if (amdgpu_sriov_vf(adev))
		r = amdgpu_device_reset_sriov(adev, &reset_flags, job ? false : true);
	else
		r = amdgpu_device_reset(adev, &reset_flags);

	if (!r) {
		if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) ||
		    (reset_flags & AMDGPU_RESET_INFO_VRAM_LOST)) {
			struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
			struct amdgpu_bo *bo, *tmp;
			struct dma_fence *fence = NULL, *next = NULL;

			DRM_INFO("recover vram bo from shadow\n");
			mutex_lock(&adev->shadow_list_lock);
			list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
				next = NULL;
				amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
				if (fence) {
					r = dma_fence_wait(fence, false);
					if (r) {
						WARN(r, "recovery from shadow isn't completed\n");
						break;
					}
				}

				dma_fence_put(fence);
				fence = next;
			}
			mutex_unlock(&adev->shadow_list_lock);
			if (fence) {
				r = dma_fence_wait(fence, false);
				if (r)
					WARN(r, "recovery from shadow isn't completed\n");
			}
			dma_fence_put(fence);
		}

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			/* only focus on the ring that hit the timeout if @job is not NULL */
			if (job && job->ring->idx != i)
				continue;

			drm_sched_job_recovery(&ring->sched);
			kthread_unpark(ring->sched.thread);
		}
	} else {
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			/* only focus on the ring that hit the timeout if @job is not NULL */
			if (job && job->ring->idx != i)
				continue;

			kthread_unpark(adev->rings[i]->sched.thread);
		}
	}

	if (amdgpu_device_has_dc_support(adev)) {
		if (drm_atomic_helper_resume(adev->ddev, state))
			dev_info(adev->dev, "drm resume failed:%d\n", r);
		amdgpu_dm_display_resume(adev);
	} else {
		drm_helper_resume_force_mode(adev->ddev);
	}

	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);

	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
	} else {
		dev_info(adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
	}

	amdgpu_vf_error_trans_all(adev);
	adev->in_gpu_reset = 0;
	mutex_unlock(&adev->lock_reset);
	return r;
}

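/**
 * amdgpu_device_get_pcie_info - fill in the PCIe capability masks
 *
 * @adev: amdgpu_device pointer
 *
 * Determines the supported PCIe link speeds and widths, honouring the
 * amdgpu_pcie_gen_cap and amdgpu_pcie_lane_cap module overrides, and
 * falls back to the defaults when the capabilities cannot be queried
 * (e.g. for devices on the root bus, such as APUs).
 */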
void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
	u32 mask;
	int ret;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask == 0) {
		ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
		if (!ret) {
			adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);

			if (mask & DRM_PCIE_SPEED_25)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
			if (mask & DRM_PCIE_SPEED_50)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
			if (mask & DRM_PCIE_SPEED_80)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
		} else {
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
		if (!ret) {
			switch (mask) {
			case 32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		} else {
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		}
	}
}
