drm/amdgpu: rename ip block helper functions
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGA10",
	"RAVEN",
	"LAST",
};

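/**
 * amdgpu_device_is_px - check if the device is a PX/HG (hybrid graphics) part
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with HG/PX power control,
 * otherwise return false.
 */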
bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
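/**
 * amdgpu_mm_rreg - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */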
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_rreg(adev, reg);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

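/**
 * amdgpu_mm_wreg - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */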
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_wreg(adev, reg, v);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

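/**
 * amdgpu_io_rreg - read an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 *
 * Returns the 32 bit value from the offset specified.
 */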
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

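/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 * Returns 0 on success, error code on failure.
 */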
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

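/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */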
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb >> 3, adev->wb.used);
}

/**
 * amdgpu_device_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at base address provided
 * as parameter.
 */
void amdgpu_device_vram_location(struct amdgpu_device *adev,
				 struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_device_gart_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_device_gart_location(struct amdgpu_device *adev,
				 struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = adev->mc.mc_mask - mc->vram_end;
	size_bf = mc->vram_start;
	if (size_bf > size_af) {
		if (mc->gart_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_bf;
		}
		mc->gart_start = 0;
	} else {
		if (mc->gart_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_af;
		}
		/* VCE doesn't like it when BOs cross a 4GB segment, so align
		 * the GART base on a 4GB boundary as well.
		 */
		mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
	}
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}

/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	u64 space_needed = roundup_pow_of_two(adev->mc.real_vram_size);
	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & IORESOURCE_MEM_64 &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_need_post - check if the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if post is needed, false if not.
 */
bool amdgpu_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old smc fw still need driver do vPost otherwise gpu hang, while
		 * those smc fw version above 22.15 doesn't have this flaw, so we force
		 * vpost executed for smc version below 22.15
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page)
		return 0;
	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (adev->dummy_page.page == NULL)
		return -ENOMEM;
	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(adev->dummy_page.page);
		adev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page == NULL)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(adev->dummy_page.page);
	adev->dummy_page.page = NULL;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

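/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines the number of bits in page table versus
 * page directory; a page is 4KB so we have 12 bits offset, minimum 9
 * bits in the page table and the remaining bits are in the page directory.
 */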
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}

static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !is_power_of_2(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}

	if (amdgpu_lockup_timeout == 0) {
		dev_warn(adev->dev, "lockup_timeout must be > 0, adjusting to 10000\n");
		amdgpu_lockup_timeout = 10000;
	}
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

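/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */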
int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

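/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */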
int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;

}

bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;

}

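/**
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
 */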
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
		  ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

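/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display.  This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * Parses the module parameter and configures the number of crtcs.
 */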
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

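/**
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
 */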
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}

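/**
 * amdgpu_device_ip_early_init - run early init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Early initialization pass for hardware IPs. The hardware IPs that make
 * up each asic are discovered and each IP's early_init callback is run.
 * This is the first stage in initializing the asic.
 * Returns 0 on success, negative error code on failure.
 */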
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		if (adev->asic_type == CHIP_RAVEN)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	amdgpu_amdkfd_device_probe(adev);

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return -EAGAIN;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}

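/**
 * amdgpu_device_ip_init - run init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main initialization pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
 * are run. sw_init initializes the software state associated with each IP
 * and hw_init initializes the hardware associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */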
static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.sw = true;
		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_device_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				return r;
			}
			r = amdgpu_device_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					return r;
				}
			}
		}
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		/* gmc hw init is done early */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	amdgpu_amdkfd_device_init(adev);

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, true);

	return 0;
}

static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}

static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
	return !!memcmp(adev->gart.ptr, adev->reset_magic,
			AMDGPU_RESET_MAGIC_NUM);
}

static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_GATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}
	return 0;
}

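/**
 * amdgpu_device_ip_late_init - run late init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Late initialization pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the late_init callbacks are run.
 * late_init covers any special initialization that an IP requires
 * after all of the IPs have been initialized or something that needs to
 * happen late in the init process.
 * Returns 0 on success, negative error code on failure.
 */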
static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.late_initialized = true;
		}
	}

	mod_delayed_work(system_wq, &adev->late_init_work,
			 msecs_to_jiffies(AMDGPU_RESUME_MS));

	amdgpu_device_fill_reset_magic(adev);

	return 0;
}

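/**
 * amdgpu_device_ip_fini - run fini for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main teardown pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
 * are run. hw_fini tears down the hardware associated with each IP
 * and sw_fini tears down any software state associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */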
static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_amdkfd_device_fini(adev);
	/* need to disable SMC first */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
			/* XXX handle errors */
			if (r) {
				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
			adev->ip_blocks[i].status.hw = false;
			break;
		}
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_free_static_csa(adev);
			amdgpu_device_wb_fini(adev);
			amdgpu_device_vram_scratch_fini(adev);
		}

		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}

		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}

		adev->ip_blocks[i].status.hw = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
		adev->ip_blocks[i].status.late_initialized = false;
	}

	if (amdgpu_sriov_vf(adev))
		if (amdgpu_virt_release_full_gpu(adev, false))
			DRM_ERROR("failed to release exclusive mode on fini\n");

	return 0;
}

static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, late_init_work.work);
	amdgpu_device_ip_late_set_cg_state(adev);
}

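/**
 * amdgpu_device_ip_suspend - run suspend for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main suspend function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked, clockgating is disabled and the
 * suspend callbacks are run. suspend puts the hardware and software state
 * in each IP into a state suitable for suspend.
 * Returns 0 on success, negative error code on failure.
 */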
int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
	int i, r;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	/* ungate SMC block first */
	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
						   AMD_CG_STATE_UNGATE);
	if (r) {
		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* ungate blocks so that suspend can properly shut them down */
		if (i != AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
		}
		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, false);

	return 0;
}

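/**
 * amdgpu_device_ip_reinit_early_sriov - reinit early hw IPs after a reset
 *
 * @adev: amdgpu_device pointer
 *
 * First re-init pass used when recovering an SR-IOV virtual function:
 * only the GMC, COMMON and IH blocks are re-initialized, in that order,
 * so that memory access and interrupts work for the remaining blocks.
 * Returns 0 on success.
 */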
static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_GMC,
		AMD_IP_BLOCK_TYPE_COMMON,
		AMD_IP_BLOCK_TYPE_IH,
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
		}
	}

	return 0;
}

static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_SMC,
		AMD_IP_BLOCK_TYPE_PSP,
		AMD_IP_BLOCK_TYPE_DCE,
		AMD_IP_BLOCK_TYPE_GFX,
		AMD_IP_BLOCK_TYPE_SDMA,
		AMD_IP_BLOCK_TYPE_UVD,
		AMD_IP_BLOCK_TYPE_VCE
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
		}
	}

	return 0;
}

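/**
 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * First resume function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the resume callbacks are run for
 * COMMON, GMC, and IH. resume puts the hardware into a functional state
 * after a suspend and updates the software state as necessary. This
 * function is also used for restoring the GPU after a GPU reset.
 * Returns 0 on success, negative error code on failure.
 */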
static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
			r = adev->ip_blocks[i].version->funcs->resume(adev);
			if (r) {
				DRM_ERROR("resume of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}

static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
	}

	return 0;
}

static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_device_ip_resume_phase1(adev);
	if (r)
		return r;
	r = amdgpu_device_ip_resume_phase2(adev);

	return r;
}

4e99a44e 1726static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 1727{
6867e1b5
ML
1728 if (amdgpu_sriov_vf(adev)) {
1729 if (adev->is_atom_fw) {
1730 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
1731 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1732 } else {
1733 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
1734 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1735 }
1736
1737 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
1738 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
a5bde2f9 1739 }
048765ad
AR
1740}
1741
4562236b
HW
1742bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
1743{
1744 switch (asic_type) {
1745#if defined(CONFIG_DRM_AMD_DC)
1746 case CHIP_BONAIRE:
1747 case CHIP_HAWAII:
0d6fbccb 1748 case CHIP_KAVERI:
4562236b
HW
1749 case CHIP_CARRIZO:
1750 case CHIP_STONEY:
1751 case CHIP_POLARIS11:
1752 case CHIP_POLARIS10:
2c8ad2d5 1753 case CHIP_POLARIS12:
4562236b
HW
1754 case CHIP_TONGA:
1755 case CHIP_FIJI:
1756#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
1757 return amdgpu_dc != 0;
4562236b 1758#endif
17b7cf8c
AD
1759 case CHIP_KABINI:
1760 case CHIP_MULLINS:
1761 return amdgpu_dc > 0;
42f8ffa1
HW
1762 case CHIP_VEGA10:
1763#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
fd187853 1764 case CHIP_RAVEN:
42f8ffa1 1765#endif
fd187853 1766 return amdgpu_dc != 0;
4562236b
HW
1767#endif
1768 default:
1769 return false;
1770 }
1771}
1772
1773/**
1774 * amdgpu_device_has_dc_support - check if dc is supported
1775 *
1776 * @adev: amdgpu_device pointer
1777 *
1778 * Returns true for supported, false for not supported
1779 */
1780bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
1781{
2555039d
XY
1782 if (amdgpu_sriov_vf(adev))
1783 return false;
1784
4562236b
HW
1785 return amdgpu_device_asic_has_dc_support(adev->asic_type);
1786}
1787
d38ceaf9
AD
1788/**
1789 * amdgpu_device_init - initialize the driver
1790 *
1791 * @adev: amdgpu_device pointer
1792 * @ddev: drm dev pointer
1793 * @pdev: pci dev pointer
1794 * @flags: driver flags
1795 *
1796 * Initializes the driver info and hw (all asics).
1797 * Returns 0 for success or an error on failure.
1798 * Called at driver startup.
1799 */
1800int amdgpu_device_init(struct amdgpu_device *adev,
1801 struct drm_device *ddev,
1802 struct pci_dev *pdev,
1803 uint32_t flags)
1804{
1805 int r, i;
1806 bool runtime = false;
95844d20 1807 u32 max_MBps;
d38ceaf9
AD
1808
1809 adev->shutdown = false;
1810 adev->dev = &pdev->dev;
1811 adev->ddev = ddev;
1812 adev->pdev = pdev;
1813 adev->flags = flags;
2f7d10b3 1814 adev->asic_type = flags & AMD_ASIC_MASK;
d38ceaf9 1815 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
6f02a696 1816 adev->mc.gart_size = 512 * 1024 * 1024;
d38ceaf9
AD
1817 adev->accel_working = false;
1818 adev->num_rings = 0;
1819 adev->mman.buffer_funcs = NULL;
1820 adev->mman.buffer_funcs_ring = NULL;
1821 adev->vm_manager.vm_pte_funcs = NULL;
2d55e45a 1822 adev->vm_manager.vm_pte_num_rings = 0;
d38ceaf9 1823 adev->gart.gart_funcs = NULL;
f54d1867 1824 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
b8866c26 1825 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
d38ceaf9
AD
1826
1827 adev->smc_rreg = &amdgpu_invalid_rreg;
1828 adev->smc_wreg = &amdgpu_invalid_wreg;
1829 adev->pcie_rreg = &amdgpu_invalid_rreg;
1830 adev->pcie_wreg = &amdgpu_invalid_wreg;
36b9a952
HR
1831 adev->pciep_rreg = &amdgpu_invalid_rreg;
1832 adev->pciep_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
1833 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
1834 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
1835 adev->didt_rreg = &amdgpu_invalid_rreg;
1836 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
1837 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
1838 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
1839 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
1840 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1841
3e39ab90
AD
1842 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1843 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
1844 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
1845
1846 /* mutex initializations are all done here so we
1847 * can recall functions without running into locking issues */
d38ceaf9 1848 atomic_set(&adev->irq.ih.lock, 0);
0e5ca0d1 1849 mutex_init(&adev->firmware.mutex);
d38ceaf9
AD
1850 mutex_init(&adev->pm.mutex);
1851 mutex_init(&adev->gfx.gpu_clock_mutex);
1852 mutex_init(&adev->srbm_mutex);
b8866c26 1853 mutex_init(&adev->gfx.pipe_reserve_mutex);
d38ceaf9 1854 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9 1855 mutex_init(&adev->mn_lock);
e23b74aa 1856 mutex_init(&adev->virt.vf_errors.lock);
d38ceaf9 1857 hash_init(adev->mn_hash);
13a752e3 1858 mutex_init(&adev->lock_reset);
d38ceaf9 1859
06ec9070 1860 amdgpu_device_check_arguments(adev);
d38ceaf9 1861
d38ceaf9
AD
1862 spin_lock_init(&adev->mmio_idx_lock);
1863 spin_lock_init(&adev->smc_idx_lock);
1864 spin_lock_init(&adev->pcie_idx_lock);
1865 spin_lock_init(&adev->uvd_ctx_idx_lock);
1866 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 1867 spin_lock_init(&adev->gc_cac_idx_lock);
16abb5d2 1868 spin_lock_init(&adev->se_cac_idx_lock);
d38ceaf9 1869 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 1870 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 1871
0c4e7fa5
CZ
1872 INIT_LIST_HEAD(&adev->shadow_list);
1873 mutex_init(&adev->shadow_list_lock);
1874
795f2813
AR
1875 INIT_LIST_HEAD(&adev->ring_lru_list);
1876 spin_lock_init(&adev->ring_lru_list_lock);
1877
06ec9070
AD
1878 INIT_DELAYED_WORK(&adev->late_init_work,
1879 amdgpu_device_ip_late_init_func_handler);
2dc80b00 1880
0fa49558
AX
1881 /* Registers mapping */
1882 /* TODO: block userspace mapping of io register */
da69c161
KW
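 /* CIK and newer asics expose the MMIO register aperture at PCI BAR 5,
 * while the older SI parts use BAR 2.
 */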
1883 if (adev->asic_type >= CHIP_BONAIRE) {
1884 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
1885 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
1886 } else {
1887 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
1888 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
1889 }
d38ceaf9 1890
d38ceaf9
AD
1891 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
1892 if (adev->rmmio == NULL) {
1893 return -ENOMEM;
1894 }
1895 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
1896 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
1897
705e519e 1898 /* doorbell bar mapping */
06ec9070 1899 amdgpu_device_doorbell_init(adev);
d38ceaf9
AD
1900
1901 /* io port mapping */
1902 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1903 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
1904 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
1905 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
1906 break;
1907 }
1908 }
1909 if (adev->rio_mem == NULL)
b64a18c5 1910 DRM_INFO("PCI I/O BAR not found.\n");
d38ceaf9
AD
1911
1912 /* early init functions */
06ec9070 1913 r = amdgpu_device_ip_early_init(adev);
d38ceaf9
AD
1914 if (r)
1915 return r;
1916
1917 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
1918 /* this will fail for cards that aren't VGA class devices, just
1919 * ignore it */
06ec9070 1920 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
d38ceaf9
AD
1921
1922 if (amdgpu_runtime_pm == 1)
1923 runtime = true;
e9bef455 1924 if (amdgpu_device_is_px(ddev))
d38ceaf9 1925 runtime = true;
84c8b22e
LW
1926 if (!pci_is_thunderbolt_attached(adev->pdev))
1927 vga_switcheroo_register_client(adev->pdev,
1928 &amdgpu_switcheroo_ops, runtime);
d38ceaf9
AD
1929 if (runtime)
1930 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
1931
1932 /* Read BIOS */
83ba126a
AD
1933 if (!amdgpu_get_bios(adev)) {
1934 r = -EINVAL;
1935 goto failed;
1936 }
f7e9e9fe 1937
d38ceaf9 1938 r = amdgpu_atombios_init(adev);
2c1a2784
AD
1939 if (r) {
1940 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
e23b74aa 1941 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
83ba126a 1942 goto failed;
2c1a2784 1943 }
d38ceaf9 1944
4e99a44e
ML
1945 /* detect if we are running with an SR-IOV vBIOS */
1946 amdgpu_device_detect_sriov_bios(adev);
048765ad 1947
d38ceaf9 1948 /* Post card if necessary */
91fe77eb 1949 if (amdgpu_need_post(adev)) {
d38ceaf9 1950 if (!adev->bios) {
bec86378 1951 dev_err(adev->dev, "no vBIOS found\n");
83ba126a
AD
1952 r = -EINVAL;
1953 goto failed;
d38ceaf9 1954 }
bec86378 1955 DRM_INFO("GPU posting now...\n");
4e99a44e
ML
1956 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
1957 if (r) {
1958 dev_err(adev->dev, "gpu post error!\n");
1959 goto failed;
1960 }
d38ceaf9
AD
1961 }
1962
88b64e95
AD
1963 if (adev->is_atom_fw) {
1964 /* Initialize clocks */
1965 r = amdgpu_atomfirmware_get_clock_info(adev);
1966 if (r) {
1967 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
e23b74aa 1968 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
88b64e95
AD
1969 goto failed;
1970 }
1971 } else {
a5bde2f9
AD
1972 /* Initialize clocks */
1973 r = amdgpu_atombios_get_clock_info(adev);
1974 if (r) {
1975 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
e23b74aa 1976 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
89041940 1977 goto failed;
a5bde2f9
AD
1978 }
1979 /* init i2c buses */
4562236b
HW
1980 if (!amdgpu_device_has_dc_support(adev))
1981 amdgpu_atombios_i2c_init(adev);
2c1a2784 1982 }
d38ceaf9
AD
1983
1984 /* Fence driver */
1985 r = amdgpu_fence_driver_init(adev);
2c1a2784
AD
1986 if (r) {
1987 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
e23b74aa 1988 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
83ba126a 1989 goto failed;
2c1a2784 1990 }
d38ceaf9
AD
1991
1992 /* init the mode config */
1993 drm_mode_config_init(adev->ddev);
1994
06ec9070 1995 r = amdgpu_device_ip_init(adev);
d38ceaf9 1996 if (r) {
8840a387 1997 /* failed in exclusive mode due to timeout */
1998 if (amdgpu_sriov_vf(adev) &&
1999 !amdgpu_sriov_runtime(adev) &&
2000 amdgpu_virt_mmio_blocked(adev) &&
2001 !amdgpu_virt_wait_reset(adev)) {
2002 dev_err(adev->dev, "VF exclusive mode timeout\n");
1daee8b4
PD
2003 /* Don't send request since VF is inactive. */
2004 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
2005 adev->virt.ops = NULL;
8840a387 2006 r = -EAGAIN;
2007 goto failed;
2008 }
06ec9070 2009 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
e23b74aa 2010 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
06ec9070 2011 amdgpu_device_ip_fini(adev);
83ba126a 2012 goto failed;
d38ceaf9
AD
2013 }
2014
2015 adev->accel_working = true;
2016
e59c0205
AX
2017 amdgpu_vm_check_compute_bug(adev);
2018
95844d20
MO
2019 /* Initialize the buffer migration limit. */
2020 if (amdgpu_moverate >= 0)
2021 max_MBps = amdgpu_moverate;
2022 else
2023 max_MBps = 8; /* Allow 8 MB/s. */
2024 /* Get a log2 for easy divisions. */
2025 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
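 /* e.g. the default of 8 MB/s gives log2_max_MBps = 3 (ilog2(8) == 3),
 * so later rate calculations can shift instead of divide.
 */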
2026
d38ceaf9
AD
2027 r = amdgpu_ib_pool_init(adev);
2028 if (r) {
2029 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
e23b74aa 2030 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
83ba126a 2031 goto failed;
d38ceaf9
AD
2032 }
2033
2034 r = amdgpu_ib_ring_tests(adev);
2035 if (r)
2036 DRM_ERROR("ib ring test failed (%d).\n", r);
2037
2dc8f81e
HC
2038 if (amdgpu_sriov_vf(adev))
2039 amdgpu_virt_init_data_exchange(adev);
2040
9bc92b9c
ML
2041 amdgpu_fbdev_init(adev);
2042
d2f52ac8
RZ
2043 r = amdgpu_pm_sysfs_init(adev);
2044 if (r)
2045 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2046
75758255 2047 r = amdgpu_debugfs_gem_init(adev);
3f14e623 2048 if (r)
d38ceaf9 2049 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
d38ceaf9
AD
2050
2051 r = amdgpu_debugfs_regs_init(adev);
3f14e623 2052 if (r)
d38ceaf9 2053 DRM_ERROR("registering register debugfs failed (%d).\n", r);
d38ceaf9 2054
50ab2533 2055 r = amdgpu_debugfs_firmware_init(adev);
3f14e623 2056 if (r)
50ab2533 2057 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
50ab2533 2058
763efb6c 2059 r = amdgpu_debugfs_init(adev);
db95e218 2060 if (r)
763efb6c 2061 DRM_ERROR("Creating debugfs files failed (%d).\n", r);
db95e218 2062
d38ceaf9
AD
2063 if ((amdgpu_testing & 1)) {
2064 if (adev->accel_working)
2065 amdgpu_test_moves(adev);
2066 else
2067 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2068 }
d38ceaf9
AD
2069 if (amdgpu_benchmarking) {
2070 if (adev->accel_working)
2071 amdgpu_benchmark(adev, amdgpu_benchmarking);
2072 else
2073 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2074 }
2075
2076 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2077 * explicit gating rather than handling it automatically.
2078 */
06ec9070 2079 r = amdgpu_device_ip_late_init(adev);
2c1a2784 2080 if (r) {
06ec9070 2081 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
e23b74aa 2082 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
83ba126a 2083 goto failed;
2c1a2784 2084 }
d38ceaf9
AD
2085
2086 return 0;
83ba126a
AD
2087
2088failed:
89041940 2089 amdgpu_vf_error_trans_all(adev);
83ba126a
AD
2090 if (runtime)
2091 vga_switcheroo_fini_domain_pm_ops(adev->dev);
8840a387 2092
83ba126a 2093 return r;
d38ceaf9
AD
2094}
2095
d38ceaf9
AD
2096/**
2097 * amdgpu_device_fini - tear down the driver
2098 *
2099 * @adev: amdgpu_device pointer
2100 *
2101 * Tear down the driver info (all asics).
2102 * Called at driver shutdown.
2103 */
2104void amdgpu_device_fini(struct amdgpu_device *adev)
2105{
2106 int r;
2107
2108 DRM_INFO("amdgpu: finishing device.\n");
2109 adev->shutdown = true;
db2c2a97
PD
2110 if (adev->mode_info.mode_config_initialized)
2111 drm_crtc_force_disable_all(adev->ddev);
b9141cd3 2112
d38ceaf9
AD
2113 amdgpu_ib_pool_fini(adev);
2114 amdgpu_fence_driver_fini(adev);
2115 amdgpu_fbdev_fini(adev);
06ec9070 2116 r = amdgpu_device_ip_fini(adev);
ab4fe3e1
HR
2117 if (adev->firmware.gpu_info_fw) {
2118 release_firmware(adev->firmware.gpu_info_fw);
2119 adev->firmware.gpu_info_fw = NULL;
2120 }
d38ceaf9 2121 adev->accel_working = false;
2dc80b00 2122 cancel_delayed_work_sync(&adev->late_init_work);
d38ceaf9 2123 /* free i2c buses */
4562236b
HW
2124 if (!amdgpu_device_has_dc_support(adev))
2125 amdgpu_i2c_fini(adev);
d38ceaf9
AD
2126 amdgpu_atombios_fini(adev);
2127 kfree(adev->bios);
2128 adev->bios = NULL;
84c8b22e
LW
2129 if (!pci_is_thunderbolt_attached(adev->pdev))
2130 vga_switcheroo_unregister_client(adev->pdev);
83ba126a
AD
2131 if (adev->flags & AMD_IS_PX)
2132 vga_switcheroo_fini_domain_pm_ops(adev->dev);
d38ceaf9
AD
2133 vga_client_register(adev->pdev, NULL, NULL, NULL);
2134 if (adev->rio_mem)
2135 pci_iounmap(adev->pdev, adev->rio_mem);
2136 adev->rio_mem = NULL;
2137 iounmap(adev->rmmio);
2138 adev->rmmio = NULL;
06ec9070 2139 amdgpu_device_doorbell_fini(adev);
d2f52ac8 2140 amdgpu_pm_sysfs_fini(adev);
d38ceaf9 2141 amdgpu_debugfs_regs_cleanup(adev);
d38ceaf9
AD
2142}
2143
2144
2145/*
2146 * Suspend & resume.
2147 */
2148/**
810ddc3a 2149 * amdgpu_device_suspend - initiate device suspend
d38ceaf9
AD
2150 *
2151 * @dev: drm dev pointer
2152 * @suspend: true puts the device into the D3hot power state
2153 *
2154 * Puts the hw in the suspend state (all asics).
2155 * Returns 0 for success or an error on failure.
2156 * Called at driver suspend.
2157 */
810ddc3a 2158int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
d38ceaf9
AD
2159{
2160 struct amdgpu_device *adev;
2161 struct drm_crtc *crtc;
2162 struct drm_connector *connector;
5ceb54c6 2163 int r;
d38ceaf9
AD
2164
2165 if (dev == NULL || dev->dev_private == NULL) {
2166 return -ENODEV;
2167 }
2168
2169 adev = dev->dev_private;
2170
2171 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2172 return 0;
2173
2174 drm_kms_helper_poll_disable(dev);
2175
4562236b
HW
2176 if (!amdgpu_device_has_dc_support(adev)) {
2177 /* turn off display hw */
2178 drm_modeset_lock_all(dev);
2179 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2180 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2181 }
2182 drm_modeset_unlock_all(dev);
d38ceaf9
AD
2183 }
2184
ba997709
YZ
2185 amdgpu_amdkfd_suspend(adev);
2186
756e6880 2187 /* unpin the front buffers and cursors */
d38ceaf9 2188 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
756e6880 2189 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
d38ceaf9
AD
2190 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2191 struct amdgpu_bo *robj;
2192
756e6880
AD
2193 if (amdgpu_crtc->cursor_bo) {
2194 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2195 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2196 if (r == 0) {
2197 amdgpu_bo_unpin(aobj);
2198 amdgpu_bo_unreserve(aobj);
2199 }
2200 }
2201
d38ceaf9
AD
2202 if (rfb == NULL || rfb->obj == NULL) {
2203 continue;
2204 }
2205 robj = gem_to_amdgpu_bo(rfb->obj);
2206 /* don't unpin kernel fb objects */
2207 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
7a6901d7 2208 r = amdgpu_bo_reserve(robj, true);
d38ceaf9
AD
2209 if (r == 0) {
2210 amdgpu_bo_unpin(robj);
2211 amdgpu_bo_unreserve(robj);
2212 }
2213 }
2214 }
2215 /* evict vram memory */
2216 amdgpu_bo_evict_vram(adev);
2217
5ceb54c6 2218 amdgpu_fence_driver_suspend(adev);
d38ceaf9 2219
cdd61df6 2220 r = amdgpu_device_ip_suspend(adev);
d38ceaf9 2221
a0a71e49
AD
2222 /* evict remaining vram memory
2223 * This second call to evict vram is to evict the gart page table
2224 * using the CPU.
2225 */
d38ceaf9
AD
2226 amdgpu_bo_evict_vram(adev);
2227
2228 pci_save_state(dev->pdev);
2229 if (suspend) {
2230 /* Shut down the device */
2231 pci_disable_device(dev->pdev);
2232 pci_set_power_state(dev->pdev, PCI_D3hot);
74b0b157 2233 } else {
2234 r = amdgpu_asic_reset(adev);
2235 if (r)
2236 DRM_ERROR("amdgpu asic reset failed\n");
d38ceaf9
AD
2237 }
2238
2239 if (fbcon) {
2240 console_lock();
2241 amdgpu_fbdev_set_suspend(adev, 1);
2242 console_unlock();
2243 }
2244 return 0;
2245}
2246
2247/**
810ddc3a 2248 * amdgpu_device_resume - initiate device resume
d38ceaf9
AD
2249 *
2250 * @dev: drm dev pointer
2251 *
2252 * Bring the hw back to operating state (all asics).
2253 * Returns 0 for success or an error on failure.
2254 * Called at driver resume.
2255 */
810ddc3a 2256int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
d38ceaf9
AD
2257{
2258 struct drm_connector *connector;
2259 struct amdgpu_device *adev = dev->dev_private;
756e6880 2260 struct drm_crtc *crtc;
03161a6e 2261 int r = 0;
d38ceaf9
AD
2262
2263 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2264 return 0;
2265
74b0b157 2266 if (fbcon)
d38ceaf9 2267 console_lock();
74b0b157 2268
d38ceaf9
AD
2269 if (resume) {
2270 pci_set_power_state(dev->pdev, PCI_D0);
2271 pci_restore_state(dev->pdev);
74b0b157 2272 r = pci_enable_device(dev->pdev);
03161a6e
HR
2273 if (r)
2274 goto unlock;
d38ceaf9
AD
2275 }
2276
2277 /* post card */
c836fec5 2278 if (amdgpu_need_post(adev)) {
74b0b157 2279 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2280 if (r)
2281 DRM_ERROR("amdgpu asic init failed\n");
2282 }
d38ceaf9 2283
06ec9070 2284 r = amdgpu_device_ip_resume(adev);
e6707218 2285 if (r) {
06ec9070 2286 DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
03161a6e 2287 goto unlock;
e6707218 2288 }
5ceb54c6
AD
2289 amdgpu_fence_driver_resume(adev);
2290
ca198528
FC
2291 if (resume) {
2292 r = amdgpu_ib_ring_tests(adev);
2293 if (r)
2294 DRM_ERROR("ib ring test failed (%d).\n", r);
2295 }
d38ceaf9 2296
06ec9070 2297 r = amdgpu_device_ip_late_init(adev);
03161a6e
HR
2298 if (r)
2299 goto unlock;
d38ceaf9 2300
756e6880
AD
2301 /* pin cursors */
2302 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2303 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2304
2305 if (amdgpu_crtc->cursor_bo) {
2306 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2307 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2308 if (r == 0) {
2309 r = amdgpu_bo_pin(aobj,
2310 AMDGPU_GEM_DOMAIN_VRAM,
2311 &amdgpu_crtc->cursor_addr);
2312 if (r != 0)
2313 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2314 amdgpu_bo_unreserve(aobj);
2315 }
2316 }
2317 }
ba997709
YZ
2318 r = amdgpu_amdkfd_resume(adev);
2319 if (r)
2320 goto unlock;
756e6880 2321
d38ceaf9
AD
2322 /* blat the mode back in */
2323 if (fbcon) {
4562236b
HW
2324 if (!amdgpu_device_has_dc_support(adev)) {
2325 /* pre DCE11 */
2326 drm_helper_resume_force_mode(dev);
2327
2328 /* turn on display hw */
2329 drm_modeset_lock_all(dev);
2330 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2331 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2332 }
2333 drm_modeset_unlock_all(dev);
2334 } else {
2335 /*
2336 * There is no equivalent atomic helper to turn on
2337 * display, so we defined our own function for this,
2338 * once suspend resume is supported by the atomic
2339 * framework this will be reworked
2340 */
2341 amdgpu_dm_display_resume(adev);
d38ceaf9
AD
2342 }
2343 }
2344
2345 drm_kms_helper_poll_enable(dev);
23a1a9e5
L
2346
2347 /*
2348 * Most of the connector probing functions try to acquire runtime pm
2349 * refs to ensure that the GPU is powered on when connector polling is
2350 * performed. Since we're calling this from a runtime PM callback,
2351 * trying to acquire rpm refs will cause us to deadlock.
2352 *
2353 * Since we're guaranteed to be holding the rpm lock, it's safe to
2354 * temporarily disable the rpm helpers so this doesn't deadlock us.
2355 */
2356#ifdef CONFIG_PM
2357 dev->dev->power.disable_depth++;
2358#endif
4562236b
HW
2359 if (!amdgpu_device_has_dc_support(adev))
2360 drm_helper_hpd_irq_event(dev);
2361 else
2362 drm_kms_helper_hotplug_event(dev);
23a1a9e5
L
2363#ifdef CONFIG_PM
2364 dev->dev->power.disable_depth--;
2365#endif
d38ceaf9 2366
03161a6e 2367 if (fbcon)
d38ceaf9 2368 amdgpu_fbdev_set_suspend(adev, 0);
03161a6e
HR
2369
2370unlock:
2371 if (fbcon)
d38ceaf9 2372 console_unlock();
d38ceaf9 2373
03161a6e 2374 return r;
d38ceaf9
AD
2375}
2376
06ec9070 2377static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
63fbf42f
CZ
2378{
2379 int i;
2380 bool asic_hang = false;
2381
f993d628
ML
2382 if (amdgpu_sriov_vf(adev))
2383 return true;
2384
63fbf42f 2385 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2386 if (!adev->ip_blocks[i].status.valid)
63fbf42f 2387 continue;
a1255107
AD
2388 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2389 adev->ip_blocks[i].status.hang =
2390 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2391 if (adev->ip_blocks[i].status.hang) {
2392 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
63fbf42f
CZ
2393 asic_hang = true;
2394 }
2395 }
2396 return asic_hang;
2397}
2398
06ec9070 2399static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
d31a501e
CZ
2400{
2401 int i, r = 0;
2402
2403 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2404 if (!adev->ip_blocks[i].status.valid)
d31a501e 2405 continue;
a1255107
AD
2406 if (adev->ip_blocks[i].status.hang &&
2407 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2408 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
d31a501e
CZ
2409 if (r)
2410 return r;
2411 }
2412 }
2413
2414 return 0;
2415}
2416
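/* Hangs in the blocks below (GMC, SMC, ACP, DCE, PSP) cannot be cured by
 * a per-block soft reset, so any of them being hung forces a full ASIC
 * reset.
 */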
06ec9070 2417static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
35d782fe 2418{
da146d3b
AD
2419 int i;
2420
2421 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2422 if (!adev->ip_blocks[i].status.valid)
da146d3b 2423 continue;
a1255107
AD
2424 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2425 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2426 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
98512bb8
KW
2427 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
2428 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
a1255107 2429 if (adev->ip_blocks[i].status.hang) {
da146d3b
AD
2430 DRM_INFO("Some blocks need a full reset!\n");
2431 return true;
2432 }
2433 }
35d782fe
CZ
2434 }
2435 return false;
2436}
2437
06ec9070 2438static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
35d782fe
CZ
2439{
2440 int i, r = 0;
2441
2442 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2443 if (!adev->ip_blocks[i].status.valid)
35d782fe 2444 continue;
a1255107
AD
2445 if (adev->ip_blocks[i].status.hang &&
2446 adev->ip_blocks[i].version->funcs->soft_reset) {
2447 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
35d782fe
CZ
2448 if (r)
2449 return r;
2450 }
2451 }
2452
2453 return 0;
2454}
2455
06ec9070 2456static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
35d782fe
CZ
2457{
2458 int i, r = 0;
2459
2460 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2461 if (!adev->ip_blocks[i].status.valid)
35d782fe 2462 continue;
a1255107
AD
2463 if (adev->ip_blocks[i].status.hang &&
2464 adev->ip_blocks[i].version->funcs->post_soft_reset)
2465 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
35d782fe
CZ
2466 if (r)
2467 return r;
2468 }
2469
2470 return 0;
2471}
2472
3ad81f16
CZ
2473bool amdgpu_need_backup(struct amdgpu_device *adev)
2474{
2475 if (adev->flags & AMD_IS_APU)
2476 return false;
2477
8854695a 2478 return amdgpu_gpu_recovery;
3ad81f16
CZ
2479}
2480
06ec9070
AD
2481static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
2482 struct amdgpu_ring *ring,
2483 struct amdgpu_bo *bo,
2484 struct dma_fence **fence)
53cdccd5
CZ
2485{
2486 uint32_t domain;
2487 int r;
2488
23d2e504
RH
2489 if (!bo->shadow)
2490 return 0;
2491
1d284797 2492 r = amdgpu_bo_reserve(bo, true);
23d2e504
RH
2493 if (r)
2494 return r;
2495 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2496 /* if bo has been evicted, then no need to recover */
2497 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
82521316
RH
2498 r = amdgpu_bo_validate(bo->shadow);
2499 if (r) {
2500 DRM_ERROR("bo validate failed!\n");
2501 goto err;
2502 }
2503
23d2e504 2504 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
53cdccd5 2505 NULL, fence, true);
23d2e504
RH
2506 if (r) {
2507 DRM_ERROR("recover page table failed!\n");
2508 goto err;
2509 }
2510 }
53cdccd5 2511err:
23d2e504
RH
2512 amdgpu_bo_unreserve(bo);
2513 return r;
53cdccd5
CZ
2514}
2515
5740682e 2516/*
06ec9070 2517 * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
a90ad3c2
ML
2518 *
2519 * @adev: amdgpu device pointer
5740682e 2520 * @reset_flags: output param tells caller the reset result
a90ad3c2 2521 *
5740682e
ML
2522 * attempt a soft reset, or a full reset, and reinitialize the ASIC
2523 * returns 0 on success, a negative error code otherwise
2524*/
06ec9070
AD
2525static int amdgpu_device_reset(struct amdgpu_device *adev,
2526 uint64_t* reset_flags)
a90ad3c2 2527{
5740682e
ML
2528 bool need_full_reset, vram_lost = false;
2529 int r;
a90ad3c2 2530
06ec9070 2531 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
a90ad3c2 2532
5740682e 2533 if (!need_full_reset) {
06ec9070
AD
2534 amdgpu_device_ip_pre_soft_reset(adev);
2535 r = amdgpu_device_ip_soft_reset(adev);
2536 amdgpu_device_ip_post_soft_reset(adev);
2537 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
5740682e
ML
2538 DRM_INFO("soft reset failed, will fallback to full reset!\n");
2539 need_full_reset = true;
2540 }
a90ad3c2 2541
5740682e 2542 }
a90ad3c2 2543
5740682e 2544 if (need_full_reset) {
cdd61df6 2545 r = amdgpu_device_ip_suspend(adev);
a90ad3c2 2546
5740682e 2547retry:
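 /* If the post-resume IB tests fail, the IP blocks are suspended
 * again, need_full_reset is set and we jump back here for another
 * attempt (see the out: label below).
 */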
5740682e 2548 r = amdgpu_asic_reset(adev);
5740682e
ML
2549 /* post card */
2550 amdgpu_atom_asic_init(adev->mode_info.atom_context);
65781c78 2551
5740682e
ML
2552 if (!r) {
2553 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
06ec9070 2554 r = amdgpu_device_ip_resume_phase1(adev);
5740682e
ML
2555 if (r)
2556 goto out;
65781c78 2557
06ec9070 2558 vram_lost = amdgpu_device_check_vram_lost(adev);
5740682e
ML
2559 if (vram_lost) {
2560 DRM_ERROR("VRAM is lost!\n");
2561 atomic_inc(&adev->vram_lost_counter);
2562 }
2563
c1c7ce8f
CK
2564 r = amdgpu_gtt_mgr_recover(
2565 &adev->mman.bdev.man[TTM_PL_TT]);
5740682e
ML
2566 if (r)
2567 goto out;
2568
06ec9070 2569 r = amdgpu_device_ip_resume_phase2(adev);
5740682e
ML
2570 if (r)
2571 goto out;
2572
2573 if (vram_lost)
06ec9070 2574 amdgpu_device_fill_reset_magic(adev);
65781c78 2575 }
5740682e 2576 }
65781c78 2577
5740682e
ML
2578out:
2579 if (!r) {
2580 amdgpu_irq_gpu_reset_resume_helper(adev);
2581 r = amdgpu_ib_ring_tests(adev);
2582 if (r) {
2583 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
cdd61df6 2584 r = amdgpu_device_ip_suspend(adev);
5740682e
ML
2585 need_full_reset = true;
2586 goto retry;
2587 }
2588 }
65781c78 2589
5740682e
ML
2590 if (reset_flags) {
2591 if (vram_lost)
2592 (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
a90ad3c2 2593
5740682e
ML
2594 if (need_full_reset)
2595 (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
65781c78 2596 }
a90ad3c2 2597
5740682e
ML
2598 return r;
2599}
a90ad3c2 2600
5740682e 2601/*
06ec9070 2602 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
5740682e
ML
2603 *
2604 * @adev: amdgpu device pointer
2605 * @reset_flags: output param tells caller the reset result
2606 *
2607 * do a VF FLR and reinitialize the ASIC
2608 * returns 0 on success, a negative error code otherwise
2609*/
06ec9070
AD
2610static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
2611 uint64_t *reset_flags,
2612 bool from_hypervisor)
5740682e
ML
2613{
2614 int r;
2615
2616 if (from_hypervisor)
2617 r = amdgpu_virt_request_full_gpu(adev, true);
2618 else
2619 r = amdgpu_virt_reset_gpu(adev);
2620 if (r)
2621 return r;
a90ad3c2
ML
2622
2623 /* Resume IP prior to SMC */
06ec9070 2624 r = amdgpu_device_ip_reinit_early_sriov(adev);
5740682e
ML
2625 if (r)
2626 goto error;
a90ad3c2
ML
2627
2628 /* we need to recover the gart prior to running SMC/CP/SDMA resume */
c1c7ce8f 2629 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
a90ad3c2
ML
2630
2631 /* now we are okay to resume SMC/CP/SDMA */
06ec9070 2632 r = amdgpu_device_ip_reinit_late_sriov(adev);
5740682e
ML
2633 if (r)
2634 goto error;
a90ad3c2
ML
2635
2636 amdgpu_irq_gpu_reset_resume_helper(adev);
5740682e
ML
2637 r = amdgpu_ib_ring_tests(adev);
2638 if (r)
a90ad3c2
ML
2639 dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
2640
5740682e 2641error:
a90ad3c2
ML
2642 /* release full control of GPU after ib test */
2643 amdgpu_virt_release_full_gpu(adev, true);
2644
5740682e 2645 if (reset_flags) {
75bc6099
ML
2646 if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
2647 (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
2648 atomic_inc(&adev->vram_lost_counter);
2649 }
a90ad3c2 2650
5740682e
ML
2651 /* VF FLR or hotlink reset is always full-reset */
2652 (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
a90ad3c2
ML
2653 }
2654
2655 return r;
2656}
2657
d38ceaf9 2658/**
5740682e 2659 * amdgpu_gpu_recover - reset the asic and recover scheduler
d38ceaf9
AD
2660 *
2661 * @adev: amdgpu device pointer
5740682e 2662 * @job: which job triggered the hang
dcebf026 2663 * @force: forces reset regardless of amdgpu_gpu_recovery
d38ceaf9 2664 *
5740682e 2665 * Attempt to reset the GPU if it has hung (all asics).
d38ceaf9
AD
2666 * Returns 0 for success or an error on failure.
2667 */
dcebf026 2668int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job, bool force)
d38ceaf9 2669{
4562236b 2670 struct drm_atomic_state *state = NULL;
5740682e
ML
2671 uint64_t reset_flags = 0;
2672 int i, r, resched;
fb140b29 2673
06ec9070 2674 if (!amdgpu_device_ip_check_soft_reset(adev)) {
63fbf42f
CZ
2675 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2676 return 0;
2677 }
d38ceaf9 2678
dcebf026
AG
2679 if (!force && (amdgpu_gpu_recovery == 0 ||
2680 (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) {
2681 DRM_INFO("GPU recovery disabled.\n");
2682 return 0;
2683 }
2684
5740682e
ML
2685 dev_info(adev->dev, "GPU reset begin!\n");
2686
13a752e3 2687 mutex_lock(&adev->lock_reset);
d94aed5a 2688 atomic_inc(&adev->gpu_reset_counter);
13a752e3 2689 adev->in_gpu_reset = 1;
d38ceaf9 2690
a3c47d6b
CZ
2691 /* block TTM */
2692 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
4562236b
HW
2693 /* store modesetting */
2694 if (amdgpu_device_has_dc_support(adev))
2695 state = drm_atomic_helper_suspend(adev->ddev);
a3c47d6b 2696
0875dc9e
CZ
2697 /* block scheduler */
2698 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2699 struct amdgpu_ring *ring = adev->rings[i];
2700
51687759 2701 if (!ring || !ring->sched.thread)
0875dc9e 2702 continue;
5740682e
ML
2703
2704 /* only focus on the ring that hit the timeout if @job is not NULL */
2705 if (job && job->ring->idx != i)
2706 continue;
2707
0875dc9e 2708 kthread_park(ring->sched.thread);
1b1f42d8 2709 drm_sched_hw_job_reset(&ring->sched, &job->base);
5740682e 2710
2f9d4084
ML
2711 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2712 amdgpu_fence_driver_force_completion(ring);
0875dc9e 2713 }
d38ceaf9 2714
5740682e 2715 if (amdgpu_sriov_vf(adev))
06ec9070 2716 r = amdgpu_device_reset_sriov(adev, &reset_flags, job ? false : true);
5740682e 2717 else
06ec9070 2718 r = amdgpu_device_reset(adev, &reset_flags);
35d782fe 2719
d38ceaf9 2720 if (!r) {
5740682e
ML
2721 if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) ||
2722 (reset_flags & AMDGPU_RESET_INFO_VRAM_LOST)) {
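 /* VRAM contents may be gone (a full reset on a dGPU, or the explicit
 * VRAM-lost flag): restore the page tables and other shadowed BOs from
 * their GTT shadows, waiting on each copy fence in turn.
 */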
53cdccd5
CZ
2723 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2724 struct amdgpu_bo *bo, *tmp;
f54d1867 2725 struct dma_fence *fence = NULL, *next = NULL;
53cdccd5
CZ
2726
2727 DRM_INFO("recover vram bo from shadow\n");
2728 mutex_lock(&adev->shadow_list_lock);
2729 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
236763d3 2730 next = NULL;
06ec9070 2731 amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
53cdccd5 2732 if (fence) {
f54d1867 2733 r = dma_fence_wait(fence, false);
53cdccd5 2734 if (r) {
1d7b17b0 2735 WARN(r, "recovery from shadow isn't complete\n");
53cdccd5
CZ
2736 break;
2737 }
2738 }
1f465087 2739
f54d1867 2740 dma_fence_put(fence);
53cdccd5
CZ
2741 fence = next;
2742 }
2743 mutex_unlock(&adev->shadow_list_lock);
2744 if (fence) {
f54d1867 2745 r = dma_fence_wait(fence, false);
53cdccd5 2746 if (r)
1d7b17b0 2747 WARN(r, "recovery from shadow isn't complete\n");
53cdccd5 2748 }
f54d1867 2749 dma_fence_put(fence);
53cdccd5 2750 }
5740682e 2751
d38ceaf9
AD
2752 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2753 struct amdgpu_ring *ring = adev->rings[i];
51687759
CZ
2754
2755 if (!ring || !ring->sched.thread)
d38ceaf9 2756 continue;
53cdccd5 2757
5740682e
ML
2758 /* only focus on the ring that hit the timeout if @job is not NULL */
2759 if (job && job->ring->idx != i)
2760 continue;
2761
1b1f42d8 2762 drm_sched_job_recovery(&ring->sched);
0875dc9e 2763 kthread_unpark(ring->sched.thread);
d38ceaf9 2764 }
d38ceaf9 2765 } else {
d38ceaf9 2766 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5740682e
ML
2767 struct amdgpu_ring *ring = adev->rings[i];
2768
2769 if (!ring || !ring->sched.thread)
2770 continue;
2771
2772 /* only focus on the ring that hit the timeout if @job is not NULL */
2773 if (job && job->ring->idx != i)
2774 continue;
2775
2776 kthread_unpark(adev->rings[i]->sched.thread);
d38ceaf9
AD
2777 }
2778 }
2779
4562236b 2780 if (amdgpu_device_has_dc_support(adev)) {
5740682e
ML
2781 if (drm_atomic_helper_resume(adev->ddev, state))
2782 dev_info(adev->dev, "drm resume failed:%d\n", r);
4562236b 2783 amdgpu_dm_display_resume(adev);
5740682e 2784 } else {
4562236b 2785 drm_helper_resume_force_mode(adev->ddev);
5740682e 2786 }
d38ceaf9
AD
2787
2788 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
5740682e 2789
89041940 2790 if (r) {
d38ceaf9 2791 /* bad news, how to tell it to userspace ? */
5740682e
ML
2792 dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
2793 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
2794 } else {
2795 dev_info(adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
89041940 2796 }
d38ceaf9 2797
89041940 2798 amdgpu_vf_error_trans_all(adev);
13a752e3
ML
2799 adev->in_gpu_reset = 0;
2800 mutex_unlock(&adev->lock_reset);
d38ceaf9
AD
2801 return r;
2802}
2803
d0dd7f0c
AD
2804void amdgpu_get_pcie_info(struct amdgpu_device *adev)
2805{
2806 u32 mask;
2807 int ret;
2808
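 /* The amdgpu.pcie_gen_cap and amdgpu.pcie_lane_cap module parameters
 * override anything probed below; when probing through the DRM helpers
 * fails, the compiled-in AMDGPU_DEFAULT_* masks are used instead.
 */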
cd474ba0
AD
2809 if (amdgpu_pcie_gen_cap)
2810 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 2811
cd474ba0
AD
2812 if (amdgpu_pcie_lane_cap)
2813 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 2814
cd474ba0
AD
2815 /* covers APUs as well */
2816 if (pci_is_root_bus(adev->pdev->bus)) {
2817 if (adev->pm.pcie_gen_mask == 0)
2818 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2819 if (adev->pm.pcie_mlw_mask == 0)
2820 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 2821 return;
cd474ba0 2822 }
d0dd7f0c 2823
cd474ba0
AD
2824 if (adev->pm.pcie_gen_mask == 0) {
2825 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
2826 if (!ret) {
2827 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
2828 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
2829 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
2830
2831 if (mask & DRM_PCIE_SPEED_25)
2832 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
2833 if (mask & DRM_PCIE_SPEED_50)
2834 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
2835 if (mask & DRM_PCIE_SPEED_80)
2836 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
2837 } else {
2838 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2839 }
2840 }
2841 if (adev->pm.pcie_mlw_mask == 0) {
2842 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
2843 if (!ret) {
2844 switch (mask) {
2845 case 32:
2846 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
2847 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2848 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2849 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2850 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2851 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2852 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2853 break;
2854 case 16:
2855 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2856 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2857 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2858 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2859 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2860 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2861 break;
2862 case 12:
2863 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2864 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2865 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2866 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2867 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2868 break;
2869 case 8:
2870 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2871 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2872 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2873 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2874 break;
2875 case 4:
2876 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2877 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2878 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2879 break;
2880 case 2:
2881 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2882 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2883 break;
2884 case 1:
2885 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
2886 break;
2887 default:
2888 break;
2889 }
2890 } else {
2891 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c
AD
2892 }
2893 }
2894}
d38ceaf9 2895