drm/amdgpu: query vram type from atombios
[linux-2.6-block.git] drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGA10",
	"RAVEN",
	"LAST",
};

bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_rreg(adev, reg);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_wreg(adev, reg, v);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}
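
/*
 * Illustrative usage sketch (not part of the original file): a hypothetical
 * read-modify-write through the helpers above. Registers inside the mapped
 * MMIO range take the direct readl/writel path; anything else goes through
 * the MM_INDEX/MM_DATA indirect window. The offset and masks are made up.
 */
static inline void amdgpu_example_rmw(struct amdgpu_device *adev)
{
	uint32_t tmp;

	tmp = amdgpu_mm_rreg(adev, 0x1234 /* hypothetical offset */, 0);
	tmp &= ~0x3;	/* clear a hypothetical 2-bit field */
	tmp |= 0x1;	/* set it to 1 */
	amdgpu_mm_wreg(adev, 0x1234, tmp, 0);
}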

u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}
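
/*
 * Illustrative usage sketch (not part of the original file): how a ring's
 * write pointer might be pushed to its doorbell with the helpers above.
 * The doorbell index and wptr values are hypothetical.
 */
static inline void amdgpu_example_ring_doorbell(struct amdgpu_device *adev,
						u32 doorbell_index, u64 wptr)
{
	if (adev->asic_type >= CHIP_VEGA10)
		amdgpu_mm_wdoorbell64(adev, doorbell_index, wptr); /* 64-bit doorbells */
	else
		amdgpu_mm_wdoorbell(adev, doorbell_index, (u32)wptr); /* CIK-style dword */
}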

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
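
/*
 * Illustrative usage sketch (not part of the original file): golden settings
 * are flat {reg, and_mask, or_mask} triplets. The offsets and masks below are
 * made up purely to show the layout; an and_mask of 0xffffffff means "replace
 * the whole register with or_mask" per the helper above.
 */
static const u32 example_golden_settings[] = {
	/* reg,   and_mask,   or_mask */
	0x2a00, 0x0000000f, 0x00000002,	/* RMW of a hypothetical 4-bit field */
	0x2a04, 0xffffffff, 0x12345678,	/* full replacement */
};

static void amdgpu_example_apply_golden(struct amdgpu_device *adev)
{
	amdgpu_device_program_register_sequence(adev, example_golden_settings,
						ARRAY_SIZE(example_golden_settings));
}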

void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}
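
/*
 * Illustrative usage sketch (not part of the original file): allocating a
 * writeback slot, deriving its CPU and GPU addresses, and releasing it.
 * The index returned by amdgpu_device_wb_get() is already a dword offset
 * into the writeback buffer (see the << 3 conversion above).
 */
static int amdgpu_example_use_wb(struct amdgpu_device *adev)
{
	volatile u32 *cpu_addr;
	u64 gpu_addr;
	u32 index;
	int r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);	/* address the GPU writes to */
	cpu_addr = &adev->wb.wb[index];			/* same dword, seen by the CPU */
	(void)gpu_addr;
	(void)cpu_addr;

	amdgpu_device_wb_free(adev, index);
	return 0;
}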

/**
 * amdgpu_device_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter.
 */
void amdgpu_device_vram_location(struct amdgpu_device *adev,
				 struct amdgpu_gmc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_device_gart_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left, we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_device_gart_location(struct amdgpu_device *adev,
				 struct amdgpu_gmc *mc)
{
	u64 size_af, size_bf;

	size_af = adev->gmc.mc_mask - mc->vram_end;
	size_bf = mc->vram_start;
	if (size_bf > size_af) {
		if (mc->gart_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_bf;
		}
		mc->gart_start = 0;
	} else {
		if (mc->gart_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_af;
		}
		/* VCE doesn't like it when BOs cross a 4GB segment, so align
		 * the GART base on a 4GB boundary as well.
		 */
		mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
	}
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
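
/*
 * Worked example (illustrative, not part of the original file): with a 40-bit
 * mc_mask (0xff_ffff_ffff), 8 GB of VRAM placed at 0 (vram_end =
 * 0x1_ffff_ffff) and a 512 MB GART request:
 *
 *	size_bf = vram_start         = 0
 *	size_af = mc_mask - vram_end = 0xfe_0000_0000
 *
 * size_af wins, so the GART goes after VRAM and its base is rounded up to
 * the next 4 GB boundary: gart_start = ALIGN(0x2_0000_0000, 4 GB) =
 * 0x2_0000_0000, gart_end = 0x2_1fff_ffff.
 */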

/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the size we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw need post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup,
 * or if a post is needed because a hw reset was performed.
 * Returns true if posting is needed, false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old smc fw still need driver do vPost otherwise gpu hang, while
		 * those smc fw version above 22.15 doesn't have this flaw, so we force
		 * vpost executed for smc version below 22.15
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}
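
/*
 * Illustrative usage sketch (not part of the original file): the init/resume
 * paths typically gate the atombios ASIC init on this check, roughly:
 *
 *	if (amdgpu_device_need_post(adev))
 *		amdgpu_atom_asic_init(adev->mode_info.atom_context);
 */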
/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}

static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !is_power_of_2(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}

	if (amdgpu_lockup_timeout == 0) {
		dev_warn(adev->dev, "lockup_timeout must be > 0, adjusting to 10000\n");
		amdgpu_lockup_timeout = 10000;
	}
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after they are powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Checks if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

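/*
 * Illustrative usage sketch (not part of the original file): PX-capable
 * devices register these ops with vga_switcheroo during init, roughly:
 *
 *	if (amdgpu_device_is_px(ddev))
 *		runtime = true;
 *	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
 */
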
int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

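/*
 * Illustrative usage sketch (not part of the original file): IP-specific code
 * gates or ungates a single block by type, e.g. ungating UVD clocks before
 * use:
 *
 *	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
 *						   AMD_CG_STATE_UNGATE);
 */
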
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}

bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}

struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

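/*
 * Illustrative usage sketch (not part of the original file): feature code can
 * key off a minimum IP version, e.g. "GFX 8.0 or newer":
 *
 *	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       8, 0) == 0)
 *		... enable a GFX 8+ only feature ...
 */
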
/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
		 ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}

static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		if (adev->asic_type == CHIP_RAVEN)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	amdgpu_amdkfd_device_probe(adev);

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return -EAGAIN;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}

static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.sw = true;

		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_device_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				return r;
			}
			r = amdgpu_device_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					return r;
				}
			}
		}
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		if (adev->ip_blocks[i].status.hw)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	amdgpu_amdkfd_device_init(adev);

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, true);

	return 0;
}

static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}

static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
	return !!memcmp(adev->gart.ptr, adev->reset_magic,
			AMDGPU_RESET_MAGIC_NUM);
}

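/*
 * Illustrative usage sketch (not part of the original file): a recovery path
 * can compare the magic saved at init time against the GART contents after a
 * reset to decide whether VRAM contents survived, roughly:
 *
 *	if (amdgpu_device_check_vram_lost(adev)) {
 *		DRM_ERROR("VRAM is lost, buffers need to be recovered\n");
 *		... re-validate / re-fill VRAM buffer objects ...
 *	}
 */
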
static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
{
	int i = 0, r;

	if (amdgpu_emu_mode == 1)
		return 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_GATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}
	return 0;
}

static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.late_initialized = true;
		}
	}

	mod_delayed_work(system_wq, &adev->late_init_work,
			 msecs_to_jiffies(AMDGPU_RESUME_MS));

	amdgpu_device_fill_reset_magic(adev);

	return 0;
}

static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_amdkfd_device_fini(adev);
	/* need to disable SMC first */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
			/* XXX handle errors */
			if (r) {
				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
			adev->ip_blocks[i].status.hw = false;
			break;
		}
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC &&
		    adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
			amdgpu_ucode_fini_bo(adev);
		if (!adev->ip_blocks[i].status.hw)
			continue;

		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}

		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}

		adev->ip_blocks[i].status.hw = false;
	}

	/* disable all interrupts */
	amdgpu_irq_disable_all(adev);

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_free_static_csa(adev);
			amdgpu_device_wb_fini(adev);
			amdgpu_device_vram_scratch_fini(adev);
		}

		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
		adev->ip_blocks[i].status.late_initialized = false;
	}

	if (amdgpu_sriov_vf(adev))
		if (amdgpu_virt_release_full_gpu(adev, false))
			DRM_ERROR("failed to release exclusive mode on fini\n");

	return 0;
}

static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, late_init_work.work);

	amdgpu_device_ip_late_set_cg_state(adev);
}

int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
	int i, r;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	/* ungate SMC block first */
	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
						   AMD_CG_STATE_UNGATE);
	if (r) {
		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* ungate blocks so that suspend can properly shut them down */
		if (i != AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
		}
		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, false);

	return 0;
}

static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_GMC,
		AMD_IP_BLOCK_TYPE_COMMON,
		AMD_IP_BLOCK_TYPE_IH,
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
			if (r)
				return r;
		}
	}

	return 0;
}

static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_SMC,
		AMD_IP_BLOCK_TYPE_PSP,
		AMD_IP_BLOCK_TYPE_DCE,
		AMD_IP_BLOCK_TYPE_GFX,
		AMD_IP_BLOCK_TYPE_SDMA,
		AMD_IP_BLOCK_TYPE_UVD,
		AMD_IP_BLOCK_TYPE_VCE
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
			if (r)
				return r;
		}
	}

	return 0;
}

static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
			r = adev->ip_blocks[i].version->funcs->resume(adev);
			if (r) {
				DRM_ERROR("resume of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}

static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
	}

	return 0;
}

static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_device_ip_resume_phase1(adev);
	if (r)
		return r;
	r = amdgpu_device_ip_resume_phase2(adev);

	return r;
}

4e99a44e 1697static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 1698{
6867e1b5
ML
1699 if (amdgpu_sriov_vf(adev)) {
1700 if (adev->is_atom_fw) {
1701 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
1702 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1703 } else {
1704 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
1705 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1706 }
1707
1708 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
1709 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
a5bde2f9 1710 }
048765ad
AR
1711}

bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
{
    switch (asic_type) {
#if defined(CONFIG_DRM_AMD_DC)
    case CHIP_BONAIRE:
    case CHIP_HAWAII:
    case CHIP_KAVERI:
    case CHIP_KABINI:
    case CHIP_MULLINS:
    case CHIP_CARRIZO:
    case CHIP_STONEY:
    case CHIP_POLARIS11:
    case CHIP_POLARIS10:
    case CHIP_POLARIS12:
    case CHIP_TONGA:
    case CHIP_FIJI:
#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
        return amdgpu_dc != 0;
#endif
    case CHIP_VEGA10:
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
    case CHIP_RAVEN:
#endif
        return amdgpu_dc != 0;
#endif
    default:
        return false;
    }
}

/**
 * amdgpu_device_has_dc_support - check if dc is supported
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true for supported, false for not supported
 */
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
{
    if (amdgpu_sriov_vf(adev))
        return false;

    return amdgpu_device_asic_has_dc_support(adev->asic_type);
}
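
/*
 * Illustrative sketch (not part of this file): callers typically use the
 * helper above to pick between the DC and legacy display paths, as
 * amdgpu_device_init() does below for i2c bus setup. The example_* names
 * in this sketch are hypothetical.
 */
#if 0
static void example_init_display_path(struct amdgpu_device *adev)
{
    if (amdgpu_device_has_dc_support(adev))
        example_register_dc_funcs(adev);    /* hypothetical DC path */
    else
        amdgpu_atombios_i2c_init(adev);     /* legacy path */
}
#endif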

/**
 * amdgpu_device_init - initialize the driver
 *
 * @adev: amdgpu_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int amdgpu_device_init(struct amdgpu_device *adev,
               struct drm_device *ddev,
               struct pci_dev *pdev,
               uint32_t flags)
{
    int r, i;
    bool runtime = false;
    u32 max_MBps;

    adev->shutdown = false;
    adev->dev = &pdev->dev;
    adev->ddev = ddev;
    adev->pdev = pdev;
    adev->flags = flags;
    adev->asic_type = flags & AMD_ASIC_MASK;
    adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
    if (amdgpu_emu_mode == 1)
        adev->usec_timeout *= 2;
    adev->gmc.gart_size = 512 * 1024 * 1024;
    adev->accel_working = false;
    adev->num_rings = 0;
    adev->mman.buffer_funcs = NULL;
    adev->mman.buffer_funcs_ring = NULL;
    adev->vm_manager.vm_pte_funcs = NULL;
    adev->vm_manager.vm_pte_num_rings = 0;
    adev->gmc.gmc_funcs = NULL;
    adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
    bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

    adev->smc_rreg = &amdgpu_invalid_rreg;
    adev->smc_wreg = &amdgpu_invalid_wreg;
    adev->pcie_rreg = &amdgpu_invalid_rreg;
    adev->pcie_wreg = &amdgpu_invalid_wreg;
    adev->pciep_rreg = &amdgpu_invalid_rreg;
    adev->pciep_wreg = &amdgpu_invalid_wreg;
    adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
    adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
    adev->didt_rreg = &amdgpu_invalid_rreg;
    adev->didt_wreg = &amdgpu_invalid_wreg;
    adev->gc_cac_rreg = &amdgpu_invalid_rreg;
    adev->gc_cac_wreg = &amdgpu_invalid_wreg;
    adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
    adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;

    DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
         amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
         pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

    /* mutex initialization is all done here so we
     * can call functions below without locking issues */
    atomic_set(&adev->irq.ih.lock, 0);
    mutex_init(&adev->firmware.mutex);
    mutex_init(&adev->pm.mutex);
    mutex_init(&adev->gfx.gpu_clock_mutex);
    mutex_init(&adev->srbm_mutex);
    mutex_init(&adev->gfx.pipe_reserve_mutex);
    mutex_init(&adev->grbm_idx_mutex);
    mutex_init(&adev->mn_lock);
    mutex_init(&adev->virt.vf_errors.lock);
    hash_init(adev->mn_hash);
    mutex_init(&adev->lock_reset);

    amdgpu_device_check_arguments(adev);

    spin_lock_init(&adev->mmio_idx_lock);
    spin_lock_init(&adev->smc_idx_lock);
    spin_lock_init(&adev->pcie_idx_lock);
    spin_lock_init(&adev->uvd_ctx_idx_lock);
    spin_lock_init(&adev->didt_idx_lock);
    spin_lock_init(&adev->gc_cac_idx_lock);
    spin_lock_init(&adev->se_cac_idx_lock);
    spin_lock_init(&adev->audio_endpt_idx_lock);
    spin_lock_init(&adev->mm_stats.lock);

    INIT_LIST_HEAD(&adev->shadow_list);
    mutex_init(&adev->shadow_list_lock);

    INIT_LIST_HEAD(&adev->ring_lru_list);
    spin_lock_init(&adev->ring_lru_list_lock);

    INIT_DELAYED_WORK(&adev->late_init_work,
              amdgpu_device_ip_late_init_func_handler);

    /* Registers mapping */
    /* TODO: block userspace mapping of io register */
    if (adev->asic_type >= CHIP_BONAIRE) {
        adev->rmmio_base = pci_resource_start(adev->pdev, 5);
        adev->rmmio_size = pci_resource_len(adev->pdev, 5);
    } else {
        adev->rmmio_base = pci_resource_start(adev->pdev, 2);
        adev->rmmio_size = pci_resource_len(adev->pdev, 2);
    }

    adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
    if (adev->rmmio == NULL) {
        return -ENOMEM;
    }
    DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
    DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);

    /* doorbell bar mapping */
    amdgpu_device_doorbell_init(adev);

    /* io port mapping */
    for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
        if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
            adev->rio_mem_size = pci_resource_len(adev->pdev, i);
            adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
            break;
        }
    }
    if (adev->rio_mem == NULL)
        DRM_INFO("PCI I/O BAR not found\n");

    /* early init functions */
    r = amdgpu_device_ip_early_init(adev);
    if (r)
        return r;

    /* if we have more than one VGA card, then disable the amdgpu VGA resources */
    /* this will fail for cards that aren't VGA class devices, just
     * ignore it */
    vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);

    if (amdgpu_device_is_px(ddev))
        runtime = true;
    if (!pci_is_thunderbolt_attached(adev->pdev))
        vga_switcheroo_register_client(adev->pdev,
                           &amdgpu_switcheroo_ops, runtime);
    if (runtime)
        vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);

    if (amdgpu_emu_mode == 1) {
        /* post the asic on emulation mode */
        emu_soc_asic_init(adev);
        goto fence_driver_init;
    }

    /* Read BIOS */
    if (!amdgpu_get_bios(adev)) {
        r = -EINVAL;
        goto failed;
    }

    r = amdgpu_atombios_init(adev);
    if (r) {
        dev_err(adev->dev, "amdgpu_atombios_init failed\n");
        amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
        goto failed;
    }

    /* detect if we are running with an SR-IOV vBIOS */
    amdgpu_device_detect_sriov_bios(adev);

    /* Post card if necessary */
    if (amdgpu_device_need_post(adev)) {
        if (!adev->bios) {
            dev_err(adev->dev, "no vBIOS found\n");
            r = -EINVAL;
            goto failed;
        }
        DRM_INFO("GPU posting now...\n");
        r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
        if (r) {
            dev_err(adev->dev, "gpu post error!\n");
            goto failed;
        }
    }

    if (adev->is_atom_fw) {
        /* Initialize clocks */
        r = amdgpu_atomfirmware_get_clock_info(adev);
        if (r) {
            dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
            amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
            goto failed;
        }
    } else {
        /* Initialize clocks */
        r = amdgpu_atombios_get_clock_info(adev);
        if (r) {
            dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
            amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
            goto failed;
        }
        /* init i2c buses */
        if (!amdgpu_device_has_dc_support(adev))
            amdgpu_atombios_i2c_init(adev);
    }

fence_driver_init:
    /* Fence driver */
    r = amdgpu_fence_driver_init(adev);
    if (r) {
        dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
        amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
        goto failed;
    }

    /* init the mode config */
    drm_mode_config_init(adev->ddev);

    r = amdgpu_device_ip_init(adev);
    if (r) {
        /* failed in exclusive mode due to timeout */
        if (amdgpu_sriov_vf(adev) &&
            !amdgpu_sriov_runtime(adev) &&
            amdgpu_virt_mmio_blocked(adev) &&
            !amdgpu_virt_wait_reset(adev)) {
            dev_err(adev->dev, "VF exclusive mode timeout\n");
            /* Don't send request since VF is inactive. */
            adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
            adev->virt.ops = NULL;
            r = -EAGAIN;
            goto failed;
        }
        dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
        amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
        amdgpu_device_ip_fini(adev);
        goto failed;
    }

    adev->accel_working = true;

    amdgpu_vm_check_compute_bug(adev);

    /* Initialize the buffer migration limit. */
    if (amdgpu_moverate >= 0)
        max_MBps = amdgpu_moverate;
    else
        max_MBps = 8; /* Allow 8 MB/s. */
    /* Get a log2 for easy divisions. */
    adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));

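    /*
     * Illustrative note (an assumption about the consumer, see
     * amdgpu_cs.c): storing log2(MB/s) lets the CS throttling code turn
     * a time budget into a byte budget with a shift instead of a 64-bit
     * division, roughly:
     *
     *    max_bytes = accum_us << adev->mm_stats.log2_max_MBps;
     *
     * since 1 MB/s is approximately 1 byte/us.
     */
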
    r = amdgpu_ib_pool_init(adev);
    if (r) {
        dev_err(adev->dev, "IB initialization failed (%d).\n", r);
        amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
        goto failed;
    }

    r = amdgpu_ib_ring_tests(adev);
    if (r)
        DRM_ERROR("ib ring test failed (%d).\n", r);

    if (amdgpu_sriov_vf(adev))
        amdgpu_virt_init_data_exchange(adev);

    amdgpu_fbdev_init(adev);

    r = amdgpu_pm_sysfs_init(adev);
    if (r)
        DRM_ERROR("registering pm debugfs failed (%d).\n", r);

    r = amdgpu_debugfs_gem_init(adev);
    if (r)
        DRM_ERROR("registering gem debugfs failed (%d).\n", r);

    r = amdgpu_debugfs_regs_init(adev);
    if (r)
        DRM_ERROR("registering register debugfs failed (%d).\n", r);

    r = amdgpu_debugfs_firmware_init(adev);
    if (r)
        DRM_ERROR("registering firmware debugfs failed (%d).\n", r);

    r = amdgpu_debugfs_init(adev);
    if (r)
        DRM_ERROR("Creating debugfs files failed (%d).\n", r);

    if (amdgpu_testing & 1) {
        if (adev->accel_working)
            amdgpu_test_moves(adev);
        else
            DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
    }
    if (amdgpu_benchmarking) {
        if (adev->accel_working)
            amdgpu_benchmark(adev, amdgpu_benchmarking);
        else
            DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
    }

    /* enable clockgating, etc. after ib tests, etc. since some blocks require
     * explicit gating rather than handling it automatically.
     */
    r = amdgpu_device_ip_late_init(adev);
    if (r) {
        dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
        amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
        goto failed;
    }

    return 0;

failed:
    amdgpu_vf_error_trans_all(adev);
    if (runtime)
        vga_switcheroo_fini_domain_pm_ops(adev->dev);

    return r;
}

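/*
 * Illustrative sketch (not part of this file): amdgpu_device_init() is
 * driven from the KMS load callback; roughly (see amdgpu_kms.c, details
 * may differ):
 *
 *    adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
 *    dev->dev_private = (void *)adev;
 *    r = amdgpu_device_init(adev, dev, dev->pdev, flags);
 *
 * with amdgpu_device_fini() below as its teardown counterpart.
 */
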
/**
 * amdgpu_device_fini - tear down the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void amdgpu_device_fini(struct amdgpu_device *adev)
{
    int r;

    DRM_INFO("amdgpu: finishing device.\n");
    adev->shutdown = true;
    if (adev->mode_info.mode_config_initialized)
        drm_crtc_force_disable_all(adev->ddev);

    amdgpu_ib_pool_fini(adev);
    amdgpu_fence_driver_fini(adev);
    amdgpu_pm_sysfs_fini(adev);
    amdgpu_fbdev_fini(adev);
    r = amdgpu_device_ip_fini(adev);
    if (adev->firmware.gpu_info_fw) {
        release_firmware(adev->firmware.gpu_info_fw);
        adev->firmware.gpu_info_fw = NULL;
    }
    adev->accel_working = false;
    cancel_delayed_work_sync(&adev->late_init_work);
    /* free i2c buses */
    if (!amdgpu_device_has_dc_support(adev))
        amdgpu_i2c_fini(adev);

    if (amdgpu_emu_mode != 1)
        amdgpu_atombios_fini(adev);

    kfree(adev->bios);
    adev->bios = NULL;
    if (!pci_is_thunderbolt_attached(adev->pdev))
        vga_switcheroo_unregister_client(adev->pdev);
    if (adev->flags & AMD_IS_PX)
        vga_switcheroo_fini_domain_pm_ops(adev->dev);
    vga_client_register(adev->pdev, NULL, NULL, NULL);
    if (adev->rio_mem)
        pci_iounmap(adev->pdev, adev->rio_mem);
    adev->rio_mem = NULL;
    iounmap(adev->rmmio);
    adev->rmmio = NULL;
    amdgpu_device_doorbell_fini(adev);
    amdgpu_debugfs_regs_cleanup(adev);
}

/*
 * Suspend & resume.
 */
/**
 * amdgpu_device_suspend - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: true if the device should be put into a PCI sleep state (D3hot)
 * @fbcon: suspend the fbdev console as well
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
{
    struct amdgpu_device *adev;
    struct drm_crtc *crtc;
    struct drm_connector *connector;
    int r;

    if (dev == NULL || dev->dev_private == NULL) {
        return -ENODEV;
    }

    adev = dev->dev_private;

    if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
        return 0;

    drm_kms_helper_poll_disable(dev);

    if (!amdgpu_device_has_dc_support(adev)) {
        /* turn off display hw */
        drm_modeset_lock_all(dev);
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
            drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
        }
        drm_modeset_unlock_all(dev);
    }

    amdgpu_amdkfd_suspend(adev);

    /* unpin the front buffers and cursors */
    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
        struct amdgpu_bo *robj;

        if (amdgpu_crtc->cursor_bo) {
            struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
            r = amdgpu_bo_reserve(aobj, true);
            if (r == 0) {
                amdgpu_bo_unpin(aobj);
                amdgpu_bo_unreserve(aobj);
            }
        }

        if (rfb == NULL || rfb->obj == NULL) {
            continue;
        }
        robj = gem_to_amdgpu_bo(rfb->obj);
        /* don't unpin kernel fb objects */
        if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
            r = amdgpu_bo_reserve(robj, true);
            if (r == 0) {
                amdgpu_bo_unpin(robj);
                amdgpu_bo_unreserve(robj);
            }
        }
    }
    /* evict vram memory */
    amdgpu_bo_evict_vram(adev);

    amdgpu_fence_driver_suspend(adev);

    r = amdgpu_device_ip_suspend(adev);

    /* evict remaining vram memory
     * This second call to evict vram is to evict the gart page table
     * using the CPU.
     */
    amdgpu_bo_evict_vram(adev);

    pci_save_state(dev->pdev);
    if (suspend) {
        /* Shut down the device */
        pci_disable_device(dev->pdev);
        pci_set_power_state(dev->pdev, PCI_D3hot);
    } else {
        r = amdgpu_asic_reset(adev);
        if (r)
            DRM_ERROR("amdgpu asic reset failed\n");
    }

    if (fbcon) {
        console_lock();
        amdgpu_fbdev_set_suspend(adev, 1);
        console_unlock();
    }
    return 0;
}
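
/*
 * Illustrative sketch (not part of this file): the PCI PM ops in
 * amdgpu_drv.c pair the two entry points; approximately:
 *
 *    static int amdgpu_pmops_suspend(struct device *dev)
 *    {
 *        struct drm_device *drm_dev = dev_get_drvdata(dev);
 *
 *        return amdgpu_device_suspend(drm_dev, true, true);
 *    }
 *
 * with amdgpu_device_resume(drm_dev, true, true) on the resume side.
 */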

/**
 * amdgpu_device_resume - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: true if the PCI device needs to be re-enabled first
 * @fbcon: resume the fbdev console as well
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
{
    struct drm_connector *connector;
    struct amdgpu_device *adev = dev->dev_private;
    struct drm_crtc *crtc;
    int r = 0;

    if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
        return 0;

    if (fbcon)
        console_lock();

    if (resume) {
        pci_set_power_state(dev->pdev, PCI_D0);
        pci_restore_state(dev->pdev);
        r = pci_enable_device(dev->pdev);
        if (r)
            goto unlock;
    }

    /* post card */
    if (amdgpu_device_need_post(adev)) {
        r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
        if (r)
            DRM_ERROR("amdgpu asic init failed\n");
    }

    r = amdgpu_device_ip_resume(adev);
    if (r) {
        DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
        goto unlock;
    }
    amdgpu_fence_driver_resume(adev);

    if (resume) {
        r = amdgpu_ib_ring_tests(adev);
        if (r)
            DRM_ERROR("ib ring test failed (%d).\n", r);
    }

    r = amdgpu_device_ip_late_init(adev);
    if (r)
        goto unlock;

    /* pin cursors */
    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

        if (amdgpu_crtc->cursor_bo) {
            struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
            r = amdgpu_bo_reserve(aobj, true);
            if (r == 0) {
                r = amdgpu_bo_pin(aobj,
                          AMDGPU_GEM_DOMAIN_VRAM,
                          &amdgpu_crtc->cursor_addr);
                if (r != 0)
                    DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
                amdgpu_bo_unreserve(aobj);
            }
        }
    }
    r = amdgpu_amdkfd_resume(adev);
    if (r)
        goto unlock;    /* drop the console lock we may be holding */

    /* bring the mode configuration back in */
    if (fbcon) {
        if (!amdgpu_device_has_dc_support(adev)) {
            /* pre DCE11 */
            drm_helper_resume_force_mode(dev);

            /* turn on display hw */
            drm_modeset_lock_all(dev);
            list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
            }
            drm_modeset_unlock_all(dev);
        }
    }

    drm_kms_helper_poll_enable(dev);

    /*
     * Most of the connector probing functions try to acquire runtime pm
     * refs to ensure that the GPU is powered on when connector polling is
     * performed. Since we're calling this from a runtime PM callback,
     * trying to acquire rpm refs will cause us to deadlock.
     *
     * Since we're guaranteed to be holding the rpm lock, it's safe to
     * temporarily disable the rpm helpers so this doesn't deadlock us.
     */
#ifdef CONFIG_PM
    dev->dev->power.disable_depth++;
#endif
    if (!amdgpu_device_has_dc_support(adev))
        drm_helper_hpd_irq_event(dev);
    else
        drm_kms_helper_hotplug_event(dev);
#ifdef CONFIG_PM
    dev->dev->power.disable_depth--;
#endif

    if (fbcon)
        amdgpu_fbdev_set_suspend(adev, 0);

unlock:
    if (fbcon)
        console_unlock();

    return r;
}

static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
{
    int i;
    bool asic_hang = false;

    if (amdgpu_sriov_vf(adev))
        return true;

    for (i = 0; i < adev->num_ip_blocks; i++) {
        if (!adev->ip_blocks[i].status.valid)
            continue;
        if (adev->ip_blocks[i].version->funcs->check_soft_reset)
            adev->ip_blocks[i].status.hang =
                adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
        if (adev->ip_blocks[i].status.hang) {
            DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
            asic_hang = true;
        }
    }
    return asic_hang;
}

static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
{
    int i, r = 0;

    for (i = 0; i < adev->num_ip_blocks; i++) {
        if (!adev->ip_blocks[i].status.valid)
            continue;
        if (adev->ip_blocks[i].status.hang &&
            adev->ip_blocks[i].version->funcs->pre_soft_reset) {
            r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
            if (r)
                return r;
        }
    }

    return 0;
}

static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
{
    int i;

    for (i = 0; i < adev->num_ip_blocks; i++) {
        if (!adev->ip_blocks[i].status.valid)
            continue;
        if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
            (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
            (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
            (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
            adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
            if (adev->ip_blocks[i].status.hang) {
                DRM_INFO("Some blocks need a full reset!\n");
                return true;
            }
        }
    }
    return false;
}

static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
{
    int i, r = 0;

    for (i = 0; i < adev->num_ip_blocks; i++) {
        if (!adev->ip_blocks[i].status.valid)
            continue;
        if (adev->ip_blocks[i].status.hang &&
            adev->ip_blocks[i].version->funcs->soft_reset) {
            r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
            if (r)
                return r;
        }
    }

    return 0;
}

static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
{
    int i, r = 0;

    for (i = 0; i < adev->num_ip_blocks; i++) {
        if (!adev->ip_blocks[i].status.valid)
            continue;
        if (adev->ip_blocks[i].status.hang &&
            adev->ip_blocks[i].version->funcs->post_soft_reset)
            r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
        if (r)
            return r;
    }

    return 0;
}
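
/*
 * Note (summarizing the helpers above and amdgpu_device_reset() below):
 * together they form the soft-reset pipeline,
 *
 *    check_soft_reset -> pre_soft_reset -> soft_reset -> post_soft_reset
 *
 * and amdgpu_device_reset() falls back to a full ASIC reset when a step
 * fails or a core block (GMC/SMC/ACP/DCE/PSP) reports a hang.
 */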

static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
                          struct amdgpu_ring *ring,
                          struct amdgpu_bo *bo,
                          struct dma_fence **fence)
{
    uint32_t domain;
    int r;

    if (!bo->shadow)
        return 0;

    r = amdgpu_bo_reserve(bo, true);
    if (r)
        return r;
    domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
    /* if bo has been evicted, then no need to recover */
    if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
        r = amdgpu_bo_validate(bo->shadow);
        if (r) {
            DRM_ERROR("bo validate failed!\n");
            goto err;
        }

        r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
                          NULL, fence, true);
        if (r) {
            DRM_ERROR("recover page table failed!\n");
            goto err;
        }
    }
err:
    amdgpu_bo_unreserve(bo);
    return r;
}

static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
{
    struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
    struct amdgpu_bo *bo, *tmp;
    struct dma_fence *fence = NULL, *next = NULL;
    long r = 1;
    int i = 0;
    long tmo;

    if (amdgpu_sriov_runtime(adev))
        tmo = msecs_to_jiffies(amdgpu_lockup_timeout);
    else
        tmo = msecs_to_jiffies(100);

    DRM_INFO("recover vram bo from shadow start\n");
    mutex_lock(&adev->shadow_list_lock);
    list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
        next = NULL;
        amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
        if (fence) {
            r = dma_fence_wait_timeout(fence, false, tmo);
            if (r == 0)
                pr_err("wait fence %p[%d] timeout\n", fence, i);
            else if (r < 0)
                pr_err("wait fence %p[%d] interrupted\n", fence, i);
            if (r < 1) {
                dma_fence_put(fence);
                fence = next;
                break;
            }
            i++;
        }

        dma_fence_put(fence);
        fence = next;
    }
    mutex_unlock(&adev->shadow_list_lock);

    if (fence) {
        r = dma_fence_wait_timeout(fence, false, tmo);
        if (r == 0)
            pr_err("wait fence %p[%d] timeout\n", fence, i);
        else if (r < 0)
            pr_err("wait fence %p[%d] interrupted\n", fence, i);
    }
    dma_fence_put(fence);

    if (r > 0)
        DRM_INFO("recover vram bo from shadow done\n");
    else
        DRM_ERROR("recover vram bo from shadow failed\n");

    return (r > 0 ? 0 : 1);
}
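
/*
 * Background note (illustrative): buffers land on adev->shadow_list when
 * they are created with the AMDGPU_GEM_CREATE_SHADOW flag (VM page tables
 * being the main user), which pairs each VRAM BO with a GTT copy; the
 * walk above restores exactly those pairs after VRAM contents are lost.
 */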

/*
 * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
 *
 * @adev: amdgpu device pointer
 *
 * Attempt a soft reset, or a full reset if that fails or does not
 * suffice, and reinitialize the ASIC.
 * Returns 0 on success, an error code otherwise.
 */
static int amdgpu_device_reset(struct amdgpu_device *adev)
{
    bool need_full_reset, vram_lost = 0;
    int r;

    need_full_reset = amdgpu_device_ip_need_full_reset(adev);

    if (!need_full_reset) {
        amdgpu_device_ip_pre_soft_reset(adev);
        r = amdgpu_device_ip_soft_reset(adev);
        amdgpu_device_ip_post_soft_reset(adev);
        if (r || amdgpu_device_ip_check_soft_reset(adev)) {
            DRM_INFO("soft reset failed, will fallback to full reset!\n");
            need_full_reset = true;
        }
    }

    if (need_full_reset) {
        r = amdgpu_device_ip_suspend(adev);

retry:
        r = amdgpu_asic_reset(adev);
        /* post card */
        amdgpu_atom_asic_init(adev->mode_info.atom_context);

        if (!r) {
            dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
            r = amdgpu_device_ip_resume_phase1(adev);
            if (r)
                goto out;

            vram_lost = amdgpu_device_check_vram_lost(adev);
            if (vram_lost) {
                DRM_ERROR("VRAM is lost!\n");
                atomic_inc(&adev->vram_lost_counter);
            }

            r = amdgpu_gtt_mgr_recover(
                &adev->mman.bdev.man[TTM_PL_TT]);
            if (r)
                goto out;

            r = amdgpu_device_ip_resume_phase2(adev);
            if (r)
                goto out;

            if (vram_lost)
                amdgpu_device_fill_reset_magic(adev);
        }
    }

out:
    if (!r) {
        amdgpu_irq_gpu_reset_resume_helper(adev);
        r = amdgpu_ib_ring_tests(adev);
        if (r) {
            dev_err(adev->dev, "ib ring test failed (%d).\n", r);
            r = amdgpu_device_ip_suspend(adev);
            need_full_reset = true;
            goto retry;
        }
    }

    if (!r && ((need_full_reset && !(adev->flags & AMD_IS_APU)) || vram_lost))
        r = amdgpu_device_handle_vram_lost(adev);

    return r;
}

/*
 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
 *
 * @adev: amdgpu device pointer
 * @from_hypervisor: request the full GPU back from the hypervisor
 *
 * Do a VF FLR and reinitialize the ASIC.
 * Returns 0 on success, an error code otherwise.
 */
static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, bool from_hypervisor)
{
    int r;

    if (from_hypervisor)
        r = amdgpu_virt_request_full_gpu(adev, true);
    else
        r = amdgpu_virt_reset_gpu(adev);
    if (r)
        return r;

    /* Resume IP prior to SMC */
    r = amdgpu_device_ip_reinit_early_sriov(adev);
    if (r)
        goto error;

    /* we need to recover the gart prior to running SMC/CP/SDMA resume */
    amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);

    /* now we are okay to resume SMC/CP/SDMA */
    r = amdgpu_device_ip_reinit_late_sriov(adev);
    amdgpu_virt_release_full_gpu(adev, true);
    if (r)
        goto error;

    amdgpu_irq_gpu_reset_resume_helper(adev);
    r = amdgpu_ib_ring_tests(adev);

    if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
        atomic_inc(&adev->vram_lost_counter);
        r = amdgpu_device_handle_vram_lost(adev);
    }

error:

    return r;
}

/**
 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
 *
 * @adev: amdgpu device pointer
 * @job: which job triggered the hang
 * @force: forces reset regardless of amdgpu_gpu_recovery
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                  struct amdgpu_job *job, bool force)
{
    struct drm_atomic_state *state = NULL;
    int i, r, resched;

    if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
        DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
        return 0;
    }

    if (!force && (amdgpu_gpu_recovery == 0 ||
               (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) {
        DRM_INFO("GPU recovery disabled.\n");
        return 0;
    }

    dev_info(adev->dev, "GPU reset begin!\n");

    mutex_lock(&adev->lock_reset);
    atomic_inc(&adev->gpu_reset_counter);
    adev->in_gpu_reset = 1;

    /* block TTM */
    resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

    /* store modesetting */
    if (amdgpu_device_has_dc_support(adev))
        state = drm_atomic_helper_suspend(adev->ddev);

    /* block all schedulers and reset given job's ring */
    for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
        struct amdgpu_ring *ring = adev->rings[i];

        if (!ring || !ring->sched.thread)
            continue;

        kthread_park(ring->sched.thread);

        if (job && job->ring->idx != i)
            continue;

        drm_sched_hw_job_reset(&ring->sched, &job->base);

        /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
        amdgpu_fence_driver_force_completion(ring);
    }

    if (amdgpu_sriov_vf(adev))
        r = amdgpu_device_reset_sriov(adev, job ? false : true);
    else
        r = amdgpu_device_reset(adev);

    for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
        struct amdgpu_ring *ring = adev->rings[i];

        if (!ring || !ring->sched.thread)
            continue;

        /* only need to recover the sched of the given job's ring
         * or all rings (in the case @job is NULL)
         * after the above reset is accomplished
         */
        if ((!job || job->ring->idx == i) && !r)
            drm_sched_job_recovery(&ring->sched);

        kthread_unpark(ring->sched.thread);
    }

    if (amdgpu_device_has_dc_support(adev)) {
        if (drm_atomic_helper_resume(adev->ddev, state))
            dev_info(adev->dev, "drm resume failed\n");
    } else {
        drm_helper_resume_force_mode(adev->ddev);
    }

    ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);

    if (r) {
        /* bad news, how to tell it to userspace ? */
        dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
        amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
    } else {
        dev_info(adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
    }

    amdgpu_vf_error_trans_all(adev);
    adev->in_gpu_reset = 0;
    mutex_unlock(&adev->lock_reset);
    return r;
}
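
/*
 * Illustrative sketch (not part of this file): the usual entry point into
 * recovery is the GPU scheduler's timeout handler; roughly (see
 * amdgpu_job.c, details may differ):
 *
 *    static void amdgpu_job_timedout(struct drm_sched_job *s_job)
 *    {
 *        struct amdgpu_job *job = to_amdgpu_job(s_job);
 *
 *        amdgpu_device_gpu_recover(job->adev, job, false);
 *    }
 */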

void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
    u32 mask;
    int ret;

    if (amdgpu_pcie_gen_cap)
        adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

    if (amdgpu_pcie_lane_cap)
        adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

    /* covers APUs as well */
    if (pci_is_root_bus(adev->pdev->bus)) {
        if (adev->pm.pcie_gen_mask == 0)
            adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
        if (adev->pm.pcie_mlw_mask == 0)
            adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
        return;
    }

    if (adev->pm.pcie_gen_mask == 0) {
        ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
        if (!ret) {
            adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
                          CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
                          CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);

            if (mask & DRM_PCIE_SPEED_25)
                adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
            if (mask & DRM_PCIE_SPEED_50)
                adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
            if (mask & DRM_PCIE_SPEED_80)
                adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
        } else {
            adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
        }
    }
    if (adev->pm.pcie_mlw_mask == 0) {
        ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
        if (!ret) {
            switch (mask) {
            case 32:
                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
                              CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
                              CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
                              CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
                              CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
                              CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                              CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                break;
            case 16:
                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
                              CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
                              CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
                              CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
                              CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                              CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                break;
            case 12:
                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
                              CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
                              CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
                              CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                              CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                break;
            case 8:
                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
                              CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
                              CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                              CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                break;
            case 4:
                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
                              CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                              CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                break;
            case 2:
                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                              CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                break;
            case 1:
                adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
                break;
            default:
                break;
            }
        } else {
            adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
        }
    }
}
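
/*
 * Illustrative sketch (not part of this file): consumers such as the
 * powerplay code test these masks to pick the highest supported link
 * speed; approximately:
 *
 *    if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
 *        ... request 8.0 GT/s ...
 *    else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
 *        ... request 5.0 GT/s ...
 */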