drm/amd/include/vg20: adjust VCE_BASE to reuse vce 4.0 header files

[linux-2.6-block.git] drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGAM",
	"VEGA10",
	"VEGA12",
	"RAVEN",
	"LAST",
};

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * amdgpu_device_is_px - Is the device a dGPU with HG/PX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with HG/PX power control,
 * otherwise return false.
 */
bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
/**
 * amdgpu_mm_rreg - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_rreg(adev, reg);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register with byte offset
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset from MMIO start
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	BUG();
}

/**
 * amdgpu_mm_wreg8 - write to a memory mapped IO register with byte offset
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset from MMIO start
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}

/**
 * amdgpu_mm_wreg - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_wreg(adev, reg, v);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

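/*
 * Illustrative sketch (not part of the original file): a typical
 * read-modify-write through the helpers above. mmSOME_REG and SOME_BIT are
 * hypothetical placeholders; real callers usually go through the RREG32()/
 * WREG32() macros from amdgpu.h, which wrap these functions with
 * acc_flags == 0.
 *
 *	u32 tmp;
 *
 *	tmp = amdgpu_mm_rreg(adev, mmSOME_REG, 0);
 *	tmp |= SOME_BIT;
 *	amdgpu_mm_wreg(adev, mmSOME_REG, tmp, 0);
 */
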
/**
 * amdgpu_io_rreg - read an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 *
 * Returns the 32 bit value from the offset specified.
 */
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

/**
 * amdgpu_io_wreg - write to an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

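/*
 * Illustrative sketch (assumed caller, not from this file): ring code kicks
 * the GPU by writing its write pointer to the ring's doorbell slot; a 32 bit
 * ring on CIK-era parts would do something like:
 *
 *	amdgpu_mm_wdoorbell(adev, ring->doorbell_index,
 *			    lower_32_bits(ring->wptr));
 */
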
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

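/*
 * Illustrative sketch (register names and values are made up): a golden
 * register table is a flat array of {offset, and_mask, or_mask} triples:
 *
 *	static const u32 golden_settings_example[] = {
 *		mmREG_A, 0xffffffff, 0x00000001,  // and_mask 0xffffffff: write or_mask directly
 *		mmREG_B, 0x0000ff00, 0x00001200,  // otherwise: RMW, clear and_mask bits, set or_mask
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, golden_settings_example,
 *						ARRAY_SIZE(golden_settings_example));
 */
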
/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

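/*
 * Illustrative sketch (assumed caller, not from this file): ring code
 * typically grabs a writeback slot for e.g. its rptr shadow and frees it
 * on teardown. The returned index is a dword offset into adev->wb.wb:
 *
 *	u32 wb;
 *
 *	if (amdgpu_device_wb_get(adev, &wb))
 *		return -EINVAL;
 *	// CPU view: adev->wb.wb[wb], GPU view: adev->wb.gpu_addr + wb * 4
 *	amdgpu_device_wb_free(adev, wb);
 */
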
/**
 * amdgpu_device_vram_location - try to find VRAM location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter.
 */
void amdgpu_device_vram_location(struct amdgpu_device *adev,
				 struct amdgpu_gmc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_device_gart_location - try to find GTT location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_device_gart_location(struct amdgpu_device *adev,
				 struct amdgpu_gmc *mc)
{
	u64 size_af, size_bf;

	mc->gart_size += adev->pm.smu_prv_buffer_size;

	size_af = adev->gmc.mc_mask - mc->vram_end;
	size_bf = mc->vram_start;
	if (size_bf > size_af) {
		if (mc->gart_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_bf;
		}
		mc->gart_start = 0;
	} else {
		if (mc->gart_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_af;
		}
		/* VCE doesn't like it when BOs cross a 4GB segment, so align
		 * the GART base on a 4GB boundary as well.
		 */
		mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
	}
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}

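/*
 * Illustrative sketch (mirrors how GMC IP blocks typically consume these
 * helpers): the hw-programmed FB base is fed to amdgpu_device_vram_location()
 * and the GART is then fitted around it. The register name varies per asic
 * and is assumed here:
 *
 *	base = RREG32(mmMC_VM_FB_LOCATION) & 0x0000FFFF;
 *	base <<= 24;
 *	amdgpu_device_vram_location(adev, &adev->gmc, base);
 *	amdgpu_device_gart_location(adev, &adev->gmc);
 */
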
/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the size we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw need post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if post is needed, false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old smc fw still need the driver to do vPost, otherwise the gpu hangs;
		 * smc fw versions above 22.15 don't have this flaw, so we force
		 * vPost to be executed for smc versions below 22.15
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

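/*
 * Illustrative sketch (based on how the init path typically consumes this
 * helper): the card is posted via the VBIOS tables only when needed, e.g.
 *
 *	if (amdgpu_device_need_post(adev))
 *		amdgpu_atom_asic_init(adev->mode_info.atom_context);
 */
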
/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines number of bits in page table versus page directory,
 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 * page table and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
	struct sysinfo si;
	bool is_os_64 = (sizeof(void *) == 8) ? true : false;
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;

	if (amdgpu_smu_memory_pool_size == 0)
		return;

	if (!is_os_64) {
		DRM_WARN("Not 64-bit OS, feature not supported\n");
		goto def_value;
	}
	si_meminfo(&si);
	total_memory = (uint64_t)si.totalram * si.mem_unit;

	if ((amdgpu_smu_memory_pool_size == 1) ||
	    (amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
			goto def_value1;
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		   (amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
			goto def_value1;
	} else {
		DRM_WARN("Smu memory pool size not supported\n");
		goto def_value;
	}
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

	return;

def_value1:
	DRM_WARN("Not enough system memory\n");
def_value:
	adev->pm.smu_prv_buffer_size = 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	amdgpu_device_check_smu_prv_buffer_size(adev);

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !is_power_of_2(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}

	if (amdgpu_lockup_timeout == 0) {
		dev_warn(adev->dev, "lockup_timeout must be > 0, adjusting to 10000\n");
		amdgpu_lockup_timeout = 10000;
	}

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after they are powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 *
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 *
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
 */
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

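/*
 * Illustrative sketch: other parts of the driver gate or ungate a whole IP
 * through these wrappers, e.g. gating GFX clocks:
 *
 *	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       AMD_CG_STATE_GATE);
 */
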
/**
 * amdgpu_device_ip_wait_for_idle - wait for idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
 */
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;

}

/**
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
 */
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;

}

/**
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
 */
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

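/*
 * Illustrative sketch: asic code can branch on a minimum IP version, where
 * 0 means "equal or greater":
 *
 *	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GMC,
 *					       8, 1) == 0)
 *		;	// take the GMC 8.1+ path
 */
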
/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
		 ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display. This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
 */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

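/*
 * Illustrative sketch: the virtual_display string is a semicolon separated
 * list of "<pci address>,<crtc count>" entries, e.g.
 *
 *	modprobe amdgpu virtual_display=0000:04:00.0,2
 *
 * would enable two virtual crtcs on the device at 0000:04:00.0 (a made-up
 * address); "all" matches every device and the crtc count is clamped to
 * [1, 6] by the parser above.
 */
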
/**
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}

/**
 * amdgpu_device_ip_early_init - run early init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Early initialization pass for hardware IPs. The hardware IPs that make
 * up each asic are discovered and each IP's early_init callback is run. This
 * is the first stage in initializing the asic.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
		if (adev->asic_type == CHIP_RAVEN)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	amdgpu_amdkfd_device_probe(adev);

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return -EAGAIN;
	}

	adev->powerplay.pp_feature = amdgpu_pp_feature_mask;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}

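/*
 * Illustrative sketch: individual IP blocks can be masked off for debugging
 * via the ip_block_mask module parameter consumed above; e.g.
 *
 *	modprobe amdgpu ip_block_mask=0xfffffffd
 *
 * clears bit 1 and so disables the IP block at index 1 for this boot.
 */
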
/**
 * amdgpu_device_ip_init - run init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main initialization pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
 * are run. sw_init initializes the software state associated with each IP
 * and hw_init initializes the hardware associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.sw = true;

		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_device_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				return r;
			}
			r = amdgpu_device_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					return r;
				}
			}
		}
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		if (adev->ip_blocks[i].status.hw)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	amdgpu_amdkfd_device_init(adev);

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, true);

	return 0;
}

/**
 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
 *
 * @adev: amdgpu_device pointer
 *
 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
 * this function before a GPU reset. If the value is retained after a
 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
 */
static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}

/**
 * amdgpu_device_check_vram_lost - check if vram is valid
 *
 * @adev: amdgpu_device pointer
 *
 * Checks the reset magic value written to the gart pointer in VRAM.
 * The driver calls this after a GPU reset to see if the contents of
 * VRAM is lost or not.
 * returns true if vram is lost, false if not.
 */
static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
	return !!memcmp(adev->gart.ptr, adev->reset_magic,
			AMDGPU_RESET_MAGIC_NUM);
}

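/*
 * Illustrative sketch (mirrors how the reset path typically consumes these
 * helpers): the magic is filled late in init, then compared after a reset
 * to decide whether buffer contents must be restored:
 *
 *	vram_lost = amdgpu_device_check_vram_lost(adev);
 *	if (vram_lost)
 *		DRM_INFO("VRAM is lost due to GPU reset!\n");
 */
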
e3ecdffa
AD
1690/**
1691 * amdgpu_device_ip_late_set_cg_state - late init for clockgating
1692 *
1693 * @adev: amdgpu_device pointer
1694 *
1695 * Late initialization pass enabling clockgating for hardware IPs.
1696 * The list of all the hardware IPs that make up the asic is walked and the
1697 * set_clockgating_state callbacks are run. This stage is run late
1698 * in the init process.
1699 * Returns 0 on success, negative error code on failure.
1700 */
06ec9070 1701static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
d38ceaf9
AD
1702{
1703 int i = 0, r;
1704
4a2ba394
SL
1705 if (amdgpu_emu_mode == 1)
1706 return 0;
1707
2c773de2
S
1708 r = amdgpu_ib_ring_tests(adev);
1709 if (r)
1710 DRM_ERROR("ib ring test failed (%d).\n", r);
1711
d38ceaf9 1712 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1713 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1714 continue;
4a446d55 1715 /* skip CG for VCE/UVD, it's handled specially */
a1255107 1716 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
57716327
RZ
1717 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1718 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
4a446d55 1719 /* enable clockgating to save power */
a1255107
AD
1720 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1721 AMD_CG_STATE_GATE);
4a446d55
AD
1722 if (r) {
1723 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
a1255107 1724 adev->ip_blocks[i].version->funcs->name, r);
4a446d55
AD
1725 return r;
1726 }
b0b00ff1 1727 }
d38ceaf9 1728 }
2dc80b00
S
1729 return 0;
1730}
1731
e3ecdffa
AD
1732/**
1733 * amdgpu_device_ip_late_init - run late init for hardware IPs
1734 *
1735 * @adev: amdgpu_device pointer
1736 *
1737 * Late initialization pass for hardware IPs. The list of all the hardware
1738 * IPs that make up the asic is walked and the late_init callbacks are run.
1739 * late_init covers any special initialization that an IP requires
 1740 * after all of them have been initialized or something that needs to happen
1741 * late in the init process.
1742 * Returns 0 on success, negative error code on failure.
1743 */
06ec9070 1744static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2dc80b00
S
1745{
1746 int i = 0, r;
1747
1748 for (i = 0; i < adev->num_ip_blocks; i++) {
1749 if (!adev->ip_blocks[i].status.valid)
1750 continue;
1751 if (adev->ip_blocks[i].version->funcs->late_init) {
1752 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
1753 if (r) {
1754 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1755 adev->ip_blocks[i].version->funcs->name, r);
1756 return r;
1757 }
1758 adev->ip_blocks[i].status.late_initialized = true;
1759 }
1760 }
1761
2c773de2
S
1762 queue_delayed_work(system_wq, &adev->late_init_work,
1763 msecs_to_jiffies(AMDGPU_RESUME_MS));
d38ceaf9 1764
06ec9070 1765 amdgpu_device_fill_reset_magic(adev);
d38ceaf9
AD
1766
1767 return 0;
1768}
1769
e3ecdffa
AD
1770/**
1771 * amdgpu_device_ip_fini - run fini for hardware IPs
1772 *
1773 * @adev: amdgpu_device pointer
1774 *
1775 * Main teardown pass for hardware IPs. The list of all the hardware
1776 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
1777 * are run. hw_fini tears down the hardware associated with each IP
1778 * and sw_fini tears down any software state associated with each IP.
1779 * Returns 0 on success, negative error code on failure.
1780 */
06ec9070 1781static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
d38ceaf9
AD
1782{
1783 int i, r;
1784
1884734a 1785 amdgpu_amdkfd_device_fini(adev);
3e96dbfd
AD
1786 /* need to disable SMC first */
1787 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1788 if (!adev->ip_blocks[i].status.hw)
3e96dbfd 1789 continue;
57716327
RZ
1790 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC &&
1791 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
3e96dbfd 1792 /* ungate blocks before hw fini so that we can shut down the blocks safely */
a1255107
AD
1793 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1794 AMD_CG_STATE_UNGATE);
3e96dbfd
AD
1795 if (r) {
1796 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
a1255107 1797 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd
AD
1798 return r;
1799 }
a1255107 1800 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3e96dbfd
AD
1801 /* XXX handle errors */
1802 if (r) {
1803 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
a1255107 1804 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd 1805 }
a1255107 1806 adev->ip_blocks[i].status.hw = false;
3e96dbfd
AD
1807 break;
1808 }
1809 }
1810
d38ceaf9 1811 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1812 if (!adev->ip_blocks[i].status.hw)
d38ceaf9 1813 continue;
8201a67a
RZ
1814
1815 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
81ce8bea
RZ
1816 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1817 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
8201a67a
RZ
 1818 /* ungate blocks before hw fini so that we can shut down the blocks safely */
1819 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1820 AMD_CG_STATE_UNGATE);
1821 if (r) {
1822 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1823 adev->ip_blocks[i].version->funcs->name, r);
1824 return r;
1825 }
2c1a2784 1826 }
8201a67a 1827
a1255107 1828 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
d38ceaf9 1829 /* XXX handle errors */
2c1a2784 1830 if (r) {
a1255107
AD
1831 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1832 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1833 }
8201a67a 1834
a1255107 1835 adev->ip_blocks[i].status.hw = false;
d38ceaf9
AD
1836 }
1837
9950cda2 1838
d38ceaf9 1839 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1840 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 1841 continue;
c12aba3a
ML
1842
1843 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1844 amdgpu_free_static_csa(adev);
1845 amdgpu_device_wb_fini(adev);
1846 amdgpu_device_vram_scratch_fini(adev);
1847 }
1848
a1255107 1849 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
d38ceaf9 1850 /* XXX handle errors */
2c1a2784 1851 if (r) {
a1255107
AD
1852 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1853 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1854 }
a1255107
AD
1855 adev->ip_blocks[i].status.sw = false;
1856 adev->ip_blocks[i].status.valid = false;
d38ceaf9
AD
1857 }
1858
a6dcfd9c 1859 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1860 if (!adev->ip_blocks[i].status.late_initialized)
8a2eef1d 1861 continue;
a1255107
AD
1862 if (adev->ip_blocks[i].version->funcs->late_fini)
1863 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1864 adev->ip_blocks[i].status.late_initialized = false;
a6dcfd9c
ML
1865 }
1866
030308fc 1867 if (amdgpu_sriov_vf(adev))
24136135
ML
1868 if (amdgpu_virt_release_full_gpu(adev, false))
1869 DRM_ERROR("failed to release exclusive mode on fini\n");
2493664f 1870
d38ceaf9
AD
1871 return 0;
1872}
1873
e3ecdffa
AD
1874/**
1875 * amdgpu_device_ip_late_init_func_handler - work handler for clockgating
1876 *
1877 * @work: work_struct
1878 *
1879 * Work handler for amdgpu_device_ip_late_set_cg_state. We put the
1880 * clockgating setup into a worker thread to speed up driver init and
1881 * resume from suspend.
1882 */
06ec9070 1883static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
2dc80b00
S
1884{
1885 struct amdgpu_device *adev =
1886 container_of(work, struct amdgpu_device, late_init_work.work);
06ec9070 1887 amdgpu_device_ip_late_set_cg_state(adev);
2dc80b00
S
1888}
1889
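/* Illustrative sketch: the wiring for this handler elsewhere in this file.
 * amdgpu_device_init() registers it and amdgpu_device_ip_late_init() kicks
 * it AMDGPU_RESUME_MS (2s) after late init completes:
 *
 *	INIT_DELAYED_WORK(&adev->late_init_work,
 *			  amdgpu_device_ip_late_init_func_handler);
 *	...
 *	queue_delayed_work(system_wq, &adev->late_init_work,
 *			   msecs_to_jiffies(AMDGPU_RESUME_MS));
 */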
e3ecdffa
AD
1890/**
1891 * amdgpu_device_ip_suspend - run suspend for hardware IPs
1892 *
1893 * @adev: amdgpu_device pointer
1894 *
1895 * Main suspend function for hardware IPs. The list of all the hardware
1896 * IPs that make up the asic is walked, clockgating is disabled and the
1897 * suspend callbacks are run. suspend puts the hardware and software state
1898 * in each IP into a state suitable for suspend.
1899 * Returns 0 on success, negative error code on failure.
1900 */
cdd61df6 1901int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
d38ceaf9
AD
1902{
1903 int i, r;
1904
e941ea99
XY
1905 if (amdgpu_sriov_vf(adev))
1906 amdgpu_virt_request_full_gpu(adev, false);
1907
b0833696
HR
1908 /* ungate SMC block powergating */
1909 if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
1910 amdgpu_device_ip_set_powergating_state(adev,
1911 AMD_IP_BLOCK_TYPE_SMC,
1912 AMD_CG_STATE_UNGATE);
1913
c5a93a28 1914 /* ungate SMC block first */
2990a1fc
AD
1915 r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1916 AMD_CG_STATE_UNGATE);
c5a93a28 1917 if (r) {
2990a1fc 1918 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
c5a93a28
FC
1919 }
1920
d38ceaf9 1921 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1922 if (!adev->ip_blocks[i].status.valid)
d38ceaf9
AD
1923 continue;
1924 /* ungate blocks so that suspend can properly shut them down */
5b2a3d2c 1925 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
57716327 1926 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
a1255107
AD
1927 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1928 AMD_CG_STATE_UNGATE);
c5a93a28 1929 if (r) {
a1255107
AD
1930 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1931 adev->ip_blocks[i].version->funcs->name, r);
c5a93a28 1932 }
2c1a2784 1933 }
d38ceaf9 1934 /* XXX handle errors */
a1255107 1935 r = adev->ip_blocks[i].version->funcs->suspend(adev);
d38ceaf9 1936 /* XXX handle errors */
2c1a2784 1937 if (r) {
a1255107
AD
1938 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1939 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1940 }
d38ceaf9
AD
1941 }
1942
e941ea99
XY
1943 if (amdgpu_sriov_vf(adev))
1944 amdgpu_virt_release_full_gpu(adev, false);
1945
d38ceaf9
AD
1946 return 0;
1947}
1948
06ec9070 1949static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
a90ad3c2
ML
1950{
1951 int i, r;
1952
2cb681b6
ML
1953 static enum amd_ip_block_type ip_order[] = {
1954 AMD_IP_BLOCK_TYPE_GMC,
1955 AMD_IP_BLOCK_TYPE_COMMON,
2cb681b6
ML
1956 AMD_IP_BLOCK_TYPE_IH,
1957 };
a90ad3c2 1958
2cb681b6
ML
1959 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1960 int j;
1961 struct amdgpu_ip_block *block;
a90ad3c2 1962
2cb681b6
ML
1963 for (j = 0; j < adev->num_ip_blocks; j++) {
1964 block = &adev->ip_blocks[j];
1965
1966 if (block->version->type != ip_order[i] ||
1967 !block->status.valid)
1968 continue;
1969
1970 r = block->version->funcs->hw_init(adev);
1971 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
c41d1cf6
ML
1972 if (r)
1973 return r;
a90ad3c2
ML
1974 }
1975 }
1976
1977 return 0;
1978}
1979
06ec9070 1980static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
a90ad3c2
ML
1981{
1982 int i, r;
1983
2cb681b6
ML
1984 static enum amd_ip_block_type ip_order[] = {
1985 AMD_IP_BLOCK_TYPE_SMC,
ef4c166d 1986 AMD_IP_BLOCK_TYPE_PSP,
2cb681b6
ML
1987 AMD_IP_BLOCK_TYPE_DCE,
1988 AMD_IP_BLOCK_TYPE_GFX,
1989 AMD_IP_BLOCK_TYPE_SDMA,
257deb8c
FM
1990 AMD_IP_BLOCK_TYPE_UVD,
1991 AMD_IP_BLOCK_TYPE_VCE
2cb681b6 1992 };
a90ad3c2 1993
2cb681b6
ML
1994 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1995 int j;
1996 struct amdgpu_ip_block *block;
a90ad3c2 1997
2cb681b6
ML
1998 for (j = 0; j < adev->num_ip_blocks; j++) {
1999 block = &adev->ip_blocks[j];
2000
2001 if (block->version->type != ip_order[i] ||
2002 !block->status.valid)
2003 continue;
2004
2005 r = block->version->funcs->hw_init(adev);
2006 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
c41d1cf6
ML
2007 if (r)
2008 return r;
a90ad3c2
ML
2009 }
2010 }
2011
2012 return 0;
2013}
2014
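/* A minimal sketch, not in the driver: the two SR-IOV re-init walks above
 * share the same shape and could, hypothetically, be factored into one
 * helper that replays hw_init in an explicit order:
 *
 *	static int reinit_in_order(struct amdgpu_device *adev,
 *				   const enum amd_ip_block_type *order, int n)
 *	{
 *		int i, j, r;
 *
 *		for (i = 0; i < n; i++)
 *			for (j = 0; j < adev->num_ip_blocks; j++) {
 *				struct amdgpu_ip_block *b = &adev->ip_blocks[j];
 *
 *				if (b->version->type != order[i] || !b->status.valid)
 *					continue;
 *				r = b->version->funcs->hw_init(adev);
 *				if (r)
 *					return r;
 *			}
 *		return 0;
 *	}
 */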
e3ecdffa
AD
2015/**
2016 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2017 *
2018 * @adev: amdgpu_device pointer
2019 *
2020 * First resume function for hardware IPs. The list of all the hardware
2021 * IPs that make up the asic is walked and the resume callbacks are run for
2022 * COMMON, GMC, and IH. resume puts the hardware into a functional state
2023 * after a suspend and updates the software state as necessary. This
2024 * function is also used for restoring the GPU after a GPU reset.
2025 * Returns 0 on success, negative error code on failure.
2026 */
06ec9070 2027static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
d38ceaf9
AD
2028{
2029 int i, r;
2030
a90ad3c2
ML
2031 for (i = 0; i < adev->num_ip_blocks; i++) {
2032 if (!adev->ip_blocks[i].status.valid)
2033 continue;
a90ad3c2 2034 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
e3ecdffa
AD
2035 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2036 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
fcf0649f
CZ
2037 r = adev->ip_blocks[i].version->funcs->resume(adev);
2038 if (r) {
2039 DRM_ERROR("resume of IP block <%s> failed %d\n",
2040 adev->ip_blocks[i].version->funcs->name, r);
2041 return r;
2042 }
a90ad3c2
ML
2043 }
2044 }
2045
2046 return 0;
2047}
2048
e3ecdffa
AD
2049/**
2050 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2051 *
2052 * @adev: amdgpu_device pointer
2053 *
 2054 * Second resume function for hardware IPs. The list of all the hardware
2055 * IPs that make up the asic is walked and the resume callbacks are run for
2056 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
2057 * functional state after a suspend and updates the software state as
2058 * necessary. This function is also used for restoring the GPU after a GPU
2059 * reset.
2060 * Returns 0 on success, negative error code on failure.
2061 */
06ec9070 2062static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
d38ceaf9
AD
2063{
2064 int i, r;
2065
2066 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2067 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 2068 continue;
fcf0649f 2069 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
e3ecdffa
AD
2070 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2071 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
fcf0649f 2072 continue;
a1255107 2073 r = adev->ip_blocks[i].version->funcs->resume(adev);
2c1a2784 2074 if (r) {
a1255107
AD
2075 DRM_ERROR("resume of IP block <%s> failed %d\n",
2076 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 2077 return r;
2c1a2784 2078 }
d38ceaf9
AD
2079 }
2080
2081 return 0;
2082}
2083
e3ecdffa
AD
2084/**
2085 * amdgpu_device_ip_resume - run resume for hardware IPs
2086 *
2087 * @adev: amdgpu_device pointer
2088 *
2089 * Main resume function for hardware IPs. The hardware IPs
 2090 * are split into two resume functions because they are
 2091 * also used in recovering from a GPU reset, and some additional
 2092 * steps need to be taken between them. In this case (S3/S4) they are
2093 * run sequentially.
2094 * Returns 0 on success, negative error code on failure.
2095 */
06ec9070 2096static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
fcf0649f
CZ
2097{
2098 int r;
2099
06ec9070 2100 r = amdgpu_device_ip_resume_phase1(adev);
fcf0649f
CZ
2101 if (r)
2102 return r;
06ec9070 2103 r = amdgpu_device_ip_resume_phase2(adev);
fcf0649f
CZ
2104
2105 return r;
2106}
2107
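/* Illustrative sketch: in the GPU reset path the two phases are not run
 * back to back; recovery steps sit between them (see amdgpu_device_reset()
 * below, simplified here):
 *
 *	r = amdgpu_device_ip_resume_phase1(adev);	// COMMON, GMC, IH
 *	vram_lost = amdgpu_device_check_vram_lost(adev);// needs GMC up
 *	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
 *	r = amdgpu_device_ip_resume_phase2(adev);	// everything else
 */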
e3ecdffa
AD
2108/**
2109 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2110 *
2111 * @adev: amdgpu_device pointer
2112 *
2113 * Query the VBIOS data tables to determine if the board supports SR-IOV.
2114 */
4e99a44e 2115static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 2116{
6867e1b5
ML
2117 if (amdgpu_sriov_vf(adev)) {
2118 if (adev->is_atom_fw) {
2119 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2120 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2121 } else {
2122 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2123 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2124 }
2125
2126 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2127 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
a5bde2f9 2128 }
048765ad
AR
2129}
2130
e3ecdffa
AD
2131/**
2132 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
2133 *
2134 * @asic_type: AMD asic type
2135 *
 2136 * Check if there is DC (new modesetting infrastructure) support for an asic.
 2137 * Returns true if DC has support, false if not.
2138 */
4562236b
HW
2139bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2140{
2141 switch (asic_type) {
2142#if defined(CONFIG_DRM_AMD_DC)
2143 case CHIP_BONAIRE:
2144 case CHIP_HAWAII:
0d6fbccb 2145 case CHIP_KAVERI:
367e6687
AD
2146 case CHIP_KABINI:
2147 case CHIP_MULLINS:
4562236b
HW
2148 case CHIP_CARRIZO:
2149 case CHIP_STONEY:
4562236b 2150 case CHIP_POLARIS10:
675fd32b 2151 case CHIP_POLARIS11:
2c8ad2d5 2152 case CHIP_POLARIS12:
675fd32b 2153 case CHIP_VEGAM:
4562236b
HW
2154 case CHIP_TONGA:
2155 case CHIP_FIJI:
42f8ffa1 2156 case CHIP_VEGA10:
dca7b401 2157 case CHIP_VEGA12:
42f8ffa1 2158#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
fd187853 2159 case CHIP_RAVEN:
42f8ffa1 2160#endif
fd187853 2161 return amdgpu_dc != 0;
4562236b
HW
2162#endif
2163 default:
2164 return false;
2165 }
2166}
2167
2168/**
2169 * amdgpu_device_has_dc_support - check if dc is supported
2170 *
 2171 * @adev: amdgpu_device pointer
2172 *
2173 * Returns true for supported, false for not supported
2174 */
2175bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2176{
2555039d
XY
2177 if (amdgpu_sriov_vf(adev))
2178 return false;
2179
4562236b
HW
2180 return amdgpu_device_asic_has_dc_support(adev->asic_type);
2181}
2182
d38ceaf9
AD
2183/**
2184 * amdgpu_device_init - initialize the driver
2185 *
2186 * @adev: amdgpu_device pointer
 2187 * @ddev: drm dev pointer
2188 * @pdev: pci dev pointer
2189 * @flags: driver flags
2190 *
2191 * Initializes the driver info and hw (all asics).
2192 * Returns 0 for success or an error on failure.
2193 * Called at driver startup.
2194 */
2195int amdgpu_device_init(struct amdgpu_device *adev,
2196 struct drm_device *ddev,
2197 struct pci_dev *pdev,
2198 uint32_t flags)
2199{
2200 int r, i;
2201 bool runtime = false;
95844d20 2202 u32 max_MBps;
d38ceaf9
AD
2203
2204 adev->shutdown = false;
2205 adev->dev = &pdev->dev;
2206 adev->ddev = ddev;
2207 adev->pdev = pdev;
2208 adev->flags = flags;
2f7d10b3 2209 adev->asic_type = flags & AMD_ASIC_MASK;
d38ceaf9 2210 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
593aa2d2
SL
2211 if (amdgpu_emu_mode == 1)
2212 adev->usec_timeout *= 2;
770d13b1 2213 adev->gmc.gart_size = 512 * 1024 * 1024;
d38ceaf9
AD
2214 adev->accel_working = false;
2215 adev->num_rings = 0;
2216 adev->mman.buffer_funcs = NULL;
2217 adev->mman.buffer_funcs_ring = NULL;
2218 adev->vm_manager.vm_pte_funcs = NULL;
2d55e45a 2219 adev->vm_manager.vm_pte_num_rings = 0;
132f34e4 2220 adev->gmc.gmc_funcs = NULL;
f54d1867 2221 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
b8866c26 2222 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
d38ceaf9
AD
2223
2224 adev->smc_rreg = &amdgpu_invalid_rreg;
2225 adev->smc_wreg = &amdgpu_invalid_wreg;
2226 adev->pcie_rreg = &amdgpu_invalid_rreg;
2227 adev->pcie_wreg = &amdgpu_invalid_wreg;
36b9a952
HR
2228 adev->pciep_rreg = &amdgpu_invalid_rreg;
2229 adev->pciep_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
2230 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2231 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2232 adev->didt_rreg = &amdgpu_invalid_rreg;
2233 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
2234 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2235 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
2236 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2237 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2238
3e39ab90
AD
2239 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2240 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2241 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
2242
2243 /* mutex initialization are all done here so we
2244 * can recall function without having locking issues */
d38ceaf9 2245 atomic_set(&adev->irq.ih.lock, 0);
0e5ca0d1 2246 mutex_init(&adev->firmware.mutex);
d38ceaf9
AD
2247 mutex_init(&adev->pm.mutex);
2248 mutex_init(&adev->gfx.gpu_clock_mutex);
2249 mutex_init(&adev->srbm_mutex);
b8866c26 2250 mutex_init(&adev->gfx.pipe_reserve_mutex);
d38ceaf9 2251 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9 2252 mutex_init(&adev->mn_lock);
e23b74aa 2253 mutex_init(&adev->virt.vf_errors.lock);
d38ceaf9 2254 hash_init(adev->mn_hash);
13a752e3 2255 mutex_init(&adev->lock_reset);
d38ceaf9 2256
06ec9070 2257 amdgpu_device_check_arguments(adev);
d38ceaf9 2258
d38ceaf9
AD
2259 spin_lock_init(&adev->mmio_idx_lock);
2260 spin_lock_init(&adev->smc_idx_lock);
2261 spin_lock_init(&adev->pcie_idx_lock);
2262 spin_lock_init(&adev->uvd_ctx_idx_lock);
2263 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 2264 spin_lock_init(&adev->gc_cac_idx_lock);
16abb5d2 2265 spin_lock_init(&adev->se_cac_idx_lock);
d38ceaf9 2266 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 2267 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 2268
0c4e7fa5
CZ
2269 INIT_LIST_HEAD(&adev->shadow_list);
2270 mutex_init(&adev->shadow_list_lock);
2271
795f2813
AR
2272 INIT_LIST_HEAD(&adev->ring_lru_list);
2273 spin_lock_init(&adev->ring_lru_list_lock);
2274
06ec9070
AD
2275 INIT_DELAYED_WORK(&adev->late_init_work,
2276 amdgpu_device_ip_late_init_func_handler);
2dc80b00 2277
0fa49558
AX
2278 /* Registers mapping */
2279 /* TODO: block userspace mapping of io register */
da69c161
KW
2280 if (adev->asic_type >= CHIP_BONAIRE) {
2281 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2282 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2283 } else {
2284 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2285 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2286 }
d38ceaf9 2287
d38ceaf9
AD
2288 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2289 if (adev->rmmio == NULL) {
2290 return -ENOMEM;
2291 }
2292 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2293 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2294
705e519e 2295 /* doorbell bar mapping */
06ec9070 2296 amdgpu_device_doorbell_init(adev);
d38ceaf9
AD
2297
2298 /* io port mapping */
2299 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2300 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2301 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2302 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2303 break;
2304 }
2305 }
2306 if (adev->rio_mem == NULL)
b64a18c5 2307 DRM_INFO("PCI I/O BAR is not found.\n");
d38ceaf9 2308
5494d864
AD
2309 amdgpu_device_get_pcie_info(adev);
2310
d38ceaf9 2311 /* early init functions */
06ec9070 2312 r = amdgpu_device_ip_early_init(adev);
d38ceaf9
AD
2313 if (r)
2314 return r;
2315
2316 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2317 /* this will fail for cards that aren't VGA class devices, just
2318 * ignore it */
06ec9070 2319 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
d38ceaf9 2320
e9bef455 2321 if (amdgpu_device_is_px(ddev))
d38ceaf9 2322 runtime = true;
84c8b22e
LW
2323 if (!pci_is_thunderbolt_attached(adev->pdev))
2324 vga_switcheroo_register_client(adev->pdev,
2325 &amdgpu_switcheroo_ops, runtime);
d38ceaf9
AD
2326 if (runtime)
2327 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2328
9475a943
SL
2329 if (amdgpu_emu_mode == 1) {
2330 /* post the asic on emulation mode */
2331 emu_soc_asic_init(adev);
bfca0289 2332 goto fence_driver_init;
9475a943 2333 }
bfca0289 2334
d38ceaf9 2335 /* Read BIOS */
83ba126a
AD
2336 if (!amdgpu_get_bios(adev)) {
2337 r = -EINVAL;
2338 goto failed;
2339 }
f7e9e9fe 2340
d38ceaf9 2341 r = amdgpu_atombios_init(adev);
2c1a2784
AD
2342 if (r) {
2343 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
e23b74aa 2344 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
83ba126a 2345 goto failed;
2c1a2784 2346 }
d38ceaf9 2347
4e99a44e
ML
2348 /* detect if we are with an SRIOV vbios */
2349 amdgpu_device_detect_sriov_bios(adev);
048765ad 2350
d38ceaf9 2351 /* Post card if necessary */
39c640c0 2352 if (amdgpu_device_need_post(adev)) {
d38ceaf9 2353 if (!adev->bios) {
bec86378 2354 dev_err(adev->dev, "no vBIOS found\n");
83ba126a
AD
2355 r = -EINVAL;
2356 goto failed;
d38ceaf9 2357 }
bec86378 2358 DRM_INFO("GPU posting now...\n");
4e99a44e
ML
2359 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2360 if (r) {
2361 dev_err(adev->dev, "gpu post error!\n");
2362 goto failed;
2363 }
d38ceaf9
AD
2364 }
2365
88b64e95
AD
2366 if (adev->is_atom_fw) {
2367 /* Initialize clocks */
2368 r = amdgpu_atomfirmware_get_clock_info(adev);
2369 if (r) {
2370 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
e23b74aa 2371 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
88b64e95
AD
2372 goto failed;
2373 }
2374 } else {
a5bde2f9
AD
2375 /* Initialize clocks */
2376 r = amdgpu_atombios_get_clock_info(adev);
2377 if (r) {
2378 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
e23b74aa 2379 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
89041940 2380 goto failed;
a5bde2f9
AD
2381 }
2382 /* init i2c buses */
4562236b
HW
2383 if (!amdgpu_device_has_dc_support(adev))
2384 amdgpu_atombios_i2c_init(adev);
2c1a2784 2385 }
d38ceaf9 2386
bfca0289 2387fence_driver_init:
d38ceaf9
AD
2388 /* Fence driver */
2389 r = amdgpu_fence_driver_init(adev);
2c1a2784
AD
2390 if (r) {
2391 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
e23b74aa 2392 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
83ba126a 2393 goto failed;
2c1a2784 2394 }
d38ceaf9
AD
2395
2396 /* init the mode config */
2397 drm_mode_config_init(adev->ddev);
2398
06ec9070 2399 r = amdgpu_device_ip_init(adev);
d38ceaf9 2400 if (r) {
8840a387 2401 /* failed in exclusive mode due to timeout */
2402 if (amdgpu_sriov_vf(adev) &&
2403 !amdgpu_sriov_runtime(adev) &&
2404 amdgpu_virt_mmio_blocked(adev) &&
2405 !amdgpu_virt_wait_reset(adev)) {
2406 dev_err(adev->dev, "VF exclusive mode timeout\n");
1daee8b4
PD
2407 /* Don't send request since VF is inactive. */
2408 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
2409 adev->virt.ops = NULL;
8840a387 2410 r = -EAGAIN;
2411 goto failed;
2412 }
06ec9070 2413 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
e23b74aa 2414 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
83ba126a 2415 goto failed;
d38ceaf9
AD
2416 }
2417
2418 adev->accel_working = true;
2419
e59c0205
AX
2420 amdgpu_vm_check_compute_bug(adev);
2421
95844d20
MO
2422 /* Initialize the buffer migration limit. */
2423 if (amdgpu_moverate >= 0)
2424 max_MBps = amdgpu_moverate;
2425 else
2426 max_MBps = 8; /* Allow 8 MB/s. */
2427 /* Get a log2 for easy divisions. */
2428 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
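	/* Hedged aside, not driver logic: because 1 MB/s moves 1 byte/us,
	 * storing ilog2(max_MBps) lets later rate math use a shift instead
	 * of a divide, roughly: time_us ~= bytes >> log2_max_MBps (exact
	 * when max_MBps is a power of two, as with the 8 MB/s default).
	 */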
2429
d38ceaf9
AD
2430 r = amdgpu_ib_pool_init(adev);
2431 if (r) {
2432 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
e23b74aa 2433 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
83ba126a 2434 goto failed;
d38ceaf9
AD
2435 }
2436
2dc8f81e
HC
2437 if (amdgpu_sriov_vf(adev))
2438 amdgpu_virt_init_data_exchange(adev);
2439
9bc92b9c
ML
2440 amdgpu_fbdev_init(adev);
2441
d2f52ac8
RZ
2442 r = amdgpu_pm_sysfs_init(adev);
2443 if (r)
2444 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2445
75758255 2446 r = amdgpu_debugfs_gem_init(adev);
3f14e623 2447 if (r)
d38ceaf9 2448 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
d38ceaf9
AD
2449
2450 r = amdgpu_debugfs_regs_init(adev);
3f14e623 2451 if (r)
d38ceaf9 2452 DRM_ERROR("registering register debugfs failed (%d).\n", r);
d38ceaf9 2453
50ab2533 2454 r = amdgpu_debugfs_firmware_init(adev);
3f14e623 2455 if (r)
50ab2533 2456 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
50ab2533 2457
763efb6c 2458 r = amdgpu_debugfs_init(adev);
db95e218 2459 if (r)
763efb6c 2460 DRM_ERROR("Creating debugfs files failed (%d).\n", r);
db95e218 2461
d38ceaf9
AD
2462 if ((amdgpu_testing & 1)) {
2463 if (adev->accel_working)
2464 amdgpu_test_moves(adev);
2465 else
2466 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2467 }
d38ceaf9
AD
2468 if (amdgpu_benchmarking) {
2469 if (adev->accel_working)
2470 amdgpu_benchmark(adev, amdgpu_benchmarking);
2471 else
2472 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2473 }
2474
2475 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2476 * explicit gating rather than handling it automatically.
2477 */
06ec9070 2478 r = amdgpu_device_ip_late_init(adev);
2c1a2784 2479 if (r) {
06ec9070 2480 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
e23b74aa 2481 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
83ba126a 2482 goto failed;
2c1a2784 2483 }
d38ceaf9
AD
2484
2485 return 0;
83ba126a
AD
2486
2487failed:
89041940 2488 amdgpu_vf_error_trans_all(adev);
83ba126a
AD
2489 if (runtime)
2490 vga_switcheroo_fini_domain_pm_ops(adev->dev);
8840a387 2491
83ba126a 2492 return r;
d38ceaf9
AD
2493}
2494
d38ceaf9
AD
2495/**
2496 * amdgpu_device_fini - tear down the driver
2497 *
2498 * @adev: amdgpu_device pointer
2499 *
2500 * Tear down the driver info (all asics).
2501 * Called at driver shutdown.
2502 */
2503void amdgpu_device_fini(struct amdgpu_device *adev)
2504{
2505 int r;
2506
2507 DRM_INFO("amdgpu: finishing device.\n");
2508 adev->shutdown = true;
e5b03032
ML
2509 /* disable all interrupts */
2510 amdgpu_irq_disable_all(adev);
ff97cba8
ML
2511 if (adev->mode_info.mode_config_initialized){
2512 if (!amdgpu_device_has_dc_support(adev))
2513 drm_crtc_force_disable_all(adev->ddev);
2514 else
2515 drm_atomic_helper_shutdown(adev->ddev);
2516 }
d38ceaf9
AD
2517 amdgpu_ib_pool_fini(adev);
2518 amdgpu_fence_driver_fini(adev);
58e955d9 2519 amdgpu_pm_sysfs_fini(adev);
d38ceaf9 2520 amdgpu_fbdev_fini(adev);
06ec9070 2521 r = amdgpu_device_ip_fini(adev);
ab4fe3e1
HR
2522 if (adev->firmware.gpu_info_fw) {
2523 release_firmware(adev->firmware.gpu_info_fw);
2524 adev->firmware.gpu_info_fw = NULL;
2525 }
d38ceaf9 2526 adev->accel_working = false;
2dc80b00 2527 cancel_delayed_work_sync(&adev->late_init_work);
d38ceaf9 2528 /* free i2c buses */
4562236b
HW
2529 if (!amdgpu_device_has_dc_support(adev))
2530 amdgpu_i2c_fini(adev);
bfca0289
SL
2531
2532 if (amdgpu_emu_mode != 1)
2533 amdgpu_atombios_fini(adev);
2534
d38ceaf9
AD
2535 kfree(adev->bios);
2536 adev->bios = NULL;
84c8b22e
LW
2537 if (!pci_is_thunderbolt_attached(adev->pdev))
2538 vga_switcheroo_unregister_client(adev->pdev);
83ba126a
AD
2539 if (adev->flags & AMD_IS_PX)
2540 vga_switcheroo_fini_domain_pm_ops(adev->dev);
d38ceaf9
AD
2541 vga_client_register(adev->pdev, NULL, NULL, NULL);
2542 if (adev->rio_mem)
2543 pci_iounmap(adev->pdev, adev->rio_mem);
2544 adev->rio_mem = NULL;
2545 iounmap(adev->rmmio);
2546 adev->rmmio = NULL;
06ec9070 2547 amdgpu_device_doorbell_fini(adev);
d38ceaf9 2548 amdgpu_debugfs_regs_cleanup(adev);
d38ceaf9
AD
2549}
2550
2551
2552/*
2553 * Suspend & resume.
2554 */
2555/**
810ddc3a 2556 * amdgpu_device_suspend - initiate device suspend
d38ceaf9
AD
2557 *
 2558 * @dev: drm dev pointer
 2559 * @suspend: suspend state
2560 *
2561 * Puts the hw in the suspend state (all asics).
2562 * Returns 0 for success or an error on failure.
2563 * Called at driver suspend.
2564 */
810ddc3a 2565int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
d38ceaf9
AD
2566{
2567 struct amdgpu_device *adev;
2568 struct drm_crtc *crtc;
2569 struct drm_connector *connector;
5ceb54c6 2570 int r;
d38ceaf9
AD
2571
2572 if (dev == NULL || dev->dev_private == NULL) {
2573 return -ENODEV;
2574 }
2575
2576 adev = dev->dev_private;
2577
2578 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2579 return 0;
2580
2581 drm_kms_helper_poll_disable(dev);
2582
4562236b
HW
2583 if (!amdgpu_device_has_dc_support(adev)) {
2584 /* turn off display hw */
2585 drm_modeset_lock_all(dev);
2586 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2587 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2588 }
2589 drm_modeset_unlock_all(dev);
d38ceaf9
AD
2590 }
2591
ba997709
YZ
2592 amdgpu_amdkfd_suspend(adev);
2593
756e6880 2594 /* unpin the front buffers and cursors */
d38ceaf9 2595 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
756e6880 2596 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
e68d14dd 2597 struct drm_framebuffer *fb = crtc->primary->fb;
d38ceaf9
AD
2598 struct amdgpu_bo *robj;
2599
756e6880
AD
2600 if (amdgpu_crtc->cursor_bo) {
2601 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2602 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2603 if (r == 0) {
2604 amdgpu_bo_unpin(aobj);
2605 amdgpu_bo_unreserve(aobj);
2606 }
2607 }
2608
e68d14dd 2609 if (fb == NULL || fb->obj[0] == NULL) {
d38ceaf9
AD
2610 continue;
2611 }
e68d14dd 2612 robj = gem_to_amdgpu_bo(fb->obj[0]);
d38ceaf9
AD
2613 /* don't unpin kernel fb objects */
2614 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
7a6901d7 2615 r = amdgpu_bo_reserve(robj, true);
d38ceaf9
AD
2616 if (r == 0) {
2617 amdgpu_bo_unpin(robj);
2618 amdgpu_bo_unreserve(robj);
2619 }
2620 }
2621 }
2622 /* evict vram memory */
2623 amdgpu_bo_evict_vram(adev);
2624
5ceb54c6 2625 amdgpu_fence_driver_suspend(adev);
d38ceaf9 2626
cdd61df6 2627 r = amdgpu_device_ip_suspend(adev);
d38ceaf9 2628
a0a71e49
AD
2629 /* evict remaining vram memory
2630 * This second call to evict vram is to evict the gart page table
2631 * using the CPU.
2632 */
d38ceaf9
AD
2633 amdgpu_bo_evict_vram(adev);
2634
2635 pci_save_state(dev->pdev);
2636 if (suspend) {
2637 /* Shut down the device */
2638 pci_disable_device(dev->pdev);
2639 pci_set_power_state(dev->pdev, PCI_D3hot);
74b0b157 2640 } else {
2641 r = amdgpu_asic_reset(adev);
2642 if (r)
2643 DRM_ERROR("amdgpu asic reset failed\n");
d38ceaf9
AD
2644 }
2645
2646 if (fbcon) {
2647 console_lock();
2648 amdgpu_fbdev_set_suspend(adev, 1);
2649 console_unlock();
2650 }
2651 return 0;
2652}
2653
2654/**
810ddc3a 2655 * amdgpu_device_resume - initiate device resume
d38ceaf9
AD
2656 *
 2657 * @dev: drm dev pointer
2658 *
2659 * Bring the hw back to operating state (all asics).
2660 * Returns 0 for success or an error on failure.
2661 * Called at driver resume.
2662 */
810ddc3a 2663int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
d38ceaf9
AD
2664{
2665 struct drm_connector *connector;
2666 struct amdgpu_device *adev = dev->dev_private;
756e6880 2667 struct drm_crtc *crtc;
03161a6e 2668 int r = 0;
d38ceaf9
AD
2669
2670 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2671 return 0;
2672
74b0b157 2673 if (fbcon)
d38ceaf9 2674 console_lock();
74b0b157 2675
d38ceaf9
AD
2676 if (resume) {
2677 pci_set_power_state(dev->pdev, PCI_D0);
2678 pci_restore_state(dev->pdev);
74b0b157 2679 r = pci_enable_device(dev->pdev);
03161a6e
HR
2680 if (r)
2681 goto unlock;
d38ceaf9
AD
2682 }
2683
2684 /* post card */
39c640c0 2685 if (amdgpu_device_need_post(adev)) {
74b0b157 2686 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2687 if (r)
2688 DRM_ERROR("amdgpu asic init failed\n");
2689 }
d38ceaf9 2690
06ec9070 2691 r = amdgpu_device_ip_resume(adev);
e6707218 2692 if (r) {
06ec9070 2693 DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
03161a6e 2694 goto unlock;
e6707218 2695 }
5ceb54c6
AD
2696 amdgpu_fence_driver_resume(adev);
2697
d38ceaf9 2698
06ec9070 2699 r = amdgpu_device_ip_late_init(adev);
03161a6e
HR
2700 if (r)
2701 goto unlock;
d38ceaf9 2702
756e6880
AD
2703 /* pin cursors */
2704 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2705 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2706
2707 if (amdgpu_crtc->cursor_bo) {
2708 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2709 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2710 if (r == 0) {
2711 r = amdgpu_bo_pin(aobj,
2712 AMDGPU_GEM_DOMAIN_VRAM,
2713 &amdgpu_crtc->cursor_addr);
2714 if (r != 0)
2715 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2716 amdgpu_bo_unreserve(aobj);
2717 }
2718 }
2719 }
ba997709
YZ
2720 r = amdgpu_amdkfd_resume(adev);
2721 if (r)
2722 return r;
756e6880 2723
d38ceaf9
AD
2724 /* blat the mode back in */
2725 if (fbcon) {
4562236b
HW
2726 if (!amdgpu_device_has_dc_support(adev)) {
2727 /* pre DCE11 */
2728 drm_helper_resume_force_mode(dev);
2729
2730 /* turn on display hw */
2731 drm_modeset_lock_all(dev);
2732 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2733 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2734 }
2735 drm_modeset_unlock_all(dev);
d38ceaf9
AD
2736 }
2737 }
2738
2739 drm_kms_helper_poll_enable(dev);
23a1a9e5
L
2740
2741 /*
2742 * Most of the connector probing functions try to acquire runtime pm
2743 * refs to ensure that the GPU is powered on when connector polling is
2744 * performed. Since we're calling this from a runtime PM callback,
2745 * trying to acquire rpm refs will cause us to deadlock.
2746 *
2747 * Since we're guaranteed to be holding the rpm lock, it's safe to
2748 * temporarily disable the rpm helpers so this doesn't deadlock us.
2749 */
2750#ifdef CONFIG_PM
2751 dev->dev->power.disable_depth++;
2752#endif
4562236b
HW
2753 if (!amdgpu_device_has_dc_support(adev))
2754 drm_helper_hpd_irq_event(dev);
2755 else
2756 drm_kms_helper_hotplug_event(dev);
23a1a9e5
L
2757#ifdef CONFIG_PM
2758 dev->dev->power.disable_depth--;
2759#endif
d38ceaf9 2760
03161a6e 2761 if (fbcon)
d38ceaf9 2762 amdgpu_fbdev_set_suspend(adev, 0);
03161a6e
HR
2763
2764unlock:
2765 if (fbcon)
d38ceaf9 2766 console_unlock();
d38ceaf9 2767
03161a6e 2768 return r;
d38ceaf9
AD
2769}
2770
e3ecdffa
AD
2771/**
2772 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
2773 *
2774 * @adev: amdgpu_device pointer
2775 *
2776 * The list of all the hardware IPs that make up the asic is walked and
2777 * the check_soft_reset callbacks are run. check_soft_reset determines
2778 * if the asic is still hung or not.
2779 * Returns true if any of the IPs are still in a hung state, false if not.
2780 */
06ec9070 2781static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
63fbf42f
CZ
2782{
2783 int i;
2784 bool asic_hang = false;
2785
f993d628
ML
2786 if (amdgpu_sriov_vf(adev))
2787 return true;
2788
8bc04c29
AD
2789 if (amdgpu_asic_need_full_reset(adev))
2790 return true;
2791
63fbf42f 2792 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2793 if (!adev->ip_blocks[i].status.valid)
63fbf42f 2794 continue;
a1255107
AD
2795 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2796 adev->ip_blocks[i].status.hang =
2797 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2798 if (adev->ip_blocks[i].status.hang) {
2799 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
63fbf42f
CZ
2800 asic_hang = true;
2801 }
2802 }
2803 return asic_hang;
2804}
2805
e3ecdffa
AD
2806/**
2807 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
2808 *
2809 * @adev: amdgpu_device pointer
2810 *
2811 * The list of all the hardware IPs that make up the asic is walked and the
2812 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
2813 * handles any IP specific hardware or software state changes that are
2814 * necessary for a soft reset to succeed.
2815 * Returns 0 on success, negative error code on failure.
2816 */
06ec9070 2817static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
d31a501e
CZ
2818{
2819 int i, r = 0;
2820
2821 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2822 if (!adev->ip_blocks[i].status.valid)
d31a501e 2823 continue;
a1255107
AD
2824 if (adev->ip_blocks[i].status.hang &&
2825 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2826 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
d31a501e
CZ
2827 if (r)
2828 return r;
2829 }
2830 }
2831
2832 return 0;
2833}
2834
e3ecdffa
AD
2835/**
2836 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
2837 *
2838 * @adev: amdgpu_device pointer
2839 *
2840 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
2841 * reset is necessary to recover.
2842 * Returns true if a full asic reset is required, false if not.
2843 */
06ec9070 2844static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
35d782fe 2845{
da146d3b
AD
2846 int i;
2847
8bc04c29
AD
2848 if (amdgpu_asic_need_full_reset(adev))
2849 return true;
2850
da146d3b 2851 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2852 if (!adev->ip_blocks[i].status.valid)
da146d3b 2853 continue;
a1255107
AD
2854 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2855 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2856 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
98512bb8
KW
2857 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
2858 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
a1255107 2859 if (adev->ip_blocks[i].status.hang) {
da146d3b
AD
2860 DRM_INFO("Some block need full reset!\n");
2861 return true;
2862 }
2863 }
35d782fe
CZ
2864 }
2865 return false;
2866}
2867
e3ecdffa
AD
2868/**
2869 * amdgpu_device_ip_soft_reset - do a soft reset
2870 *
2871 * @adev: amdgpu_device pointer
2872 *
2873 * The list of all the hardware IPs that make up the asic is walked and the
2874 * soft_reset callbacks are run if the block is hung. soft_reset handles any
2875 * IP specific hardware or software state changes that are necessary to soft
2876 * reset the IP.
2877 * Returns 0 on success, negative error code on failure.
2878 */
06ec9070 2879static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
35d782fe
CZ
2880{
2881 int i, r = 0;
2882
2883 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2884 if (!adev->ip_blocks[i].status.valid)
35d782fe 2885 continue;
a1255107
AD
2886 if (adev->ip_blocks[i].status.hang &&
2887 adev->ip_blocks[i].version->funcs->soft_reset) {
2888 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
35d782fe
CZ
2889 if (r)
2890 return r;
2891 }
2892 }
2893
2894 return 0;
2895}
2896
e3ecdffa
AD
2897/**
2898 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
2899 *
2900 * @adev: amdgpu_device pointer
2901 *
2902 * The list of all the hardware IPs that make up the asic is walked and the
2903 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
2904 * handles any IP specific hardware or software state changes that are
2905 * necessary after the IP has been soft reset.
2906 * Returns 0 on success, negative error code on failure.
2907 */
06ec9070 2908static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
35d782fe
CZ
2909{
2910 int i, r = 0;
2911
2912 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2913 if (!adev->ip_blocks[i].status.valid)
35d782fe 2914 continue;
a1255107
AD
2915 if (adev->ip_blocks[i].status.hang &&
2916 adev->ip_blocks[i].version->funcs->post_soft_reset)
2917 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
35d782fe
CZ
2918 if (r)
2919 return r;
2920 }
2921
2922 return 0;
2923}
2924
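/* Illustrative sketch: the four soft-reset helpers above are driven as a
 * sequence by amdgpu_device_reset() further below (simplified):
 *
 *	if (!amdgpu_device_ip_need_full_reset(adev)) {
 *		amdgpu_device_ip_pre_soft_reset(adev);
 *		r = amdgpu_device_ip_soft_reset(adev);
 *		amdgpu_device_ip_post_soft_reset(adev);
 *		if (r || amdgpu_device_ip_check_soft_reset(adev))
 *			need_full_reset = true;	// fall back to a full reset
 *	}
 */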
e3ecdffa
AD
2925/**
2926 * amdgpu_device_recover_vram_from_shadow - restore shadowed VRAM buffers
2927 *
2928 * @adev: amdgpu_device pointer
2929 * @ring: amdgpu_ring for the engine handling the buffer operations
2930 * @bo: amdgpu_bo buffer whose shadow is being restored
2931 * @fence: dma_fence associated with the operation
2932 *
2933 * Restores the VRAM buffer contents from the shadow in GTT. Used to
2934 * restore things like GPUVM page tables after a GPU reset where
2935 * the contents of VRAM might be lost.
2936 * Returns 0 on success, negative error code on failure.
2937 */
06ec9070
AD
2938static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
2939 struct amdgpu_ring *ring,
2940 struct amdgpu_bo *bo,
2941 struct dma_fence **fence)
53cdccd5
CZ
2942{
2943 uint32_t domain;
2944 int r;
2945
23d2e504
RH
2946 if (!bo->shadow)
2947 return 0;
2948
1d284797 2949 r = amdgpu_bo_reserve(bo, true);
23d2e504
RH
2950 if (r)
2951 return r;
2952 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2953 /* if bo has been evicted, then no need to recover */
2954 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
82521316
RH
2955 r = amdgpu_bo_validate(bo->shadow);
2956 if (r) {
2957 DRM_ERROR("bo validate failed!\n");
2958 goto err;
2959 }
2960
23d2e504 2961 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
53cdccd5 2962 NULL, fence, true);
23d2e504
RH
2963 if (r) {
2964 DRM_ERROR("recover page table failed!\n");
2965 goto err;
2966 }
2967 }
53cdccd5 2968err:
23d2e504
RH
2969 amdgpu_bo_unreserve(bo);
2970 return r;
53cdccd5
CZ
2971}
2972
e3ecdffa
AD
2973/**
2974 * amdgpu_device_handle_vram_lost - Handle the loss of VRAM contents
2975 *
2976 * @adev: amdgpu_device pointer
2977 *
2978 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
2979 * restore things like GPUVM page tables after a GPU reset where
2980 * the contents of VRAM might be lost.
2981 * Returns 0 on success, 1 on failure.
2982 */
c41d1cf6
ML
2983static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
2984{
2985 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2986 struct amdgpu_bo *bo, *tmp;
2987 struct dma_fence *fence = NULL, *next = NULL;
2988 long r = 1;
2989 int i = 0;
2990 long tmo;
2991
2992 if (amdgpu_sriov_runtime(adev))
2993 tmo = msecs_to_jiffies(amdgpu_lockup_timeout);
2994 else
2995 tmo = msecs_to_jiffies(100);
2996
2997 DRM_INFO("recover vram bo from shadow start\n");
2998 mutex_lock(&adev->shadow_list_lock);
2999 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
3000 next = NULL;
3001 amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
3002 if (fence) {
3003 r = dma_fence_wait_timeout(fence, false, tmo);
3004 if (r == 0)
3005 pr_err("wait fence %p[%d] timeout\n", fence, i);
3006 else if (r < 0)
3007 pr_err("wait fence %p[%d] interrupted\n", fence, i);
3008 if (r < 1) {
3009 dma_fence_put(fence);
3010 fence = next;
3011 break;
3012 }
3013 i++;
3014 }
3015
3016 dma_fence_put(fence);
3017 fence = next;
3018 }
3019 mutex_unlock(&adev->shadow_list_lock);
3020
3021 if (fence) {
3022 r = dma_fence_wait_timeout(fence, false, tmo);
3023 if (r == 0)
3024 pr_err("wait fence %p[%d] timeout\n", fence, i);
3025 else if (r < 0)
3026 pr_err("wait fence %p[%d] interrupted\n", fence, i);
3027
3028 }
3029 dma_fence_put(fence);
3030
3031 if (r > 0)
3032 DRM_INFO("recover vram bo from shadow done\n");
3033 else
3034 DRM_ERROR("recover vram bo from shadow failed\n");
3035
e3ecdffa 3036 return (r > 0) ? 0 : 1;
c41d1cf6
ML
3037}
3038
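/* Illustrative sketch (not driver code): a shadow is a GTT-resident copy of
 * a VRAM BO, tracked on adev->shadow_list. Per BO, the walk above roughly
 * boils down to a restore plus a bounded wait:
 *
 *	amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
 *	r = dma_fence_wait_timeout(next, false, tmo);	// tmo as chosen above
 */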
e3ecdffa 3039/**
06ec9070 3040 * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
a90ad3c2
ML
3041 *
3042 * @adev: amdgpu device pointer
a90ad3c2 3043 *
5740682e
ML
 3044 * Attempt a soft reset, falling back to a full reset if needed, and reinitialize the ASIC.
 3045 * Returns 0 on success, negative error code on failure.
e3ecdffa 3046 */
c41d1cf6 3047static int amdgpu_device_reset(struct amdgpu_device *adev)
a90ad3c2 3048{
5740682e
ML
3049 bool need_full_reset, vram_lost = 0;
3050 int r;
a90ad3c2 3051
06ec9070 3052 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
a90ad3c2 3053
5740682e 3054 if (!need_full_reset) {
06ec9070
AD
3055 amdgpu_device_ip_pre_soft_reset(adev);
3056 r = amdgpu_device_ip_soft_reset(adev);
3057 amdgpu_device_ip_post_soft_reset(adev);
3058 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
5740682e
ML
3059 DRM_INFO("soft reset failed, will fallback to full reset!\n");
3060 need_full_reset = true;
3061 }
5740682e 3062 }
a90ad3c2 3063
5740682e 3064 if (need_full_reset) {
cdd61df6 3065 r = amdgpu_device_ip_suspend(adev);
a90ad3c2 3066
5740682e 3067retry:
5740682e 3068 r = amdgpu_asic_reset(adev);
5740682e
ML
3069 /* post card */
3070 amdgpu_atom_asic_init(adev->mode_info.atom_context);
65781c78 3071
5740682e
ML
3072 if (!r) {
3073 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
06ec9070 3074 r = amdgpu_device_ip_resume_phase1(adev);
5740682e
ML
3075 if (r)
3076 goto out;
65781c78 3077
06ec9070 3078 vram_lost = amdgpu_device_check_vram_lost(adev);
5740682e
ML
3079 if (vram_lost) {
3080 DRM_ERROR("VRAM is lost!\n");
3081 atomic_inc(&adev->vram_lost_counter);
3082 }
3083
c1c7ce8f
CK
3084 r = amdgpu_gtt_mgr_recover(
3085 &adev->mman.bdev.man[TTM_PL_TT]);
5740682e
ML
3086 if (r)
3087 goto out;
3088
06ec9070 3089 r = amdgpu_device_ip_resume_phase2(adev);
5740682e
ML
3090 if (r)
3091 goto out;
3092
3093 if (vram_lost)
06ec9070 3094 amdgpu_device_fill_reset_magic(adev);
65781c78 3095 }
5740682e 3096 }
65781c78 3097
5740682e
ML
3098out:
3099 if (!r) {
3100 amdgpu_irq_gpu_reset_resume_helper(adev);
3101 r = amdgpu_ib_ring_tests(adev);
3102 if (r) {
3103 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
cdd61df6 3104 r = amdgpu_device_ip_suspend(adev);
5740682e
ML
3105 need_full_reset = true;
3106 goto retry;
3107 }
3108 }
65781c78 3109
c41d1cf6
ML
3110 if (!r && ((need_full_reset && !(adev->flags & AMD_IS_APU)) || vram_lost))
3111 r = amdgpu_device_handle_vram_lost(adev);
a90ad3c2 3112
5740682e
ML
3113 return r;
3114}
a90ad3c2 3115
e3ecdffa 3116/**
06ec9070 3117 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
5740682e
ML
3118 *
3119 * @adev: amdgpu device pointer
5740682e
ML
3120 *
 3121 * Do a VF FLR and reinitialize the ASIC.
 3122 * Returns 0 on success, negative error code on failure.
e3ecdffa
AD
3123 */
3124static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3125 bool from_hypervisor)
5740682e
ML
3126{
3127 int r;
3128
3129 if (from_hypervisor)
3130 r = amdgpu_virt_request_full_gpu(adev, true);
3131 else
3132 r = amdgpu_virt_reset_gpu(adev);
3133 if (r)
3134 return r;
a90ad3c2
ML
3135
3136 /* Resume IP prior to SMC */
06ec9070 3137 r = amdgpu_device_ip_reinit_early_sriov(adev);
5740682e
ML
3138 if (r)
3139 goto error;
a90ad3c2
ML
3140
 3141 /* we need to recover the GART prior to running SMC/CP/SDMA resume */
c1c7ce8f 3142 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
a90ad3c2
ML
3143
3144 /* now we are okay to resume SMC/CP/SDMA */
06ec9070 3145 r = amdgpu_device_ip_reinit_late_sriov(adev);
5740682e
ML
3146 if (r)
3147 goto error;
a90ad3c2
ML
3148
3149 amdgpu_irq_gpu_reset_resume_helper(adev);
5740682e 3150 r = amdgpu_ib_ring_tests(adev);
a90ad3c2 3151
abc34253
ED
3152error:
3153 amdgpu_virt_release_full_gpu(adev, true);
c41d1cf6
ML
3154 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
3155 atomic_inc(&adev->vram_lost_counter);
3156 r = amdgpu_device_handle_vram_lost(adev);
a90ad3c2
ML
3157 }
3158
3159 return r;
3160}
3161
d38ceaf9 3162/**
5f152b5e 3163 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
d38ceaf9
AD
3164 *
3165 * @adev: amdgpu device pointer
5740682e 3166 * @job: which job triggered the hang
dcebf026 3167 * @force: forces reset regardless of amdgpu_gpu_recovery
d38ceaf9 3168 *
5740682e 3169 * Attempt to reset the GPU if it has hung (all asics).
d38ceaf9
AD
3170 * Returns 0 for success or an error on failure.
3171 */
5f152b5e
AD
3172int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
3173 struct amdgpu_job *job, bool force)
d38ceaf9 3174{
4562236b 3175 struct drm_atomic_state *state = NULL;
5740682e 3176 int i, r, resched;
fb140b29 3177
54bc1398 3178 if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
63fbf42f
CZ
3179 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
3180 return 0;
3181 }
d38ceaf9 3182
dcebf026
AG
3183 if (!force && (amdgpu_gpu_recovery == 0 ||
3184 (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) {
3185 DRM_INFO("GPU recovery disabled.\n");
3186 return 0;
3187 }
3188
5740682e
ML
3189 dev_info(adev->dev, "GPU reset begin!\n");
3190
13a752e3 3191 mutex_lock(&adev->lock_reset);
d94aed5a 3192 atomic_inc(&adev->gpu_reset_counter);
13a752e3 3193 adev->in_gpu_reset = 1;
d38ceaf9 3194
a3c47d6b
CZ
3195 /* block TTM */
3196 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
71182665 3197
4562236b
HW
3198 /* store modesetting */
3199 if (amdgpu_device_has_dc_support(adev))
3200 state = drm_atomic_helper_suspend(adev->ddev);
a3c47d6b 3201
71182665 3202 /* block all schedulers and reset given job's ring */
0875dc9e
CZ
3203 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3204 struct amdgpu_ring *ring = adev->rings[i];
3205
51687759 3206 if (!ring || !ring->sched.thread)
0875dc9e 3207 continue;
5740682e 3208
71182665
ML
3209 kthread_park(ring->sched.thread);
3210
5740682e
ML
3211 if (job && job->ring->idx != i)
3212 continue;
3213
1b1f42d8 3214 drm_sched_hw_job_reset(&ring->sched, &job->base);
5740682e 3215
2f9d4084
ML
3216 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3217 amdgpu_fence_driver_force_completion(ring);
0875dc9e 3218 }
d38ceaf9 3219
5740682e 3220 if (amdgpu_sriov_vf(adev))
c41d1cf6 3221 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5740682e 3222 else
c41d1cf6 3223 r = amdgpu_device_reset(adev);
5740682e 3224
71182665
ML
3225 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3226 struct amdgpu_ring *ring = adev->rings[i];
53cdccd5 3227
71182665
ML
3228 if (!ring || !ring->sched.thread)
3229 continue;
5740682e 3230
71182665
ML
 3231 /* only need to recover the scheduler of the given job's ring,
 3232 * or of all rings (in the case @job is NULL),
 3233 * after the reset above has completed
3234 */
3235 if ((!job || job->ring->idx == i) && !r)
1b1f42d8 3236 drm_sched_job_recovery(&ring->sched);
5740682e 3237
71182665 3238 kthread_unpark(ring->sched.thread);
d38ceaf9
AD
3239 }
3240
4562236b 3241 if (amdgpu_device_has_dc_support(adev)) {
5740682e
ML
3242 if (drm_atomic_helper_resume(adev->ddev, state))
 3243 dev_info(adev->dev, "drm resume failed: %d\n", r);
5740682e 3244 } else {
4562236b 3245 drm_helper_resume_force_mode(adev->ddev);
5740682e 3246 }
d38ceaf9
AD
3247
3248 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
5740682e 3249
89041940 3250 if (r) {
d38ceaf9 3251 /* bad news, how to tell it to userspace ? */
5740682e
ML
3252 dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
3253 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
3254 } else {
 3255 dev_info(adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
89041940 3256 }
d38ceaf9 3257
89041940 3258 amdgpu_vf_error_trans_all(adev);
13a752e3
ML
3259 adev->in_gpu_reset = 0;
3260 mutex_unlock(&adev->lock_reset);
d38ceaf9
AD
3261 return r;
3262}
3263
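/* A minimal usage sketch, assuming a scheduler timeout callback along the
 * lines of the one in amdgpu_job.c (names simplified, not the exact
 * upstream handler):
 *
 *	static void job_timedout(struct drm_sched_job *s_job)
 *	{
 *		struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 *
 *		amdgpu_device_gpu_recover(job->adev, job, false);
 *	}
 */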
e3ecdffa
AD
3264/**
 3265 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
3266 *
3267 * @adev: amdgpu_device pointer
3268 *
 3269 * Fetches and stores in the driver the PCIE capabilities (gen speed
3270 * and lanes) of the slot the device is in. Handles APUs and
3271 * virtualized environments where PCIE config space may not be available.
3272 */
5494d864 3273static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
d0dd7f0c
AD
3274{
3275 u32 mask;
3276 int ret;
3277
cd474ba0
AD
3278 if (amdgpu_pcie_gen_cap)
3279 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 3280
cd474ba0
AD
3281 if (amdgpu_pcie_lane_cap)
3282 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 3283
cd474ba0
AD
3284 /* covers APUs as well */
3285 if (pci_is_root_bus(adev->pdev->bus)) {
3286 if (adev->pm.pcie_gen_mask == 0)
3287 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3288 if (adev->pm.pcie_mlw_mask == 0)
3289 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 3290 return;
cd474ba0 3291 }
d0dd7f0c 3292
cd474ba0
AD
3293 if (adev->pm.pcie_gen_mask == 0) {
3294 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
3295 if (!ret) {
3296 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3297 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3298 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3299
3300 if (mask & DRM_PCIE_SPEED_25)
3301 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
3302 if (mask & DRM_PCIE_SPEED_50)
3303 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
3304 if (mask & DRM_PCIE_SPEED_80)
3305 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
3306 } else {
3307 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3308 }
3309 }
3310 if (adev->pm.pcie_mlw_mask == 0) {
3311 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
3312 if (!ret) {
3313 switch (mask) {
3314 case 32:
3315 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
3316 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3317 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3318 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3319 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3320 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3321 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3322 break;
3323 case 16:
3324 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3325 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3326 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3327 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3328 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3329 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3330 break;
3331 case 12:
3332 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3333 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3334 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3335 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3336 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3337 break;
3338 case 8:
3339 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3340 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3341 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3342 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3343 break;
3344 case 4:
3345 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3346 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3347 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3348 break;
3349 case 2:
3350 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3351 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3352 break;
3353 case 1:
3354 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
3355 break;
3356 default:
3357 break;
3358 }
3359 } else {
3360 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c
AD
3361 }
3362 }
3363}
d38ceaf9 3364