drm/amdgpu: Delete some cgs functions
[linux-2.6-block.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_device.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");

#define AMDGPU_RESUME_MS	2000

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGA10",
	"VEGA12",
	"RAVEN",
	"LAST",
};

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * amdgpu_device_is_px - Is the device a dGPU with HG/PX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with HG/PX power control,
 * otherwise return false.
 */
bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
/**
 * amdgpu_mm_rreg - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_rreg(adev, reg);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

/*
 * MMIO register read with bytes helper functions
 * @offset: bytes offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (offset < adev->rmmio_size)
		return readb(adev->rmmio + offset);
	BUG();
}

/*
 * MMIO register write with bytes helper functions
 * @offset: bytes offset from MMIO start
 * @value: the value to be written to the register
 */
/**
 * amdgpu_mm_wreg8 - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}

/**
 * amdgpu_mm_wreg - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_wreg(adev, reg, v);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}
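
/*
 * Illustrative sketch, not part of the original file: callers normally go
 * through the RREG32()/WREG32() macros, which land in amdgpu_mm_rreg() and
 * amdgpu_mm_wreg(), e.g. for a read-modify-write. mmEXAMPLE_REG is a
 * placeholder register name, not a real one.
 */
#if 0
static void amdgpu_example_reg_rmw(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32(mmEXAMPLE_REG);	/* routed through amdgpu_mm_rreg() */
	tmp &= ~0x3;			/* clear a two-bit field */
	tmp |= 0x1;			/* program the new value */
	WREG32(mmEXAMPLE_REG, tmp);	/* routed through amdgpu_mm_wreg() */
}
#endif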

/**
 * amdgpu_io_rreg - read an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 *
 * Returns the 32 bit value from the offset specified.
 */
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

/**
 * amdgpu_io_wreg - write to an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}
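
/*
 * Illustrative sketch, not part of the original file: a ring backend bumps
 * its write pointer through the 64 bit doorbell helper roughly like this
 * on VEGA10+ (simplified from how the GFX ring code uses it).
 */
#if 0
static void amdgpu_example_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		amdgpu_mm_wdoorbell64(adev, ring->doorbell_index, ring->wptr);
}
#endif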

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
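
/*
 * Illustrative sketch, not part of the original file: golden register lists
 * are flat {reg, and_mask, or_mask} triples. The offsets and values below
 * are placeholders, not real golden settings. An and_mask of 0xffffffff
 * overwrites the whole register with or_mask; anything else clears the
 * and_mask bits and ORs in or_mask.
 */
#if 0
static const u32 example_golden_settings[] = {
	0x0000261c, 0xffffffff, 0x00000800,	/* full overwrite */
	0x00002ae4, 0x00073ffe, 0x000022a2,	/* read-modify-write */
};

static void example_init_golden_registers(struct amdgpu_device *adev)
{
	amdgpu_device_program_register_sequence(adev,
						example_golden_settings,
						ARRAY_SIZE(example_golden_settings));
}
#endif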

/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}


/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}
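
/*
 * Illustrative sketch, not part of the original file: the typical lifecycle
 * of a writeback slot. amdgpu_device_wb_get() hands back a dword offset into
 * adev->wb.wb; the matching GPU address is adev->wb.gpu_addr + offset * 4.
 */
#if 0
static int amdgpu_example_use_wb(struct amdgpu_device *adev)
{
	u32 wb;
	int r;

	r = amdgpu_device_wb_get(adev, &wb);	/* allocate a slot */
	if (r)
		return r;

	adev->wb.wb[wb] = 0;	/* CPU clears it, the GPU writes status here */
	/* ... hand (adev->wb.gpu_addr + wb * 4) to the engine ... */

	amdgpu_device_wb_free(adev, wb);	/* release it when done */
	return 0;
}
#endif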

/**
 * amdgpu_device_vram_location - try to find VRAM location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter.
 */
void amdgpu_device_vram_location(struct amdgpu_device *adev,
				 struct amdgpu_gmc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_device_gart_location - try to find GTT location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left, we adjust the GTT size.
 * Thus this function never fails.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_device_gart_location(struct amdgpu_device *adev,
				 struct amdgpu_gmc *mc)
{
	u64 size_af, size_bf;

	size_af = adev->gmc.mc_mask - mc->vram_end;
	size_bf = mc->vram_start;
	if (size_bf > size_af) {
		if (mc->gart_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_bf;
		}
		mc->gart_start = 0;
	} else {
		if (mc->gart_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_af;
		}
		/* VCE doesn't like it when BOs cross a 4GB segment, so align
		 * the GART base on a 4GB boundary as well.
		 */
		mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
	}
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
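
/*
 * Illustrative sketch, not part of the original file: a GMC IP block places
 * VRAM first and then lets the GART take whichever side of the address
 * space is left, roughly in this order (the base address is a placeholder).
 */
#if 0
static void example_gmc_vram_gtt_location(struct amdgpu_device *adev,
					  struct amdgpu_gmc *mc)
{
	u64 base = 0;	/* placeholder: read the FB base from the hardware */

	amdgpu_device_vram_location(adev, mc, base);
	amdgpu_device_gart_location(adev, mc);
}
#endif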

/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the size we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw need post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if post is needed, false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old smc fw still need driver do vPost otherwise gpu hang, while
		 * those smc fw version above 22.15 doesn't have this flaw, so we force
		 * vpost executed for smc version below 22.15
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}
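
/*
 * Illustrative sketch, not part of the original file: roughly how device
 * init consumes this check, posting the card via the ATOM BIOS only when
 * needed (simplified).
 */
#if 0
	if (amdgpu_device_need_post(adev)) {
		if (!adev->bios) {
			dev_err(adev->dev, "no vBIOS found\n");
			return -EINVAL;
		}
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r) {
			dev_err(adev->dev, "gpu post error!\n");
			return r;
		}
	}
#endif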

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines number of bits in page table versus page directory,
 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 * page table and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !is_power_of_2(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}

	if (amdgpu_lockup_timeout == 0) {
		dev_warn(adev->dev, "lockup_timeout must be > 0, adjusting to 10000\n");
		amdgpu_lockup_timeout = 10000;
	}

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}
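
/*
 * Illustrative sketch, not part of the original file: how an IP driver
 * typically uses this helper, ungating a block's clocks around a register
 * sequence and gating them again afterwards.
 */
#if 0
	r = amdgpu_device_ip_set_clockgating_state(adev,
						   AMD_IP_BLOCK_TYPE_GFX,
						   AMD_CG_STATE_UNGATE);
	if (r)
		return r;
	/* ... program GFX registers ... */
	r = amdgpu_device_ip_set_clockgating_state(adev,
						   AMD_IP_BLOCK_TYPE_GFX,
						   AMD_CG_STATE_GATE);
#endif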

/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 *
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 *
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
 */
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

/**
 * amdgpu_device_ip_wait_for_idle - wait for idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
 */
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}

/**
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
 */
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}

/**
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
 */
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}
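
/*
 * Illustrative sketch, not part of the original file: gating a feature on a
 * minimum IP version, as callers of this helper do. Note the inverted
 * sense: 0 means "equal or greater".
 */
#if 0
	/* true when the SMC block is at least version 7.1 */
	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC,
					       7, 1) == 0) {
		/* ... enable a feature that needs SMU 7.1+ ... */
	}
#endif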

/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
		 ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}
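
/*
 * Illustrative sketch, not part of the original file: an asic's
 * *_set_ip_blocks() function registers its IP blocks in init order with
 * this helper. The block version structs named here are placeholders.
 */
#if 0
int example_set_ip_blocks(struct amdgpu_device *adev)
{
	amdgpu_device_ip_block_add(adev, &example_common_ip_block);
	amdgpu_device_ip_block_add(adev, &example_gmc_ip_block);
	amdgpu_device_ip_block_add(adev, &example_ih_ip_block);
	amdgpu_device_ip_block_add(adev, &example_gfx_ip_block);
	return 0;
}
#endif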

/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display. This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
 */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

/**
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}

/**
 * amdgpu_device_ip_early_init - run early init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Early initialization pass for hardware IPs. The hardware IPs that make
 * up each asic are discovered and each IP's early_init callback is run. This
 * is the first stage in initializing the asic.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
		if (adev->asic_type == CHIP_RAVEN)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	amdgpu_amdkfd_device_probe(adev);

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return -EAGAIN;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}

/**
 * amdgpu_device_ip_init - run init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main initialization pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
 * are run. sw_init initializes the software state associated with each IP
 * and hw_init initializes the hardware associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.sw = true;

		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_device_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				return r;
			}
			r = amdgpu_device_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					return r;
				}
			}
		}
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		if (adev->ip_blocks[i].status.hw)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	amdgpu_amdkfd_device_init(adev);

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, true);

	return 0;
}

/**
 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
 *
 * @adev: amdgpu_device pointer
 *
 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
 * this function before a GPU reset. If the value is retained after a
 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
 */
static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}

/**
 * amdgpu_device_check_vram_lost - check if vram is valid
 *
 * @adev: amdgpu_device pointer
 *
 * Checks the reset magic value written to the gart pointer in VRAM.
 * The driver calls this after a GPU reset to see if the contents of
 * VRAM is lost or not.
 * Returns true if vram is lost, false if not.
 */
static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
	return !!memcmp(adev->gart.ptr, adev->reset_magic,
			AMDGPU_RESET_MAGIC_NUM);
}
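
/*
 * Illustrative sketch, not part of the original file: the GPU reset path
 * pairs these helpers, writing the magic at late init and comparing it
 * after a reset to decide whether VRAM contents must be restored
 * (simplified).
 */
#if 0
	/* amdgpu_device_fill_reset_magic(adev) ran earlier, at late init */
	vram_lost = amdgpu_device_check_vram_lost(adev);
	if (vram_lost) {
		DRM_INFO("VRAM is lost due to GPU reset!\n");
		atomic_inc(&adev->vram_lost_counter);
	}
#endif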

/**
 * amdgpu_device_ip_late_set_cg_state - late init for clockgating
 *
 * @adev: amdgpu_device pointer
 *
 * Late initialization pass enabling clockgating for hardware IPs.
 * The list of all the hardware IPs that make up the asic is walked and the
 * set_clockgating_state callbacks are run. This stage is run late
 * in the init process.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
{
	int i = 0, r;

	if (amdgpu_emu_mode == 1)
		return 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_GATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}
	return 0;
}

/**
 * amdgpu_device_ip_late_init - run late init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Late initialization pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the late_init callbacks are run.
 * late_init covers any special initialization that an IP requires
 * after all of the IPs have been initialized or something that needs to happen
 * late in the init process.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.late_initialized = true;
		}
	}

	mod_delayed_work(system_wq, &adev->late_init_work,
			 msecs_to_jiffies(AMDGPU_RESUME_MS));

	amdgpu_device_fill_reset_magic(adev);

	return 0;
}
1714
/**
 * amdgpu_device_ip_fini - run fini for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main teardown pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
 * are run. hw_fini tears down the hardware associated with each IP
 * and sw_fini tears down any software state associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_amdkfd_device_fini(adev);
	/* need to disable SMC first */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC &&
		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
			/* XXX handle errors */
			if (r) {
				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
			adev->ip_blocks[i].status.hw = false;
			break;
		}
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;

		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}

		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}

		adev->ip_blocks[i].status.hw = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_free_static_csa(adev);
			amdgpu_device_wb_fini(adev);
			amdgpu_device_vram_scratch_fini(adev);
		}

		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
		adev->ip_blocks[i].status.late_initialized = false;
	}

	if (amdgpu_sriov_vf(adev))
		if (amdgpu_virt_release_full_gpu(adev, false))
			DRM_ERROR("failed to release exclusive mode on fini\n");

	return 0;
}

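/*
 * Note the teardown order above: each pass walks the IP list in reverse
 * (i = num_ip_blocks - 1 .. 0) so blocks are shut down in the opposite
 * order of their initialization. A generic sketch of the idiom
 * (hypothetical names, illustration only):
 *
 *	for (i = nblocks - 1; i >= 0; i--) {
 *		if (!blocks[i].initialized)
 *			continue;
 *		blocks[i].fini();	// dependents go down before providers
 *		blocks[i].initialized = false;
 *	}
 */
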
/**
 * amdgpu_device_ip_late_init_func_handler - work handler for clockgating
 *
 * @work: work_struct
 *
 * Work handler for amdgpu_device_ip_late_set_cg_state. We put the
 * clockgating setup into a worker thread to speed up driver init and
 * resume from suspend.
 */
static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, late_init_work.work);
	amdgpu_device_ip_late_set_cg_state(adev);
}

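/*
 * The handler above uses the standard delayed-work idiom: the embedded
 * work_struct is mapped back to its containing object with container_of().
 * A condensed sketch of the same pattern (hypothetical names, for
 * illustration only):
 *
 *	struct my_dev {
 *		struct delayed_work late_work;
 *	};
 *
 *	static void my_dev_late_worker(struct work_struct *work)
 *	{
 *		struct my_dev *dev =
 *			container_of(work, struct my_dev, late_work.work);
 *		// ... heavy setup deferred off the init/resume path ...
 *	}
 *
 *	// at init time:
 *	//	INIT_DELAYED_WORK(&dev->late_work, my_dev_late_worker);
 *	//	mod_delayed_work(system_wq, &dev->late_work,
 *	//			 msecs_to_jiffies(2000));
 */
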
/**
 * amdgpu_device_ip_suspend - run suspend for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main suspend function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked, clockgating is disabled and the
 * suspend callbacks are run. suspend puts the hardware and software state
 * in each IP into a state suitable for suspend.
 * Returns 0 on success, negative error code on failure.
 */
int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
	int i, r;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	/* ungate SMC block first */
	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
						   AMD_CG_STATE_UNGATE);
	if (r) {
		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* ungate blocks so that suspend can properly shut them down */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
		}
		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, false);

	return 0;
}

static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_GMC,
		AMD_IP_BLOCK_TYPE_COMMON,
		AMD_IP_BLOCK_TYPE_IH,
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name,
				 r ? "failed" : "succeeded");
			if (r)
				return r;
		}
	}

	return 0;
}

static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_SMC,
		AMD_IP_BLOCK_TYPE_PSP,
		AMD_IP_BLOCK_TYPE_DCE,
		AMD_IP_BLOCK_TYPE_GFX,
		AMD_IP_BLOCK_TYPE_SDMA,
		AMD_IP_BLOCK_TYPE_UVD,
		AMD_IP_BLOCK_TYPE_VCE
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name,
				 r ? "failed" : "succeeded");
			if (r)
				return r;
		}
	}

	return 0;
}

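/*
 * Both SR-IOV re-init helpers above are table-driven: a static array of
 * block types fixes the bring-up order, and the IP list is scanned once
 * per entry. A generic sketch of the idiom (hypothetical names,
 * illustration only):
 *
 *	static const int init_order[] = { TYPE_MEM, TYPE_CORE, TYPE_IRQ };
 *
 *	for (i = 0; i < ARRAY_SIZE(init_order); i++)
 *		for (j = 0; j < nblocks; j++)
 *			if (blocks[j].type == init_order[i])
 *				blocks[j].init();
 *
 * The O(order * nblocks) double scan is cheap for a handful of blocks
 * and keeps the ordering policy in one obvious place.
 */
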
/**
 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * First resume function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the resume callbacks are run for
 * COMMON, GMC, and IH. resume puts the hardware into a functional state
 * after a suspend and updates the software state as necessary. This
 * function is also used for restoring the GPU after a GPU reset.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
			r = adev->ip_blocks[i].version->funcs->resume(adev);
			if (r) {
				DRM_ERROR("resume of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Second resume function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the resume callbacks are run for
 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
 * functional state after a suspend and updates the software state as
 * necessary. This function is also used for restoring the GPU after a GPU
 * reset.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_resume - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main resume function for hardware IPs. The hardware IPs
 * are split into two resume functions because they are
 * also used in recovering from a GPU reset and some additional
 * steps need to be taken between them. In this case (S3/S4) they are
 * run sequentially.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_device_ip_resume_phase1(adev);
	if (r)
		return r;
	r = amdgpu_device_ip_resume_phase2(adev);

	return r;
}

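/*
 * The phase split matters on the reset path: phase1 brings up COMMON, GMC
 * and IH so that VRAM and the GART are usable again, the caller then
 * restores the GTT backing (see amdgpu_device_reset() below), and only
 * after that does phase2 resume the engines that submit work through that
 * memory. A condensed sketch of that call order (illustration only):
 *
 *	r = amdgpu_device_ip_resume_phase1(adev);	// COMMON/GMC/IH
 *	if (!r)
 *		r = amdgpu_gtt_mgr_recover(
 *			&adev->mman.bdev.man[TTM_PL_TT]);	// remap GTT
 *	if (!r)
 *		r = amdgpu_device_ip_resume_phase2(adev);	// the rest
 */
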
/**
 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
 *
 * @adev: amdgpu_device pointer
 *
 * Query the VBIOS data tables to determine if the board supports SR-IOV.
 */
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		if (adev->is_atom_fw) {
			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		} else {
			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		}

		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
	}
}

/**
 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
 *
 * @asic_type: AMD asic type
 *
 * Check if there is DC (new modesetting infrastructure) support for an asic.
 * returns true if DC has support, false if not.
 */
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
{
	switch (asic_type) {
#if defined(CONFIG_DRM_AMD_DC)
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_TONGA:
	case CHIP_FIJI:
#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
		return amdgpu_dc != 0;
#endif
	case CHIP_VEGA10:
	case CHIP_VEGA12:
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
#endif
		return amdgpu_dc != 0;
#endif
	default:
		return false;
	}
}

/**
 * amdgpu_device_has_dc_support - check if dc is supported
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true for supported, false for not supported
 */
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	return amdgpu_device_asic_has_dc_support(adev->asic_type);
}

/**
 * amdgpu_device_init - initialize the driver
 *
 * @adev: amdgpu_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	bool runtime = false;
	u32 max_MBps;

	adev->shutdown = false;
	adev->dev = &pdev->dev;
	adev->ddev = ddev;
	adev->pdev = pdev;
	adev->flags = flags;
	adev->asic_type = flags & AMD_ASIC_MASK;
	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
	if (amdgpu_emu_mode == 1)
		adev->usec_timeout *= 2;
	adev->gmc.gart_size = 512 * 1024 * 1024;
	adev->accel_working = false;
	adev->num_rings = 0;
	adev->mman.buffer_funcs = NULL;
	adev->mman.buffer_funcs_ring = NULL;
	adev->vm_manager.vm_pte_funcs = NULL;
	adev->vm_manager.vm_pte_num_rings = 0;
	adev->gmc.gmc_funcs = NULL;
	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	adev->smc_rreg = &amdgpu_invalid_rreg;
	adev->smc_wreg = &amdgpu_invalid_wreg;
	adev->pcie_rreg = &amdgpu_invalid_rreg;
	adev->pcie_wreg = &amdgpu_invalid_wreg;
	adev->pciep_rreg = &amdgpu_invalid_rreg;
	adev->pciep_wreg = &amdgpu_invalid_wreg;
	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
	adev->didt_rreg = &amdgpu_invalid_rreg;
	adev->didt_wreg = &amdgpu_invalid_wreg;
	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initializations are all done here so we
	 * can recall functions without having locking issues */
	atomic_set(&adev->irq.ih.lock, 0);
	mutex_init(&adev->firmware.mutex);
	mutex_init(&adev->pm.mutex);
	mutex_init(&adev->gfx.gpu_clock_mutex);
	mutex_init(&adev->srbm_mutex);
	mutex_init(&adev->gfx.pipe_reserve_mutex);
	mutex_init(&adev->grbm_idx_mutex);
	mutex_init(&adev->mn_lock);
	mutex_init(&adev->virt.vf_errors.lock);
	hash_init(adev->mn_hash);
	mutex_init(&adev->lock_reset);

	amdgpu_device_check_arguments(adev);

	spin_lock_init(&adev->mmio_idx_lock);
	spin_lock_init(&adev->smc_idx_lock);
	spin_lock_init(&adev->pcie_idx_lock);
	spin_lock_init(&adev->uvd_ctx_idx_lock);
	spin_lock_init(&adev->didt_idx_lock);
	spin_lock_init(&adev->gc_cac_idx_lock);
	spin_lock_init(&adev->se_cac_idx_lock);
	spin_lock_init(&adev->audio_endpt_idx_lock);
	spin_lock_init(&adev->mm_stats.lock);

	INIT_LIST_HEAD(&adev->shadow_list);
	mutex_init(&adev->shadow_list_lock);

	INIT_LIST_HEAD(&adev->ring_lru_list);
	spin_lock_init(&adev->ring_lru_list_lock);

	INIT_DELAYED_WORK(&adev->late_init_work,
			  amdgpu_device_ip_late_init_func_handler);

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	if (adev->asic_type >= CHIP_BONAIRE) {
		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
	} else {
		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
	}

	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
	if (adev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);

	/* doorbell bar mapping */
	amdgpu_device_doorbell_init(adev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
			break;
		}
	}
	if (adev->rio_mem == NULL)
		DRM_INFO("PCI I/O BAR is not found.\n");

	amdgpu_device_get_pcie_info(adev);

	/* early init functions */
	r = amdgpu_device_ip_early_init(adev);
	if (r)
		return r;

	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);

	if (amdgpu_device_is_px(ddev))
		runtime = true;
	if (!pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_register_client(adev->pdev,
					       &amdgpu_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);

	if (amdgpu_emu_mode == 1) {
		/* post the asic on emulation mode */
		emu_soc_asic_init(adev);
		goto fence_driver_init;
	}

	/* Read BIOS */
	if (!amdgpu_get_bios(adev)) {
		r = -EINVAL;
		goto failed;
	}

	r = amdgpu_atombios_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
		goto failed;
	}

	/* detect if we are with an SRIOV vbios */
	amdgpu_device_detect_sriov_bios(adev);

	/* Post card if necessary */
	if (amdgpu_device_need_post(adev)) {
		if (!adev->bios) {
			dev_err(adev->dev, "no vBIOS found\n");
			r = -EINVAL;
			goto failed;
		}
		DRM_INFO("GPU posting now...\n");
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r) {
			dev_err(adev->dev, "gpu post error!\n");
			goto failed;
		}
	}

	if (adev->is_atom_fw) {
		/* Initialize clocks */
		r = amdgpu_atomfirmware_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
			goto failed;
		}
	} else {
		/* Initialize clocks */
		r = amdgpu_atombios_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
			goto failed;
		}
		/* init i2c buses */
		if (!amdgpu_device_has_dc_support(adev))
			amdgpu_atombios_i2c_init(adev);
	}

fence_driver_init:
	/* Fence driver */
	r = amdgpu_fence_driver_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
		goto failed;
	}

	/* init the mode config */
	drm_mode_config_init(adev->ddev);

	r = amdgpu_device_ip_init(adev);
	if (r) {
		/* failed in exclusive mode due to timeout */
		if (amdgpu_sriov_vf(adev) &&
		    !amdgpu_sriov_runtime(adev) &&
		    amdgpu_virt_mmio_blocked(adev) &&
		    !amdgpu_virt_wait_reset(adev)) {
			dev_err(adev->dev, "VF exclusive mode timeout\n");
			/* Don't send request since VF is inactive. */
			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
			adev->virt.ops = NULL;
			r = -EAGAIN;
			goto failed;
		}
		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
		goto failed;
	}

	adev->accel_working = true;

	amdgpu_vm_check_compute_bug(adev);

	/* Initialize the buffer migration limit. */
	if (amdgpu_moverate >= 0)
		max_MBps = amdgpu_moverate;
	else
		max_MBps = 8; /* Allow 8 MB/s. */
	/* Get a log2 for easy divisions. */
	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));

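	/*
	 * ilog2() above turns the MB/s budget into a shift count so later
	 * accounting can divide by the rate without a 64-bit division. A
	 * hypothetical illustration (exact when max_MBps is a power of two,
	 * an approximation otherwise since ilog2() rounds down):
	 *
	 *	// seconds' worth of budget consumed by "bytes" moved:
	 *	// bytes / (max_MBps << 20) == bytes >> (20 + log2_max_MBps)
	 *	u64 secs = bytes >> (20 + adev->mm_stats.log2_max_MBps);
	 */
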
	r = amdgpu_ib_pool_init(adev);
	if (r) {
		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
		goto failed;
	}

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_init_data_exchange(adev);

	amdgpu_fbdev_init(adev);

	r = amdgpu_pm_sysfs_init(adev);
	if (r)
		DRM_ERROR("registering pm debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_gem_init(adev);
	if (r)
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_firmware_init(adev);
	if (r)
		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_init(adev);
	if (r)
		DRM_ERROR("Creating debugfs files failed (%d).\n", r);

	if ((amdgpu_testing & 1)) {
		if (adev->accel_working)
			amdgpu_test_moves(adev);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
	}
	if (amdgpu_benchmarking) {
		if (adev->accel_working)
			amdgpu_benchmark(adev, amdgpu_benchmarking);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
	}

	/* enable clockgating, etc. after ib tests, etc. since some blocks require
	 * explicit gating rather than handling it automatically.
	 */
	r = amdgpu_device_ip_late_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
		goto failed;
	}

	return 0;

failed:
	amdgpu_vf_error_trans_all(adev);
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);

	return r;
}

/**
 * amdgpu_device_fini - tear down the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void amdgpu_device_fini(struct amdgpu_device *adev)
{
	int r;

	DRM_INFO("amdgpu: finishing device.\n");
	adev->shutdown = true;
	/* disable all interrupts */
	amdgpu_irq_disable_all(adev);
	if (adev->mode_info.mode_config_initialized) {
		if (!amdgpu_device_has_dc_support(adev))
			drm_crtc_force_disable_all(adev->ddev);
		else
			drm_atomic_helper_shutdown(adev->ddev);
	}
	amdgpu_ib_pool_fini(adev);
	amdgpu_fence_driver_fini(adev);
	amdgpu_pm_sysfs_fini(adev);
	amdgpu_fbdev_fini(adev);
	r = amdgpu_device_ip_fini(adev);
	if (adev->firmware.gpu_info_fw) {
		release_firmware(adev->firmware.gpu_info_fw);
		adev->firmware.gpu_info_fw = NULL;
	}
	adev->accel_working = false;
	cancel_delayed_work_sync(&adev->late_init_work);
	/* free i2c buses */
	if (!amdgpu_device_has_dc_support(adev))
		amdgpu_i2c_fini(adev);

	if (amdgpu_emu_mode != 1)
		amdgpu_atombios_fini(adev);

	kfree(adev->bios);
	adev->bios = NULL;
	if (!pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_unregister_client(adev->pdev);
	if (adev->flags & AMD_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	vga_client_register(adev->pdev, NULL, NULL, NULL);
	if (adev->rio_mem)
		pci_iounmap(adev->pdev, adev->rio_mem);
	adev->rio_mem = NULL;
	iounmap(adev->rmmio);
	adev->rmmio = NULL;
	amdgpu_device_doorbell_fini(adev);
	amdgpu_debugfs_regs_cleanup(adev);
}

/*
 * Suspend & resume.
 */
/**
 * amdgpu_device_suspend - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: suspend state
 * @fbcon: notify the fbdev of suspend
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
{
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	adev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	if (!amdgpu_device_has_dc_support(adev)) {
		/* turn off display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
		}
		drm_modeset_unlock_all(dev);
	}

	amdgpu_amdkfd_suspend(adev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
		struct amdgpu_bo *robj;

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				amdgpu_bo_unpin(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_amdgpu_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
			r = amdgpu_bo_reserve(robj, true);
			if (r == 0) {
				amdgpu_bo_unpin(robj);
				amdgpu_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);

	amdgpu_fence_driver_suspend(adev);

	r = amdgpu_device_ip_suspend(adev);

	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	amdgpu_bo_evict_vram(adev);

	pci_save_state(dev->pdev);
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	} else {
		r = amdgpu_asic_reset(adev);
		if (r)
			DRM_ERROR("amdgpu asic reset failed\n");
	}

	if (fbcon) {
		console_lock();
		amdgpu_fbdev_set_suspend(adev, 1);
		console_unlock();
	}
	return 0;
}

/**
 * amdgpu_device_resume - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: resume state
 * @fbcon: notify the fbdev of resume
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_crtc *crtc;
	int r = 0;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon)
		console_lock();

	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		r = pci_enable_device(dev->pdev);
		if (r)
			goto unlock;
	}

	/* post card */
	if (amdgpu_device_need_post(adev)) {
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r)
			DRM_ERROR("amdgpu asic init failed\n");
	}

	r = amdgpu_device_ip_resume(adev);
	if (r) {
		DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
		goto unlock;
	}
	amdgpu_fence_driver_resume(adev);

	if (resume) {
		r = amdgpu_ib_ring_tests(adev);
		if (r)
			DRM_ERROR("ib ring test failed (%d).\n", r);
	}

	r = amdgpu_device_ip_late_init(adev);
	if (r)
		goto unlock;

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				r = amdgpu_bo_pin(aobj,
						  AMDGPU_GEM_DOMAIN_VRAM,
						  &amdgpu_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				amdgpu_bo_unreserve(aobj);
			}
		}
	}
	r = amdgpu_amdkfd_resume(adev);
	if (r)
		return r;

	/* blat the mode back in */
	if (fbcon) {
		if (!amdgpu_device_has_dc_support(adev)) {
			/* pre DCE11 */
			drm_helper_resume_force_mode(dev);

			/* turn on display hw */
			drm_modeset_lock_all(dev);
			list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
			}
			drm_modeset_unlock_all(dev);
		}
	}

	drm_kms_helper_poll_enable(dev);

	/*
	 * Most of the connector probing functions try to acquire runtime pm
	 * refs to ensure that the GPU is powered on when connector polling is
	 * performed. Since we're calling this from a runtime PM callback,
	 * trying to acquire rpm refs will cause us to deadlock.
	 *
	 * Since we're guaranteed to be holding the rpm lock, it's safe to
	 * temporarily disable the rpm helpers so this doesn't deadlock us.
	 */
#ifdef CONFIG_PM
	dev->dev->power.disable_depth++;
#endif
	if (!amdgpu_device_has_dc_support(adev))
		drm_helper_hpd_irq_event(dev);
	else
		drm_kms_helper_hotplug_event(dev);
#ifdef CONFIG_PM
	dev->dev->power.disable_depth--;
#endif

	if (fbcon)
		amdgpu_fbdev_set_suspend(adev, 0);

unlock:
	if (fbcon)
		console_unlock();

	return r;
}

/**
 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
 *
 * @adev: amdgpu_device pointer
 *
 * The list of all the hardware IPs that make up the asic is walked and
 * the check_soft_reset callbacks are run. check_soft_reset determines
 * if the asic is still hung or not.
 * Returns true if any of the IPs are still in a hung state, false if not.
 */
static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
{
	int i;
	bool asic_hang = false;

	if (amdgpu_sriov_vf(adev))
		return true;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
			adev->ip_blocks[i].status.hang =
				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
		if (adev->ip_blocks[i].status.hang) {
			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
			asic_hang = true;
		}
	}
	return asic_hang;
}

/**
 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * The list of all the hardware IPs that make up the asic is walked and the
 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
 * handles any IP specific hardware or software state changes that are
 * necessary for a soft reset to succeed.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
 *
 * @adev: amdgpu_device pointer
 *
 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
 * reset is necessary to recover.
 * Returns true if a full asic reset is required, false if not.
 */
static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
			if (adev->ip_blocks[i].status.hang) {
				DRM_INFO("Some blocks need a full reset!\n");
				return true;
			}
		}
	}
	return false;
}

/**
 * amdgpu_device_ip_soft_reset - do a soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * The list of all the hardware IPs that make up the asic is walked and the
 * soft_reset callbacks are run if the block is hung. soft_reset handles any
 * IP specific hardware or software state changes that are necessary to soft
 * reset the IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->soft_reset) {
			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * The list of all the hardware IPs that make up the asic is walked and the
 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
 * handles any IP specific hardware or software state changes that are
 * necessary after the IP has been soft reset.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->post_soft_reset)
			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
		if (r)
			return r;
	}

	return 0;
}

/**
 * amdgpu_device_recover_vram_from_shadow - restore shadowed VRAM buffers
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring for the engine handling the buffer operations
 * @bo: amdgpu_bo buffer whose shadow is being restored
 * @fence: dma_fence associated with the operation
 *
 * Restores the VRAM buffer contents from the shadow in GTT. Used to
 * restore things like GPUVM page tables after a GPU reset where
 * the contents of VRAM might be lost.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
						  struct amdgpu_ring *ring,
						  struct amdgpu_bo *bo,
						  struct dma_fence **fence)
{
	uint32_t domain;
	int r;

	if (!bo->shadow)
		return 0;

	r = amdgpu_bo_reserve(bo, true);
	if (r)
		return r;
	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	/* if bo has been evicted, then no need to recover */
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		r = amdgpu_bo_validate(bo->shadow);
		if (r) {
			DRM_ERROR("bo validate failed!\n");
			goto err;
		}

		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
						  NULL, fence, true);
		if (r) {
			DRM_ERROR("recover page table failed!\n");
			goto err;
		}
	}
err:
	amdgpu_bo_unreserve(bo);
	return r;
}

/**
 * amdgpu_device_handle_vram_lost - Handle the loss of VRAM contents
 *
 * @adev: amdgpu_device pointer
 *
 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
 * restore things like GPUVM page tables after a GPU reset where
 * the contents of VRAM might be lost.
 * Returns 0 on success, 1 on failure.
 */
static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct amdgpu_bo *bo, *tmp;
	struct dma_fence *fence = NULL, *next = NULL;
	long r = 1;
	int i = 0;
	long tmo;

	if (amdgpu_sriov_runtime(adev))
		tmo = msecs_to_jiffies(amdgpu_lockup_timeout);
	else
		tmo = msecs_to_jiffies(100);

	DRM_INFO("recover vram bo from shadow start\n");
	mutex_lock(&adev->shadow_list_lock);
	list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
		next = NULL;
		amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
		if (fence) {
			r = dma_fence_wait_timeout(fence, false, tmo);
			if (r == 0)
				pr_err("wait fence %p[%d] timeout\n", fence, i);
			else if (r < 0)
				pr_err("wait fence %p[%d] interrupted\n", fence, i);
			if (r < 1) {
				dma_fence_put(fence);
				fence = next;
				break;
			}
			i++;
		}

		dma_fence_put(fence);
		fence = next;
	}
	mutex_unlock(&adev->shadow_list_lock);

	if (fence) {
		r = dma_fence_wait_timeout(fence, false, tmo);
		if (r == 0)
			pr_err("wait fence %p[%d] timeout\n", fence, i);
		else if (r < 0)
			pr_err("wait fence %p[%d] interrupted\n", fence, i);
	}
	dma_fence_put(fence);

	if (r > 0)
		DRM_INFO("recover vram bo from shadow done\n");
	else
		DRM_ERROR("recover vram bo from shadow failed\n");

	return (r > 0) ? 0 : 1;
}

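/*
 * Note the pipelining above: the copy for buffer N is kicked off before
 * the fence for buffer N-1 is waited on, so the DMA engine stays busy
 * while the CPU waits. A stripped-down sketch of that producer/consumer
 * fence chain (start_async_copy() is a hypothetical helper, for
 * illustration only):
 *
 *	struct dma_fence *fence = NULL, *next = NULL;
 *
 *	list_for_each_entry(item, &list, head) {
 *		start_async_copy(item, &next);		// submit copy N
 *		if (fence &&
 *		    dma_fence_wait_timeout(fence, false, tmo) < 1)
 *			break;				// copy N-1 failed
 *		dma_fence_put(fence);
 *		fence = next;				// wait on N next turn
 *	}
 *	dma_fence_put(fence);
 */
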
/**
 * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
 *
 * @adev: amdgpu device pointer
 *
 * Attempt a soft reset or, failing that, a full reset, and reinitialize
 * the ASIC.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_reset(struct amdgpu_device *adev)
{
	bool need_full_reset, vram_lost = 0;
	int r;

	need_full_reset = amdgpu_device_ip_need_full_reset(adev);

	if (!need_full_reset) {
		amdgpu_device_ip_pre_soft_reset(adev);
		r = amdgpu_device_ip_soft_reset(adev);
		amdgpu_device_ip_post_soft_reset(adev);
		if (r || amdgpu_device_ip_check_soft_reset(adev)) {
			DRM_INFO("soft reset failed, will fallback to full reset!\n");
			need_full_reset = true;
		}
	}

	if (need_full_reset) {
		r = amdgpu_device_ip_suspend(adev);

retry:
		r = amdgpu_asic_reset(adev);
		/* post card */
		amdgpu_atom_asic_init(adev->mode_info.atom_context);

		if (!r) {
			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
			r = amdgpu_device_ip_resume_phase1(adev);
			if (r)
				goto out;

			vram_lost = amdgpu_device_check_vram_lost(adev);
			if (vram_lost) {
				DRM_ERROR("VRAM is lost!\n");
				atomic_inc(&adev->vram_lost_counter);
			}

			r = amdgpu_gtt_mgr_recover(
				&adev->mman.bdev.man[TTM_PL_TT]);
			if (r)
				goto out;

			r = amdgpu_device_ip_resume_phase2(adev);
			if (r)
				goto out;

			if (vram_lost)
				amdgpu_device_fill_reset_magic(adev);
		}
	}

out:
	if (!r) {
		amdgpu_irq_gpu_reset_resume_helper(adev);
		r = amdgpu_ib_ring_tests(adev);
		if (r) {
			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
			r = amdgpu_device_ip_suspend(adev);
			need_full_reset = true;
			goto retry;
		}
	}

	if (!r && ((need_full_reset && !(adev->flags & AMD_IS_APU)) || vram_lost))
		r = amdgpu_device_handle_vram_lost(adev);

	return r;
}

/**
 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
 *
 * @adev: amdgpu device pointer
 * @from_hypervisor: request the full GPU from the hypervisor
 *
 * Do a VF FLR and reinitialize the ASIC.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
				     bool from_hypervisor)
{
	int r;

	if (from_hypervisor)
		r = amdgpu_virt_request_full_gpu(adev, true);
	else
		r = amdgpu_virt_reset_gpu(adev);
	if (r)
		return r;

	/* Resume IP prior to SMC */
	r = amdgpu_device_ip_reinit_early_sriov(adev);
	if (r)
		goto error;

	/* we need to recover the gart prior to running SMC/CP/SDMA resume */
	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);

	/* now we are okay to resume SMC/CP/SDMA */
	r = amdgpu_device_ip_reinit_late_sriov(adev);
	amdgpu_virt_release_full_gpu(adev, true);
	if (r)
		goto error;

	amdgpu_irq_gpu_reset_resume_helper(adev);
	r = amdgpu_ib_ring_tests(adev);

	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
		atomic_inc(&adev->vram_lost_counter);
		r = amdgpu_device_handle_vram_lost(adev);
	}

error:

	return r;
}

/**
 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
 *
 * @adev: amdgpu device pointer
 * @job: which job triggered the hang
 * @force: forces reset regardless of amdgpu_gpu_recovery
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job, bool force)
{
	struct drm_atomic_state *state = NULL;
	int i, r, resched;

	if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
		return 0;
	}

	if (!force && (amdgpu_gpu_recovery == 0 ||
		       (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) {
		DRM_INFO("GPU recovery disabled.\n");
		return 0;
	}

	dev_info(adev->dev, "GPU reset begin!\n");

	mutex_lock(&adev->lock_reset);
	atomic_inc(&adev->gpu_reset_counter);
	adev->in_gpu_reset = 1;

	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* store modesetting */
	if (amdgpu_device_has_dc_support(adev))
		state = drm_atomic_helper_suspend(adev->ddev);

	/* block all schedulers and reset given job's ring */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		kthread_park(ring->sched.thread);

		if (job && job->ring->idx != i)
			continue;

		drm_sched_hw_job_reset(&ring->sched, &job->base);

		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
		amdgpu_fence_driver_force_completion(ring);
	}

	if (amdgpu_sriov_vf(adev))
		r = amdgpu_device_reset_sriov(adev, job ? false : true);
	else
		r = amdgpu_device_reset(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		/* only need recovery sched of the given job's ring
		 * or all rings (in the case @job is NULL)
		 * after above amdgpu_reset accomplished
		 */
		if ((!job || job->ring->idx == i) && !r)
			drm_sched_job_recovery(&ring->sched);

		kthread_unpark(ring->sched.thread);
	}

	if (amdgpu_device_has_dc_support(adev)) {
		if (drm_atomic_helper_resume(adev->ddev, state))
			dev_info(adev->dev, "drm resume failed:%d\n", r);
	} else {
		drm_helper_resume_force_mode(adev->ddev);
	}

	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);

	if (r) {
		/* bad news, how to tell it to userspace? */
		dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
	} else {
		dev_info(adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
	}

	amdgpu_vf_error_trans_all(adev);
	adev->in_gpu_reset = 0;
	mutex_unlock(&adev->lock_reset);
	return r;
}

/**
 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches and stores in the driver the PCIE capabilities (gen speed
 * and lanes) of the slot the device is in. Handles APUs and
 * virtualized environments where PCIE config space may not be available.
 */
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
	u32 mask;
	int ret;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask == 0) {
		ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
		if (!ret) {
			adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);

			if (mask & DRM_PCIE_SPEED_25)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
			if (mask & DRM_PCIE_SPEED_50)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
			if (mask & DRM_PCIE_SPEED_80)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
		} else {
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
		if (!ret) {
			switch (mask) {
			case 32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		} else {
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		}
	}
}

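/*
 * The derived masks are plain bitfields, so consumers test them with
 * simple bit checks. A hypothetical illustration (not a call site in
 * this file) of gating a link-speed change on the gen mask built above:
 *
 *	// only program gen3 if the slot advertised support for it
 *	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
 *		; // ... request a gen3 link speed ...
 */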