drm/amdgpu: fix errors in comments.
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/kthread.h>
29#include <linux/console.h>
30#include <linux/slab.h>
31#include <linux/debugfs.h>
32#include <drm/drmP.h>
33#include <drm/drm_crtc_helper.h>
34#include <drm/amdgpu_drm.h>
35#include <linux/vgaarb.h>
36#include <linux/vga_switcheroo.h>
37#include <linux/efi.h>
38#include "amdgpu.h"
39#include "amdgpu_trace.h"
40#include "amdgpu_i2c.h"
41#include "atom.h"
42#include "amdgpu_atombios.h"
43#include "amdgpu_atomfirmware.h"
44#include "amd_pcie.h"
45#ifdef CONFIG_DRM_AMDGPU_SI
46#include "si.h"
47#endif
48#ifdef CONFIG_DRM_AMDGPU_CIK
49#include "cik.h"
50#endif
51#include "vi.h"
52#include "soc15.h"
53#include "bif/bif_4_1_d.h"
54#include <linux/pci.h>
55#include <linux/firmware.h>
56
57MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
58
59static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
60static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
61
62static const char *amdgpu_asic_name[] = {
63 "TAHITI",
64 "PITCAIRN",
65 "VERDE",
66 "OLAND",
67 "HAINAN",
68 "BONAIRE",
69 "KAVERI",
70 "KABINI",
71 "HAWAII",
72 "MULLINS",
73 "TOPAZ",
74 "TONGA",
75 "FIJI",
76 "CARRIZO",
77 "STONEY",
78 "POLARIS10",
79 "POLARIS11",
80 "POLARIS12",
81 "VEGA10",
82 "LAST",
83};
84
85bool amdgpu_device_is_px(struct drm_device *dev)
86{
87 struct amdgpu_device *adev = dev->dev_private;
88
89 if (adev->flags & AMD_IS_PX)
90 return true;
91 return false;
92}
93
94/*
95 * MMIO register access helper functions.
96 */
97uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
15d72fd7 98 uint32_t acc_flags)
d38ceaf9 99{
f4b373f4
TSD
100 uint32_t ret;
101
15d72fd7 102 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
bc992ba5
XY
103 BUG_ON(in_interrupt());
104 return amdgpu_virt_kiq_rreg(adev, reg);
105 }
106
15d72fd7 107 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
f4b373f4 108 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
d38ceaf9
AD
109 else {
110 unsigned long flags;
d38ceaf9
AD
111
112 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
113 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
114 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
115 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
d38ceaf9 116 }
f4b373f4
TSD
117 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
118 return ret;
d38ceaf9
AD
119}
120
121void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
15d72fd7 122 uint32_t acc_flags)
d38ceaf9 123{
f4b373f4 124 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
4e99a44e 125
15d72fd7 126 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
bc992ba5
XY
127 BUG_ON(in_interrupt());
128 return amdgpu_virt_kiq_wreg(adev, reg, v);
129 }
130
15d72fd7 131 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
d38ceaf9
AD
132 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
133 else {
134 unsigned long flags;
135
136 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
137 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
138 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
139 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
140 }
141}
142
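/*
 * Illustrative sketch (not part of the original file): callers normally go
 * through the RREG32()/WREG32() convenience macros from amdgpu.h, which wrap
 * amdgpu_mm_rreg()/amdgpu_mm_wreg() above.  The register and field names
 * below are placeholders, not real definitions.
 *
 *	u32 tmp;
 *
 *	tmp = RREG32(mmSOME_REG);	// amdgpu_mm_rreg(adev, mmSOME_REG, 0)
 *	tmp &= ~SOME_FIELD_MASK;
 *	tmp |= SOME_FIELD_VALUE;
 *	WREG32(mmSOME_REG, tmp);	// amdgpu_mm_wreg(adev, mmSOME_REG, tmp, 0)
 *
 * Registers whose byte offset falls outside the mapped MMIO BAR are reached
 * indirectly through the mmMM_INDEX/mmMM_DATA pair, as implemented above.
 */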
143u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
144{
145 if ((reg * 4) < adev->rio_mem_size)
146 return ioread32(adev->rio_mem + (reg * 4));
147 else {
148 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
149 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
150 }
151}
152
153void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
154{
155
156 if ((reg * 4) < adev->rio_mem_size)
157 iowrite32(v, adev->rio_mem + (reg * 4));
158 else {
159 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
160 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
161 }
162}
163
164/**
165 * amdgpu_mm_rdoorbell - read a doorbell dword
166 *
167 * @adev: amdgpu_device pointer
168 * @index: doorbell index
169 *
170 * Returns the value in the doorbell aperture at the
171 * requested doorbell index (CIK).
172 */
173u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
174{
175 if (index < adev->doorbell.num_doorbells) {
176 return readl(adev->doorbell.ptr + index);
177 } else {
178 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
179 return 0;
180 }
181}
182
183/**
184 * amdgpu_mm_wdoorbell - write a doorbell dword
185 *
186 * @adev: amdgpu_device pointer
187 * @index: doorbell index
188 * @v: value to write
189 *
190 * Writes @v to the doorbell aperture at the
191 * requested doorbell index (CIK).
192 */
193void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
194{
195 if (index < adev->doorbell.num_doorbells) {
196 writel(v, adev->doorbell.ptr + index);
197 } else {
198 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
199 }
200}
201
832be404
KW
202/**
203 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
204 *
205 * @adev: amdgpu_device pointer
206 * @index: doorbell index
207 *
208 * Returns the value in the doorbell aperture at the
209 * requested doorbell index (VEGA10+).
210 */
211u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
212{
213 if (index < adev->doorbell.num_doorbells) {
214 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
215 } else {
216 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
217 return 0;
218 }
219}
220
221/**
222 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
223 *
224 * @adev: amdgpu_device pointer
225 * @index: doorbell index
226 * @v: value to write
227 *
228 * Writes @v to the doorbell aperture at the
229 * requested doorbell index (VEGA10+).
230 */
231void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
232{
233 if (index < adev->doorbell.num_doorbells) {
234 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
235 } else {
236 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
237 }
238}
239
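/*
 * Illustrative sketch (not part of the original file): a ring that uses
 * doorbells typically publishes its new write pointer through the
 * WDOORBELL32()/WDOORBELL64() macros from amdgpu.h, which wrap the helpers
 * above; the exact ring fields and the fallback register are only an example.
 *
 *	if (ring->use_doorbell)
 *		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 *	else
 *		WREG32(mmSOME_RING_WPTR, lower_32_bits(ring->wptr));
 */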
240/**
241 * amdgpu_invalid_rreg - dummy reg read function
242 *
243 * @adev: amdgpu device pointer
244 * @reg: offset of register
245 *
246 * Dummy register read function. Used for register blocks
247 * that certain asics don't have (all asics).
248 * Returns the value in the register.
249 */
250static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
251{
252 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
253 BUG();
254 return 0;
255}
256
257/**
258 * amdgpu_invalid_wreg - dummy reg write function
259 *
260 * @adev: amdgpu device pointer
261 * @reg: offset of register
262 * @v: value to write to the register
263 *
264 * Dummy register write function. Used for register blocks
265 * that certain asics don't have (all asics).
266 */
267static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
268{
269 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
270 reg, v);
271 BUG();
272}
273
274/**
275 * amdgpu_block_invalid_rreg - dummy reg read function
276 *
277 * @adev: amdgpu device pointer
278 * @block: offset of instance
279 * @reg: offset of register
280 *
281 * Dummy register read function. Used for register blocks
282 * that certain asics don't have (all asics).
283 * Returns the value in the register.
284 */
285static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
286 uint32_t block, uint32_t reg)
287{
288 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
289 reg, block);
290 BUG();
291 return 0;
292}
293
294/**
295 * amdgpu_block_invalid_wreg - dummy reg write function
296 *
297 * @adev: amdgpu device pointer
298 * @block: offset of instance
299 * @reg: offset of register
300 * @v: value to write to the register
301 *
302 * Dummy register write function. Used for register blocks
303 * that certain asics don't have (all asics).
304 */
305static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
306 uint32_t block,
307 uint32_t reg, uint32_t v)
308{
309 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
310 reg, block, v);
311 BUG();
312}
313
314static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
315{
316 int r;
317
318 if (adev->vram_scratch.robj == NULL) {
319 r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
857d913d 320 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
03f48dd5
CK
321 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
322 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
72d7668b 323 NULL, NULL, &adev->vram_scratch.robj);
d38ceaf9
AD
324 if (r) {
325 return r;
326 }
327 }
328
329 r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
330 if (unlikely(r != 0))
331 return r;
332 r = amdgpu_bo_pin(adev->vram_scratch.robj,
333 AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
334 if (r) {
335 amdgpu_bo_unreserve(adev->vram_scratch.robj);
336 return r;
337 }
338 r = amdgpu_bo_kmap(adev->vram_scratch.robj,
339 (void **)&adev->vram_scratch.ptr);
340 if (r)
341 amdgpu_bo_unpin(adev->vram_scratch.robj);
342 amdgpu_bo_unreserve(adev->vram_scratch.robj);
343
344 return r;
345}
346
347static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
348{
349 int r;
350
351 if (adev->vram_scratch.robj == NULL) {
352 return;
353 }
8ab25b4f 354 r = amdgpu_bo_reserve(adev->vram_scratch.robj, true);
d38ceaf9
AD
355 if (likely(r == 0)) {
356 amdgpu_bo_kunmap(adev->vram_scratch.robj);
357 amdgpu_bo_unpin(adev->vram_scratch.robj);
358 amdgpu_bo_unreserve(adev->vram_scratch.robj);
359 }
360 amdgpu_bo_unref(&adev->vram_scratch.robj);
361}
362
363/**
364 * amdgpu_program_register_sequence - program an array of registers.
365 *
366 * @adev: amdgpu_device pointer
367 * @registers: pointer to the register array
368 * @array_size: size of the register array
369 *
370 * Programs an array of registers with AND and OR masks.
371 * This is a helper for setting golden registers.
372 */
373void amdgpu_program_register_sequence(struct amdgpu_device *adev,
374 const u32 *registers,
375 const u32 array_size)
376{
377 u32 tmp, reg, and_mask, or_mask;
378 int i;
379
380 if (array_size % 3)
381 return;
382
383 for (i = 0; i < array_size; i +=3) {
384 reg = registers[i + 0];
385 and_mask = registers[i + 1];
386 or_mask = registers[i + 2];
387
388 if (and_mask == 0xffffffff) {
389 tmp = or_mask;
390 } else {
391 tmp = RREG32(reg);
392 tmp &= ~and_mask;
393 tmp |= or_mask;
394 }
395 WREG32(reg, tmp);
396 }
397}
398
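/*
 * Illustrative sketch (not part of the original file): a golden register
 * list is a flat array of {offset, and_mask, or_mask} triplets.  The values
 * below are made up; the real per-asic tables live in files like vi.c.
 *
 *	static const u32 example_golden_settings[] = {
 *		mmSOME_REG_A, 0xffffffff, 0x00000100,	// and_mask of ~0: plain overwrite
 *		mmSOME_REG_B, 0x0000000f, 0x00000002,	// clear low nibble, then set bit 1
 *	};
 *
 *	amdgpu_program_register_sequence(adev, example_golden_settings,
 *					 ARRAY_SIZE(example_golden_settings));
 */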
399void amdgpu_pci_config_reset(struct amdgpu_device *adev)
400{
401 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
402}
403
404/*
405 * GPU doorbell aperture helpers function.
406 */
407/**
408 * amdgpu_doorbell_init - Init doorbell driver information.
409 *
410 * @adev: amdgpu_device pointer
411 *
412 * Init doorbell driver information (CIK)
413 * Returns 0 on success, error on failure.
414 */
415static int amdgpu_doorbell_init(struct amdgpu_device *adev)
416{
417 /* doorbell bar mapping */
418 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
419 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
420
edf600da 421 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
d38ceaf9
AD
422 AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
423 if (adev->doorbell.num_doorbells == 0)
424 return -EINVAL;
425
8972e5d2
CK
426 adev->doorbell.ptr = ioremap(adev->doorbell.base,
427 adev->doorbell.num_doorbells *
428 sizeof(u32));
429 if (adev->doorbell.ptr == NULL)
d38ceaf9 430 return -ENOMEM;
d38ceaf9
AD
431
432 return 0;
433}
434
435/**
436 * amdgpu_doorbell_fini - Tear down doorbell driver information.
437 *
438 * @adev: amdgpu_device pointer
439 *
440 * Tear down doorbell driver information (CIK)
441 */
442static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
443{
444 iounmap(adev->doorbell.ptr);
445 adev->doorbell.ptr = NULL;
446}
447
448/**
449 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
450 * setup amdkfd
451 *
452 * @adev: amdgpu_device pointer
453 * @aperture_base: output returning doorbell aperture base physical address
454 * @aperture_size: output returning doorbell aperture size in bytes
455 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
456 *
457 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
458 * takes doorbells required for its own rings and reports the setup to amdkfd.
459 * amdgpu reserved doorbells are at the start of the doorbell aperture.
460 */
461void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
462 phys_addr_t *aperture_base,
463 size_t *aperture_size,
464 size_t *start_offset)
465{
466 /*
467 * The first num_doorbells are used by amdgpu.
468 * amdkfd takes whatever's left in the aperture.
469 */
470 if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
471 *aperture_base = adev->doorbell.base;
472 *aperture_size = adev->doorbell.size;
473 *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
474 } else {
475 *aperture_base = 0;
476 *aperture_size = 0;
477 *start_offset = 0;
478 }
479}
480
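/*
 * Illustrative sketch (not part of the original file): assuming a
 * hypothetical 8 MB doorbell BAR and 0x400 doorbells kept by amdgpu, the
 * values reported to amdkfd would be:
 *
 *	*aperture_base = adev->doorbell.base;		// start of BAR 2
 *	*aperture_size = 8 * 1024 * 1024;		// whole BAR
 *	*start_offset  = 0x400 * sizeof(u32);		// first 4 KB stay with amdgpu
 *
 * amdkfd then assigns its doorbells from start_offset up to aperture_size.
 */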
481/*
482 * amdgpu_wb_*()
483 * Writeback is the method by which the GPU updates special pages in memory
484 * with the status of certain GPU events (fences, ring pointers, etc.).
485 */
486
487/**
488 * amdgpu_wb_fini - Disable Writeback and free memory
489 *
490 * @adev: amdgpu_device pointer
491 *
492 * Disables Writeback and frees the Writeback memory (all asics).
493 * Used at driver shutdown.
494 */
495static void amdgpu_wb_fini(struct amdgpu_device *adev)
496{
497 if (adev->wb.wb_obj) {
a76ed485
AD
498 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
499 &adev->wb.gpu_addr,
500 (void **)&adev->wb.wb);
d38ceaf9
AD
501 adev->wb.wb_obj = NULL;
502 }
503}
504
505/**
506 * amdgpu_wb_init- Init Writeback driver info and allocate memory
507 *
508 * @adev: amdgpu_device pointer
509 *
510 * Initializes writeback and allocates writeback memory (all asics).
d38ceaf9
AD
511 * Used at driver startup.
512 * Returns 0 on success or an -error on failure.
513 */
514static int amdgpu_wb_init(struct amdgpu_device *adev)
515{
516 int r;
517
518 if (adev->wb.wb_obj == NULL) {
60a970a6 519 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
a76ed485
AD
520 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
521 &adev->wb.wb_obj, &adev->wb.gpu_addr,
522 (void **)&adev->wb.wb);
d38ceaf9
AD
523 if (r) {
524 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
525 return r;
526 }
d38ceaf9
AD
527
528 adev->wb.num_wb = AMDGPU_MAX_WB;
529 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
530
531 /* clear wb memory */
60a970a6 532 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
d38ceaf9
AD
533 }
534
535 return 0;
536}
537
538/**
539 * amdgpu_wb_get - Allocate a wb entry
540 *
541 * @adev: amdgpu_device pointer
542 * @wb: wb index
543 *
544 * Allocate a wb slot for use by the driver (all asics).
545 * Returns 0 on success or -EINVAL on failure.
546 */
547int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
548{
549 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
550 if (offset < adev->wb.num_wb) {
551 __set_bit(offset, adev->wb.used);
552 *wb = offset;
553 return 0;
554 } else {
555 return -EINVAL;
556 }
557}
558
7014285a
KW
559/**
560 * amdgpu_wb_get_64bit - Allocate a wb entry
561 *
562 * @adev: amdgpu_device pointer
563 * @wb: wb index
564 *
565 * Allocate a wb slot for use by the driver (all asics).
566 * Returns 0 on success or -EINVAL on failure.
567 */
568int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
569{
570 unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
571 adev->wb.num_wb, 0, 2, 7, 0);
572 if ((offset + 1) < adev->wb.num_wb) {
573 __set_bit(offset, adev->wb.used);
574 __set_bit(offset + 1, adev->wb.used);
575 *wb = offset;
576 return 0;
577 } else {
578 return -EINVAL;
579 }
580}
581
d38ceaf9
AD
582/**
583 * amdgpu_wb_free - Free a wb entry
584 *
585 * @adev: amdgpu_device pointer
586 * @wb: wb index
587 *
588 * Free a wb slot allocated for use by the driver (all asics)
589 */
590void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
591{
592 if (wb < adev->wb.num_wb)
593 __clear_bit(wb, adev->wb.used);
594}
595
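/*
 * Illustrative sketch (not part of the original file): a typical writeback
 * slot lifecycle.  Slots are dword sized, so the CPU view of slot 'wb' is
 * adev->wb.wb[wb] and its GPU address is adev->wb.gpu_addr + wb * 4.
 *
 *	u32 wb;
 *
 *	if (amdgpu_wb_get(adev, &wb))
 *		return -EINVAL;			// no free slot
 *	// hand adev->wb.gpu_addr + wb * 4 to the engine,
 *	// read the value back through adev->wb.wb[wb],
 *	// then release the slot:
 *	amdgpu_wb_free(adev, wb);
 */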
596/**
597 * amdgpu_wb_free_64bit - Free a wb entry
598 *
599 * @adev: amdgpu_device pointer
600 * @wb: wb index
601 *
602 * Free a wb slot allocated for use by the driver (all asics)
603 */
604void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
605{
606 if ((wb + 1) < adev->wb.num_wb) {
607 __clear_bit(wb, adev->wb.used);
608 __clear_bit(wb + 1, adev->wb.used);
609 }
610}
611
612/**
613 * amdgpu_vram_location - try to find VRAM location
614 * @adev: amdgpu device structure holding all necessary information
615 * @mc: memory controller structure holding memory information
616 * @base: base address at which to put VRAM
617 *
618 * Function will try to place VRAM at the base address provided
619 * as parameter (which is so far either PCI aperture address or
620 * for IGP TOM base address).
621 *
622 * If there is not enough space to fit the invisible VRAM in the 32bit
623 * address space then we limit the VRAM size to the aperture.
624 *
625 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
626 * this shouldn't be a problem as we are using the PCI aperture as a reference.
627 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
628 * not IGP.
629 *
630 * Note: we use mc_vram_size as on some boards we need to program the mc to
631 * cover the whole aperture even if VRAM size is smaller than the aperture
632 * size (Novell bug 204882 plus lots of Ubuntu ones).
633 *
634 * Note: when limiting vram it's safe to overwrite real_vram_size because
635 * we are not in the case where real_vram_size is smaller than mc_vram_size
636 * (i.e. not affected by the bogus hw of Novell bug 204882 plus lots of
637 * Ubuntu ones)
638 *
639 * Note: IGP TOM addr should be the same as the aperture addr, we don't
640 * explicitly check for that though.
641 *
642 * FIXME: when reducing VRAM size align new size on power of 2.
643 */
644void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
645{
646 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
647
648 mc->vram_start = base;
649 if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
650 dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
651 mc->real_vram_size = mc->aper_size;
652 mc->mc_vram_size = mc->aper_size;
653 }
654 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
655 if (limit && limit < mc->real_vram_size)
656 mc->real_vram_size = limit;
657 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
658 mc->mc_vram_size >> 20, mc->vram_start,
659 mc->vram_end, mc->real_vram_size >> 20);
660}
661
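/*
 * Illustrative sketch (not part of the original file): worked example of the
 * placement above, assuming a board with 4 GB of VRAM, a 256 MB PCI aperture
 * and a 40-bit mc_mask, called as amdgpu_vram_location(adev, &adev->mc, 0):
 *
 *	vram_start = 0
 *	vram_end   = 4 GB - 1		(4 GB still fits below the 40-bit mask)
 *	real_vram_size stays 4 GB	(unless the vram_limit module option caps it)
 *
 * Only when mc_vram_size does not fit below mc_mask are both sizes clamped
 * to the aperture, as the warning path above does.
 */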
662/**
663 * amdgpu_gtt_location - try to find GTT location
664 * @adev: amdgpu device structure holding all necessary information
665 * @mc: memory controller structure holding memory information
666 *
667 * Function will try to place GTT before or after VRAM.
668 *
669 * If GTT size is bigger than the space left then we adjust GTT size.
670 * Thus this function will never fail.
671 *
672 * FIXME: when reducing GTT size align new size on power of 2.
673 */
674void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
675{
676 u64 size_af, size_bf;
677
678 size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
679 size_bf = mc->vram_start & ~mc->gtt_base_align;
680 if (size_bf > size_af) {
681 if (mc->gtt_size > size_bf) {
682 dev_warn(adev->dev, "limiting GTT\n");
683 mc->gtt_size = size_bf;
684 }
9dc5a91e 685 mc->gtt_start = 0;
d38ceaf9
AD
686 } else {
687 if (mc->gtt_size > size_af) {
688 dev_warn(adev->dev, "limiting GTT\n");
689 mc->gtt_size = size_af;
690 }
691 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
692 }
693 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
694 dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
695 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
696}
697
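/*
 * Illustrative sketch (not part of the original file): worked example of the
 * before/after decision above, assuming a 40-bit mc_mask, VRAM placed at 0
 * with vram_end at 4 GB - 1, and a 4 GB GTT request:
 *
 *	size_bf = vram_start = 0                 -> no room below VRAM
 *	size_af = mc_mask - vram_end ~= 1020 GB  -> plenty of room above
 *
 * so the GTT lands right after VRAM: gtt_start = vram_end + 1 (rounded up to
 * gtt_base_align) and gtt_end = gtt_start + gtt_size - 1.
 */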
698/*
699 * GPU helpers function.
700 */
701/**
702 * amdgpu_need_post - check if the hw needs to be posted or not
703 *
704 * @adev: amdgpu_device pointer
705 *
706 * Check if the asic has been initialized (all asics) at driver startup
707 * or if post is needed if a hw reset is performed.
708 * Returns true if post is needed or false if not.
709 */
c836fec5 710bool amdgpu_need_post(struct amdgpu_device *adev)
d38ceaf9
AD
711{
712 uint32_t reg;
713
c836fec5
JQ
714 if (adev->has_hw_reset) {
715 adev->has_hw_reset = false;
716 return true;
717 }
d38ceaf9 718 /* then check MEM_SIZE, in case the crtcs are off */
bbf282d8 719 reg = amdgpu_asic_get_config_memsize(adev);
d38ceaf9 720
f2713e8c 721 if ((reg != 0) && (reg != 0xffffffff))
c836fec5 722 return false;
d38ceaf9 723
c836fec5 724 return true;
d38ceaf9
AD
725
726}
727
bec86378
ML
728static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
729{
730 if (amdgpu_sriov_vf(adev))
731 return false;
732
733 if (amdgpu_passthrough(adev)) {
734 /* for FIJI: in the whole GPU pass-through virtualization case, after a VM
735 * reboot some old SMC firmware still needs the driver to do vPost, otherwise
736 * the GPU hangs. SMC firmware versions above 22.15 don't have this flaw, so
737 * we force vPost to be executed for SMC versions below 22.15.
738 */
739 if (adev->asic_type == CHIP_FIJI) {
740 int err;
741 uint32_t fw_ver;
742 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
743 /* force vPost if an error occurred */
744 if (err)
745 return true;
746
747 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1da2c326
ML
748 if (fw_ver < 0x00160e00)
749 return true;
bec86378 750 }
bec86378 751 }
c836fec5 752 return amdgpu_need_post(adev);
bec86378
ML
753}
754
d38ceaf9
AD
755/**
756 * amdgpu_dummy_page_init - init dummy page used by the driver
757 *
758 * @adev: amdgpu_device pointer
759 *
760 * Allocate the dummy page used by the driver (all asics).
761 * This dummy page is used by the driver as a filler for gart entries
762 * when pages are taken out of the GART
763 * Returns 0 on success, -ENOMEM on failure.
764 */
765int amdgpu_dummy_page_init(struct amdgpu_device *adev)
766{
767 if (adev->dummy_page.page)
768 return 0;
769 adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
770 if (adev->dummy_page.page == NULL)
771 return -ENOMEM;
772 adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
773 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
774 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
775 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
776 __free_page(adev->dummy_page.page);
777 adev->dummy_page.page = NULL;
778 return -ENOMEM;
779 }
780 return 0;
781}
782
783/**
784 * amdgpu_dummy_page_fini - free dummy page used by the driver
785 *
786 * @adev: amdgpu_device pointer
787 *
788 * Frees the dummy page used by the driver (all asics).
789 */
790void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
791{
792 if (adev->dummy_page.page == NULL)
793 return;
794 pci_unmap_page(adev->pdev, adev->dummy_page.addr,
795 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
796 __free_page(adev->dummy_page.page);
797 adev->dummy_page.page = NULL;
798}
799
800
801/* ATOM accessor methods */
802/*
803 * ATOM is an interpreted byte code stored in tables in the vbios. The
804 * driver registers callbacks to access registers and the interpreter
805 * in the driver parses the tables and executes then to program specific
806 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
807 * atombios.h, and atom.c
808 */
809
810/**
811 * cail_pll_read - read PLL register
812 *
813 * @info: atom card_info pointer
814 * @reg: PLL register offset
815 *
816 * Provides a PLL register accessor for the atom interpreter (r4xx+).
817 * Returns the value of the PLL register.
818 */
819static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
820{
821 return 0;
822}
823
824/**
825 * cail_pll_write - write PLL register
826 *
827 * @info: atom card_info pointer
828 * @reg: PLL register offset
829 * @val: value to write to the pll register
830 *
831 * Provides a PLL register accessor for the atom interpreter (r4xx+).
832 */
833static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
834{
835
836}
837
838/**
839 * cail_mc_read - read MC (Memory Controller) register
840 *
841 * @info: atom card_info pointer
842 * @reg: MC register offset
843 *
844 * Provides an MC register accessor for the atom interpreter (r4xx+).
845 * Returns the value of the MC register.
846 */
847static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
848{
849 return 0;
850}
851
852/**
853 * cail_mc_write - write MC (Memory Controller) register
854 *
855 * @info: atom card_info pointer
856 * @reg: MC register offset
857 * @val: value to write to the MC register
858 *
859 * Provides an MC register accessor for the atom interpreter (r4xx+).
860 */
861static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
862{
863
864}
865
866/**
867 * cail_reg_write - write MMIO register
868 *
869 * @info: atom card_info pointer
870 * @reg: MMIO register offset
871 * @val: value to write to the MMIO register
872 *
873 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
874 */
875static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
876{
877 struct amdgpu_device *adev = info->dev->dev_private;
878
879 WREG32(reg, val);
880}
881
882/**
883 * cail_reg_read - read MMIO register
884 *
885 * @info: atom card_info pointer
886 * @reg: MMIO register offset
887 *
888 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
889 * Returns the value of the MMIO register.
890 */
891static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
892{
893 struct amdgpu_device *adev = info->dev->dev_private;
894 uint32_t r;
895
896 r = RREG32(reg);
897 return r;
898}
899
900/**
901 * cail_ioreg_write - write IO register
902 *
903 * @info: atom card_info pointer
904 * @reg: IO register offset
905 * @val: value to write to the IO register
906 *
907 * Provides an IO register accessor for the atom interpreter (r4xx+).
908 */
909static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
910{
911 struct amdgpu_device *adev = info->dev->dev_private;
912
913 WREG32_IO(reg, val);
914}
915
916/**
917 * cail_ioreg_read - read IO register
918 *
919 * @info: atom card_info pointer
920 * @reg: IO register offset
921 *
922 * Provides an IO register accessor for the atom interpreter (r4xx+).
923 * Returns the value of the IO register.
924 */
925static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
926{
927 struct amdgpu_device *adev = info->dev->dev_private;
928 uint32_t r;
929
930 r = RREG32_IO(reg);
931 return r;
932}
933
934/**
935 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
936 *
937 * @adev: amdgpu_device pointer
938 *
939 * Frees the driver info and register access callbacks for the ATOM
940 * interpreter (r4xx+).
941 * Called at driver shutdown.
942 */
943static void amdgpu_atombios_fini(struct amdgpu_device *adev)
944{
89e0ec9f 945 if (adev->mode_info.atom_context) {
d38ceaf9 946 kfree(adev->mode_info.atom_context->scratch);
89e0ec9f
ML
947 kfree(adev->mode_info.atom_context->iio);
948 }
d38ceaf9
AD
949 kfree(adev->mode_info.atom_context);
950 adev->mode_info.atom_context = NULL;
951 kfree(adev->mode_info.atom_card_info);
952 adev->mode_info.atom_card_info = NULL;
953}
954
955/**
956 * amdgpu_atombios_init - init the driver info and callbacks for atombios
957 *
958 * @adev: amdgpu_device pointer
959 *
960 * Initializes the driver info and register access callbacks for the
961 * ATOM interpreter (r4xx+).
962 * Returns 0 on success, -ENOMEM on failure.
963 * Called at driver startup.
964 */
965static int amdgpu_atombios_init(struct amdgpu_device *adev)
966{
967 struct card_info *atom_card_info =
968 kzalloc(sizeof(struct card_info), GFP_KERNEL);
969
970 if (!atom_card_info)
971 return -ENOMEM;
972
973 adev->mode_info.atom_card_info = atom_card_info;
974 atom_card_info->dev = adev->ddev;
975 atom_card_info->reg_read = cail_reg_read;
976 atom_card_info->reg_write = cail_reg_write;
977 /* needed for iio ops */
978 if (adev->rio_mem) {
979 atom_card_info->ioreg_read = cail_ioreg_read;
980 atom_card_info->ioreg_write = cail_ioreg_write;
981 } else {
b64a18c5 982 DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
d38ceaf9
AD
983 atom_card_info->ioreg_read = cail_reg_read;
984 atom_card_info->ioreg_write = cail_reg_write;
985 }
986 atom_card_info->mc_read = cail_mc_read;
987 atom_card_info->mc_write = cail_mc_write;
988 atom_card_info->pll_read = cail_pll_read;
989 atom_card_info->pll_write = cail_pll_write;
990
991 adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
992 if (!adev->mode_info.atom_context) {
993 amdgpu_atombios_fini(adev);
994 return -ENOMEM;
995 }
996
997 mutex_init(&adev->mode_info.atom_context->mutex);
a5bde2f9
AD
998 if (adev->is_atom_fw) {
999 amdgpu_atomfirmware_scratch_regs_init(adev);
1000 amdgpu_atomfirmware_allocate_fb_scratch(adev);
1001 } else {
1002 amdgpu_atombios_scratch_regs_init(adev);
1003 amdgpu_atombios_allocate_fb_scratch(adev);
1004 }
d38ceaf9
AD
1005 return 0;
1006}
1007
1008/* if we get transitioned to only one device, take VGA back */
1009/**
1010 * amdgpu_vga_set_decode - enable/disable vga decode
1011 *
1012 * @cookie: amdgpu_device pointer
1013 * @state: enable/disable vga decode
1014 *
1015 * Enable/disable vga decode (all asics).
1016 * Returns VGA resource flags.
1017 */
1018static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
1019{
1020 struct amdgpu_device *adev = cookie;
1021 amdgpu_asic_set_vga_state(adev, state);
1022 if (state)
1023 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1024 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1025 else
1026 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1027}
1028
1029/**
1030 * amdgpu_check_pot_argument - check that argument is a power of two
1031 *
1032 * @arg: value to check
1033 *
1034 * Validates that a certain argument is a power of two (all asics).
1035 * Returns true if argument is valid.
1036 */
1037static bool amdgpu_check_pot_argument(int arg)
1038{
1039 return (arg & (arg - 1)) == 0;
1040}
1041
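/*
 * Note (added, not part of the original file): the power-of-two test above
 * also accepts 0 (0 & -1 == 0), so the callers below pair it with explicit
 * range checks, roughly:
 *
 *	if (!amdgpu_check_pot_argument(amdgpu_vm_size) || amdgpu_vm_size < 1)
 *		// fall back to the default value
 */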
1042static void amdgpu_check_block_size(struct amdgpu_device *adev)
1043{
1044 /* defines number of bits in page table versus page directory,
1045 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1046 * page table and the remaining bits are in the page directory */
bab4fee7
JZ
1047 if (amdgpu_vm_block_size == -1)
1048 return;
a1adf8be 1049
bab4fee7 1050 if (amdgpu_vm_block_size < 9) {
a1adf8be
CZ
1051 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1052 amdgpu_vm_block_size);
bab4fee7 1053 goto def_value;
a1adf8be
CZ
1054 }
1055
1056 if (amdgpu_vm_block_size > 24 ||
1057 (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
1058 dev_warn(adev->dev, "VM page table size (%d) too large\n",
1059 amdgpu_vm_block_size);
bab4fee7 1060 goto def_value;
a1adf8be 1061 }
bab4fee7
JZ
1062
1063 return;
1064
1065def_value:
1066 amdgpu_vm_block_size = -1;
a1adf8be
CZ
1067}
1068
83ca145d
ZJ
1069static void amdgpu_check_vm_size(struct amdgpu_device *adev)
1070{
1071 if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
1072 dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
1073 amdgpu_vm_size);
1074 goto def_value;
1075 }
1076
1077 if (amdgpu_vm_size < 1) {
1078 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1079 amdgpu_vm_size);
1080 goto def_value;
1081 }
1082
1083 /*
1084 * Max GPUVM size for Cayman, SI, CI and VI is 40 bits.
1085 */
1086 if (amdgpu_vm_size > 1024) {
1087 dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
1088 amdgpu_vm_size);
1089 goto def_value;
1090 }
1091
1092 return;
1093
1094def_value:
bab4fee7 1095 amdgpu_vm_size = -1;
83ca145d
ZJ
1096}
1097
d38ceaf9
AD
1098/**
1099 * amdgpu_check_arguments - validate module params
1100 *
1101 * @adev: amdgpu_device pointer
1102 *
1103 * Validates certain module parameters and updates
1104 * the associated values used by the driver (all asics).
1105 */
1106static void amdgpu_check_arguments(struct amdgpu_device *adev)
1107{
5b011235
CZ
1108 if (amdgpu_sched_jobs < 4) {
1109 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1110 amdgpu_sched_jobs);
1111 amdgpu_sched_jobs = 4;
1112 } else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)){
1113 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1114 amdgpu_sched_jobs);
1115 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1116 }
d38ceaf9
AD
1117
1118 if (amdgpu_gart_size != -1) {
1119 /* gtt size must be greater than or equal to 32M */
d38ceaf9
AD
1120 if (amdgpu_gart_size < 32) {
1121 dev_warn(adev->dev, "gart size (%d) too small\n",
1122 amdgpu_gart_size);
1123 amdgpu_gart_size = -1;
d38ceaf9
AD
1124 }
1125 }
1126
83ca145d 1127 amdgpu_check_vm_size(adev);
d38ceaf9 1128
bab4fee7 1129 amdgpu_check_block_size(adev);
6a7f76e7 1130
526bae37 1131 if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
1132 !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
6a7f76e7
CK
1133 dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
1134 amdgpu_vram_page_split);
1135 amdgpu_vram_page_split = 1024;
1136 }
d38ceaf9
AD
1137}
1138
1139/**
1140 * amdgpu_switcheroo_set_state - set switcheroo state
1141 *
1142 * @pdev: pci dev pointer
1694467b 1143 * @state: vga_switcheroo state
d38ceaf9
AD
1144 *
1145 * Callback for the switcheroo driver. Suspends or resumes
1146 * the asics before or after it is powered up using ACPI methods.
1147 */
1148static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1149{
1150 struct drm_device *dev = pci_get_drvdata(pdev);
1151
1152 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1153 return;
1154
1155 if (state == VGA_SWITCHEROO_ON) {
1156 unsigned d3_delay = dev->pdev->d3_delay;
1157
7ca85295 1158 pr_info("amdgpu: switched on\n");
d38ceaf9
AD
1159 /* don't suspend or resume card normally */
1160 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1161
810ddc3a 1162 amdgpu_device_resume(dev, true, true);
d38ceaf9
AD
1163
1164 dev->pdev->d3_delay = d3_delay;
1165
1166 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1167 drm_kms_helper_poll_enable(dev);
1168 } else {
7ca85295 1169 pr_info("amdgpu: switched off\n");
d38ceaf9
AD
1170 drm_kms_helper_poll_disable(dev);
1171 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
810ddc3a 1172 amdgpu_device_suspend(dev, true, true);
d38ceaf9
AD
1173 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1174 }
1175}
1176
1177/**
1178 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1179 *
1180 * @pdev: pci dev pointer
1181 *
1182 * Callback for the switcheroo driver. Check if the switcheroo
1183 * state can be changed.
1184 * Returns true if the state can be changed, false if not.
1185 */
1186static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1187{
1188 struct drm_device *dev = pci_get_drvdata(pdev);
1189
1190 /*
1191 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1192 * locking inversion with the driver load path. And the access here is
1193 * completely racy anyway. So don't bother with locking for now.
1194 */
1195 return dev->open_count == 0;
1196}
1197
1198static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1199 .set_gpu_state = amdgpu_switcheroo_set_state,
1200 .reprobe = NULL,
1201 .can_switch = amdgpu_switcheroo_can_switch,
1202};
1203
1204int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
5fc3aeeb 1205 enum amd_ip_block_type block_type,
1206 enum amd_clockgating_state state)
d38ceaf9
AD
1207{
1208 int i, r = 0;
1209
1210 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1211 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1212 continue;
c722865a
RZ
1213 if (adev->ip_blocks[i].version->type != block_type)
1214 continue;
1215 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1216 continue;
1217 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1218 (void *)adev, state);
1219 if (r)
1220 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1221 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
1222 }
1223 return r;
1224}
1225
1226int amdgpu_set_powergating_state(struct amdgpu_device *adev,
5fc3aeeb 1227 enum amd_ip_block_type block_type,
1228 enum amd_powergating_state state)
d38ceaf9
AD
1229{
1230 int i, r = 0;
1231
1232 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1233 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1234 continue;
c722865a
RZ
1235 if (adev->ip_blocks[i].version->type != block_type)
1236 continue;
1237 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1238 continue;
1239 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1240 (void *)adev, state);
1241 if (r)
1242 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1243 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
1244 }
1245 return r;
1246}
1247
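/*
 * Illustrative sketch (not part of the original file): callers pass the IP
 * block type and the desired state; amdgpu_suspend() below, for example,
 * ungates the SMC block before shutting the other blocks down:
 *
 *	r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
 *					 AMD_CG_STATE_UNGATE);
 *	if (r)
 *		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
 */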
1248void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
1249{
1250 int i;
1251
1252 for (i = 0; i < adev->num_ip_blocks; i++) {
1253 if (!adev->ip_blocks[i].status.valid)
1254 continue;
1255 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1256 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1257 }
1258}
1259
5dbbb60b
AD
1260int amdgpu_wait_for_idle(struct amdgpu_device *adev,
1261 enum amd_ip_block_type block_type)
1262{
1263 int i, r;
1264
1265 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1266 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1267 continue;
a1255107
AD
1268 if (adev->ip_blocks[i].version->type == block_type) {
1269 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
5dbbb60b
AD
1270 if (r)
1271 return r;
1272 break;
1273 }
1274 }
1275 return 0;
1276
1277}
1278
1279bool amdgpu_is_idle(struct amdgpu_device *adev,
1280 enum amd_ip_block_type block_type)
1281{
1282 int i;
1283
1284 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1285 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1286 continue;
a1255107
AD
1287 if (adev->ip_blocks[i].version->type == block_type)
1288 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
5dbbb60b
AD
1289 }
1290 return true;
1291
1292}
1293
a1255107
AD
1294struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
1295 enum amd_ip_block_type type)
d38ceaf9
AD
1296{
1297 int i;
1298
1299 for (i = 0; i < adev->num_ip_blocks; i++)
a1255107 1300 if (adev->ip_blocks[i].version->type == type)
d38ceaf9
AD
1301 return &adev->ip_blocks[i];
1302
1303 return NULL;
1304}
1305
1306/**
1307 * amdgpu_ip_block_version_cmp
1308 *
1309 * @adev: amdgpu_device pointer
5fc3aeeb 1310 * @type: enum amd_ip_block_type
d38ceaf9
AD
1311 * @major: major version
1312 * @minor: minor version
1313 *
1314 * return 0 if equal or greater
1315 * return 1 if smaller or the ip_block doesn't exist
1316 */
1317int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
5fc3aeeb 1318 enum amd_ip_block_type type,
d38ceaf9
AD
1319 u32 major, u32 minor)
1320{
a1255107 1321 struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
d38ceaf9 1322
a1255107
AD
1323 if (ip_block && ((ip_block->version->major > major) ||
1324 ((ip_block->version->major == major) &&
1325 (ip_block->version->minor >= minor))))
d38ceaf9
AD
1326 return 0;
1327
1328 return 1;
1329}
1330
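/*
 * Illustrative sketch (not part of the original file): a caller that needs
 * at least a given major/minor of an IP block checks for a 0 return; the
 * block type and version here are only an example.
 *
 *	if (amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC, 7, 1) == 0) {
 *		// SMC 7.1 or newer is present
 *	}
 */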
1331/**
1332 * amdgpu_ip_block_add
1333 *
1334 * @adev: amdgpu_device pointer
1335 * @ip_block_version: pointer to the IP to add
1336 *
1337 * Adds the IP block driver information to the collection of IPs
1338 * on the asic.
1339 */
1340int amdgpu_ip_block_add(struct amdgpu_device *adev,
1341 const struct amdgpu_ip_block_version *ip_block_version)
1342{
1343 if (!ip_block_version)
1344 return -EINVAL;
1345
1346 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1347
1348 return 0;
1349}
1350
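/*
 * Illustrative sketch (not part of the original file): the per-asic
 * <asic>_set_ip_blocks() functions build the IP list with this helper; the
 * block names below follow the usual pattern, see vi.c or soc15.c for the
 * real tables.
 *
 *	amdgpu_ip_block_add(adev, &vi_common_ip_block);
 *	amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
 *	amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
 *	...
 */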
1351static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
9accf2fd
ED
1352{
1353 adev->enable_virtual_display = false;
1354
1355 if (amdgpu_virtual_display) {
1356 struct drm_device *ddev = adev->ddev;
1357 const char *pci_address_name = pci_name(ddev->pdev);
0f66356d 1358 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
9accf2fd
ED
1359
1360 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1361 pciaddstr_tmp = pciaddstr;
0f66356d
ED
1362 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1363 pciaddname = strsep(&pciaddname_tmp, ",");
967de2a9
YT
1364 if (!strcmp("all", pciaddname)
1365 || !strcmp(pci_address_name, pciaddname)) {
0f66356d
ED
1366 long num_crtc;
1367 int res = -1;
1368
9accf2fd 1369 adev->enable_virtual_display = true;
0f66356d
ED
1370
1371 if (pciaddname_tmp)
1372 res = kstrtol(pciaddname_tmp, 10,
1373 &num_crtc);
1374
1375 if (!res) {
1376 if (num_crtc < 1)
1377 num_crtc = 1;
1378 if (num_crtc > 6)
1379 num_crtc = 6;
1380 adev->mode_info.num_crtc = num_crtc;
1381 } else {
1382 adev->mode_info.num_crtc = 1;
1383 }
9accf2fd
ED
1384 break;
1385 }
1386 }
1387
0f66356d
ED
1388 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1389 amdgpu_virtual_display, pci_address_name,
1390 adev->enable_virtual_display, adev->mode_info.num_crtc);
9accf2fd
ED
1391
1392 kfree(pciaddstr);
1393 }
1394}
1395
e2a75f88
AD
1396static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1397{
1398 const struct firmware *fw;
1399 const char *chip_name;
1400 char fw_name[30];
1401 int err;
1402 const struct gpu_info_firmware_header_v1_0 *hdr;
1403
1404 switch (adev->asic_type) {
1405 case CHIP_TOPAZ:
1406 case CHIP_TONGA:
1407 case CHIP_FIJI:
1408 case CHIP_POLARIS11:
1409 case CHIP_POLARIS10:
1410 case CHIP_POLARIS12:
1411 case CHIP_CARRIZO:
1412 case CHIP_STONEY:
1413#ifdef CONFIG_DRM_AMDGPU_SI
1414 case CHIP_VERDE:
1415 case CHIP_TAHITI:
1416 case CHIP_PITCAIRN:
1417 case CHIP_OLAND:
1418 case CHIP_HAINAN:
1419#endif
1420#ifdef CONFIG_DRM_AMDGPU_CIK
1421 case CHIP_BONAIRE:
1422 case CHIP_HAWAII:
1423 case CHIP_KAVERI:
1424 case CHIP_KABINI:
1425 case CHIP_MULLINS:
1426#endif
1427 default:
1428 return 0;
1429 case CHIP_VEGA10:
1430 chip_name = "vega10";
1431 break;
1432 }
1433
1434 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1435 err = request_firmware(&fw, fw_name, adev->dev);
1436 if (err) {
1437 dev_err(adev->dev,
1438 "Failed to load gpu_info firmware \"%s\"\n",
1439 fw_name);
1440 goto out;
1441 }
1442 err = amdgpu_ucode_validate(fw);
1443 if (err) {
1444 dev_err(adev->dev,
1445 "Failed to validate gpu_info firmware \"%s\"\n",
1446 fw_name);
1447 goto out;
1448 }
1449
1450 hdr = (const struct gpu_info_firmware_header_v1_0 *)fw->data;
1451 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1452
1453 switch (hdr->version_major) {
1454 case 1:
1455 {
1456 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1457 (const struct gpu_info_firmware_v1_0 *)(fw->data +
1458 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1459
1460 adev->gfx.config.max_shader_engines = gpu_info_fw->gc_num_se;
1461 adev->gfx.config.max_cu_per_sh = gpu_info_fw->gc_num_cu_per_sh;
1462 adev->gfx.config.max_sh_per_se = gpu_info_fw->gc_num_sh_per_se;
1463 adev->gfx.config.max_backends_per_se = gpu_info_fw->gc_num_rb_per_se;
1464 adev->gfx.config.max_texture_channel_caches =
1465 gpu_info_fw->gc_num_tccs;
1466 adev->gfx.config.max_gprs = gpu_info_fw->gc_num_gprs;
1467 adev->gfx.config.max_gs_threads = gpu_info_fw->gc_num_max_gs_thds;
1468 adev->gfx.config.gs_vgt_table_depth = gpu_info_fw->gc_gs_table_depth;
1469 adev->gfx.config.gs_prim_buffer_depth = gpu_info_fw->gc_gsprim_buff_depth;
1470 adev->gfx.config.double_offchip_lds_buf =
1471 gpu_info_fw->gc_double_offchip_lds_buffer;
1472 adev->gfx.cu_info.wave_front_size = gpu_info_fw->gc_wave_size;
1473 break;
1474 }
1475 default:
1476 dev_err(adev->dev,
1477 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1478 err = -EINVAL;
1479 goto out;
1480 }
1481out:
1482 release_firmware(fw);
1483 fw = NULL;
1484
1485 return err;
1486}
1487
d38ceaf9
AD
1488static int amdgpu_early_init(struct amdgpu_device *adev)
1489{
aaa36a97 1490 int i, r;
d38ceaf9 1491
483ef985 1492 amdgpu_device_enable_virtual_display(adev);
a6be7570 1493
d38ceaf9 1494 switch (adev->asic_type) {
aaa36a97
AD
1495 case CHIP_TOPAZ:
1496 case CHIP_TONGA:
48299f95 1497 case CHIP_FIJI:
2cc0c0b5
FC
1498 case CHIP_POLARIS11:
1499 case CHIP_POLARIS10:
c4642a47 1500 case CHIP_POLARIS12:
aaa36a97 1501 case CHIP_CARRIZO:
39bb0c92
SL
1502 case CHIP_STONEY:
1503 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
aaa36a97
AD
1504 adev->family = AMDGPU_FAMILY_CZ;
1505 else
1506 adev->family = AMDGPU_FAMILY_VI;
1507
1508 r = vi_set_ip_blocks(adev);
1509 if (r)
1510 return r;
1511 break;
33f34802
KW
1512#ifdef CONFIG_DRM_AMDGPU_SI
1513 case CHIP_VERDE:
1514 case CHIP_TAHITI:
1515 case CHIP_PITCAIRN:
1516 case CHIP_OLAND:
1517 case CHIP_HAINAN:
295d0daf 1518 adev->family = AMDGPU_FAMILY_SI;
33f34802
KW
1519 r = si_set_ip_blocks(adev);
1520 if (r)
1521 return r;
1522 break;
1523#endif
a2e73f56
AD
1524#ifdef CONFIG_DRM_AMDGPU_CIK
1525 case CHIP_BONAIRE:
1526 case CHIP_HAWAII:
1527 case CHIP_KAVERI:
1528 case CHIP_KABINI:
1529 case CHIP_MULLINS:
1530 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1531 adev->family = AMDGPU_FAMILY_CI;
1532 else
1533 adev->family = AMDGPU_FAMILY_KV;
1534
1535 r = cik_set_ip_blocks(adev);
1536 if (r)
1537 return r;
1538 break;
1539#endif
460826e6
KW
1540 case CHIP_VEGA10:
1541 adev->family = AMDGPU_FAMILY_AI;
1542
1543 r = soc15_set_ip_blocks(adev);
1544 if (r)
1545 return r;
1546 break;
d38ceaf9
AD
1547 default:
1548 /* FIXME: not supported yet */
1549 return -EINVAL;
1550 }
1551
e2a75f88
AD
1552 r = amdgpu_device_parse_gpu_info_fw(adev);
1553 if (r)
1554 return r;
1555
3149d9da
XY
1556 if (amdgpu_sriov_vf(adev)) {
1557 r = amdgpu_virt_request_full_gpu(adev, true);
1558 if (r)
1559 return r;
1560 }
1561
d38ceaf9
AD
1562 for (i = 0; i < adev->num_ip_blocks; i++) {
1563 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1564 DRM_ERROR("disabled ip block: %d\n", i);
a1255107 1565 adev->ip_blocks[i].status.valid = false;
d38ceaf9 1566 } else {
a1255107
AD
1567 if (adev->ip_blocks[i].version->funcs->early_init) {
1568 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2c1a2784 1569 if (r == -ENOENT) {
a1255107 1570 adev->ip_blocks[i].status.valid = false;
2c1a2784 1571 } else if (r) {
a1255107
AD
1572 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1573 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1574 return r;
2c1a2784 1575 } else {
a1255107 1576 adev->ip_blocks[i].status.valid = true;
2c1a2784 1577 }
974e6b64 1578 } else {
a1255107 1579 adev->ip_blocks[i].status.valid = true;
d38ceaf9 1580 }
d38ceaf9
AD
1581 }
1582 }
1583
395d1fb9
NH
1584 adev->cg_flags &= amdgpu_cg_mask;
1585 adev->pg_flags &= amdgpu_pg_mask;
1586
d38ceaf9
AD
1587 return 0;
1588}
1589
1590static int amdgpu_init(struct amdgpu_device *adev)
1591{
1592 int i, r;
1593
1594 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1595 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1596 continue;
a1255107 1597 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2c1a2784 1598 if (r) {
a1255107
AD
1599 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1600 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1601 return r;
2c1a2784 1602 }
a1255107 1603 adev->ip_blocks[i].status.sw = true;
d38ceaf9 1604 /* need to do gmc hw init early so we can allocate gpu mem */
a1255107 1605 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
d38ceaf9 1606 r = amdgpu_vram_scratch_init(adev);
2c1a2784
AD
1607 if (r) {
1608 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
d38ceaf9 1609 return r;
2c1a2784 1610 }
a1255107 1611 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784
AD
1612 if (r) {
1613 DRM_ERROR("hw_init %d failed %d\n", i, r);
d38ceaf9 1614 return r;
2c1a2784 1615 }
d38ceaf9 1616 r = amdgpu_wb_init(adev);
2c1a2784
AD
1617 if (r) {
1618 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
d38ceaf9 1619 return r;
2c1a2784 1620 }
a1255107 1621 adev->ip_blocks[i].status.hw = true;
2493664f
ML
1622
1623 /* right after GMC hw init, we create CSA */
1624 if (amdgpu_sriov_vf(adev)) {
1625 r = amdgpu_allocate_static_csa(adev);
1626 if (r) {
1627 DRM_ERROR("allocate CSA failed %d\n", r);
1628 return r;
1629 }
1630 }
d38ceaf9
AD
1631 }
1632 }
1633
1634 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1635 if (!adev->ip_blocks[i].status.sw)
d38ceaf9
AD
1636 continue;
1637 /* gmc hw init is done early */
a1255107 1638 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
d38ceaf9 1639 continue;
a1255107 1640 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784 1641 if (r) {
a1255107
AD
1642 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1643 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1644 return r;
2c1a2784 1645 }
a1255107 1646 adev->ip_blocks[i].status.hw = true;
d38ceaf9
AD
1647 }
1648
1649 return 0;
1650}
1651
1652static int amdgpu_late_init(struct amdgpu_device *adev)
1653{
1654 int i = 0, r;
1655
1656 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1657 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1658 continue;
a1255107
AD
1659 if (adev->ip_blocks[i].version->funcs->late_init) {
1660 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2c1a2784 1661 if (r) {
a1255107
AD
1662 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1663 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1664 return r;
2c1a2784 1665 }
a1255107 1666 adev->ip_blocks[i].status.late_initialized = true;
d38ceaf9 1667 }
4a446d55 1668 /* skip CG for VCE/UVD, it's handled specially */
a1255107
AD
1669 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1670 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
4a446d55 1671 /* enable clockgating to save power */
a1255107
AD
1672 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1673 AMD_CG_STATE_GATE);
4a446d55
AD
1674 if (r) {
1675 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
a1255107 1676 adev->ip_blocks[i].version->funcs->name, r);
4a446d55
AD
1677 return r;
1678 }
b0b00ff1 1679 }
d38ceaf9
AD
1680 }
1681
1682 return 0;
1683}
1684
1685static int amdgpu_fini(struct amdgpu_device *adev)
1686{
1687 int i, r;
1688
3e96dbfd
AD
1689 /* need to disable SMC first */
1690 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1691 if (!adev->ip_blocks[i].status.hw)
3e96dbfd 1692 continue;
a1255107 1693 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3e96dbfd 1694 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
a1255107
AD
1695 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1696 AMD_CG_STATE_UNGATE);
3e96dbfd
AD
1697 if (r) {
1698 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
a1255107 1699 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd
AD
1700 return r;
1701 }
a1255107 1702 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3e96dbfd
AD
1703 /* XXX handle errors */
1704 if (r) {
1705 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
a1255107 1706 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd 1707 }
a1255107 1708 adev->ip_blocks[i].status.hw = false;
3e96dbfd
AD
1709 break;
1710 }
1711 }
1712
d38ceaf9 1713 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1714 if (!adev->ip_blocks[i].status.hw)
d38ceaf9 1715 continue;
a1255107 1716 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
d38ceaf9
AD
1717 amdgpu_wb_fini(adev);
1718 amdgpu_vram_scratch_fini(adev);
1719 }
8201a67a
RZ
1720
1721 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1722 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1723 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1724 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1725 AMD_CG_STATE_UNGATE);
1726 if (r) {
1727 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1728 adev->ip_blocks[i].version->funcs->name, r);
1729 return r;
1730 }
2c1a2784 1731 }
8201a67a 1732
a1255107 1733 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
d38ceaf9 1734 /* XXX handle errors */
2c1a2784 1735 if (r) {
a1255107
AD
1736 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1737 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1738 }
8201a67a 1739
a1255107 1740 adev->ip_blocks[i].status.hw = false;
d38ceaf9
AD
1741 }
1742
1743 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1744 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 1745 continue;
a1255107 1746 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
d38ceaf9 1747 /* XXX handle errors */
2c1a2784 1748 if (r) {
a1255107
AD
1749 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1750 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1751 }
a1255107
AD
1752 adev->ip_blocks[i].status.sw = false;
1753 adev->ip_blocks[i].status.valid = false;
d38ceaf9
AD
1754 }
1755
a6dcfd9c 1756 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1757 if (!adev->ip_blocks[i].status.late_initialized)
8a2eef1d 1758 continue;
a1255107
AD
1759 if (adev->ip_blocks[i].version->funcs->late_fini)
1760 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1761 adev->ip_blocks[i].status.late_initialized = false;
a6dcfd9c
ML
1762 }
1763
3149d9da 1764 if (amdgpu_sriov_vf(adev)) {
2493664f 1765 amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
3149d9da
XY
1766 amdgpu_virt_release_full_gpu(adev, false);
1767 }
2493664f 1768
d38ceaf9
AD
1769 return 0;
1770}
1771
faefba95 1772int amdgpu_suspend(struct amdgpu_device *adev)
d38ceaf9
AD
1773{
1774 int i, r;
1775
e941ea99
XY
1776 if (amdgpu_sriov_vf(adev))
1777 amdgpu_virt_request_full_gpu(adev, false);
1778
c5a93a28
FC
1779 /* ungate SMC block first */
1780 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1781 AMD_CG_STATE_UNGATE);
1782 if (r) {
1783 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r);
1784 }
1785
d38ceaf9 1786 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1787 if (!adev->ip_blocks[i].status.valid)
d38ceaf9
AD
1788 continue;
1789 /* ungate blocks so that suspend can properly shut them down */
c5a93a28 1790 if (i != AMD_IP_BLOCK_TYPE_SMC) {
a1255107
AD
1791 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1792 AMD_CG_STATE_UNGATE);
c5a93a28 1793 if (r) {
a1255107
AD
1794 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1795 adev->ip_blocks[i].version->funcs->name, r);
c5a93a28 1796 }
2c1a2784 1797 }
d38ceaf9 1798 /* XXX handle errors */
a1255107 1799 r = adev->ip_blocks[i].version->funcs->suspend(adev);
d38ceaf9 1800 /* XXX handle errors */
2c1a2784 1801 if (r) {
a1255107
AD
1802 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1803 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1804 }
d38ceaf9
AD
1805 }
1806
e941ea99
XY
1807 if (amdgpu_sriov_vf(adev))
1808 amdgpu_virt_release_full_gpu(adev, false);
1809
d38ceaf9
AD
1810 return 0;
1811}
1812
e4f0fdcc 1813static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
a90ad3c2
ML
1814{
1815 int i, r;
1816
2cb681b6
ML
1817 static enum amd_ip_block_type ip_order[] = {
1818 AMD_IP_BLOCK_TYPE_GMC,
1819 AMD_IP_BLOCK_TYPE_COMMON,
1820 AMD_IP_BLOCK_TYPE_GFXHUB,
1821 AMD_IP_BLOCK_TYPE_MMHUB,
1822 AMD_IP_BLOCK_TYPE_IH,
1823 };
1824
1825 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1826 int j;
1827 struct amdgpu_ip_block *block;
1828
1829 for (j = 0; j < adev->num_ip_blocks; j++) {
1830 block = &adev->ip_blocks[j];
1831
1832 if (block->version->type != ip_order[i] ||
1833 !block->status.valid)
1834 continue;
1835
1836 r = block->version->funcs->hw_init(adev);
1837 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
a90ad3c2
ML
1838 }
1839 }
1840
1841 return 0;
1842}
1843
e4f0fdcc 1844static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
a90ad3c2
ML
1845{
1846 int i, r;
1847
2cb681b6
ML
1848 static enum amd_ip_block_type ip_order[] = {
1849 AMD_IP_BLOCK_TYPE_SMC,
1850 AMD_IP_BLOCK_TYPE_DCE,
1851 AMD_IP_BLOCK_TYPE_GFX,
1852 AMD_IP_BLOCK_TYPE_SDMA,
1853 AMD_IP_BLOCK_TYPE_VCE,
1854 };
a90ad3c2 1855
2cb681b6
ML
1856 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1857 int j;
1858 struct amdgpu_ip_block *block;
a90ad3c2 1859
2cb681b6
ML
1860 for (j = 0; j < adev->num_ip_blocks; j++) {
1861 block = &adev->ip_blocks[j];
1862
1863 if (block->version->type != ip_order[i] ||
1864 !block->status.valid)
1865 continue;
1866
1867 r = block->version->funcs->hw_init(adev);
1868 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
a90ad3c2
ML
1869 }
1870 }
1871
1872 return 0;
1873}
1874
d38ceaf9
AD
1875static int amdgpu_resume(struct amdgpu_device *adev)
1876{
1877 int i, r;
1878
1879 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1880 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1881 continue;
a1255107 1882 r = adev->ip_blocks[i].version->funcs->resume(adev);
2c1a2784 1883 if (r) {
a1255107
AD
1884 DRM_ERROR("resume of IP block <%s> failed %d\n",
1885 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1886 return r;
2c1a2784 1887 }
d38ceaf9
AD
1888 }
1889
1890 return 0;
1891}
1892
4e99a44e 1893static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 1894{
a5bde2f9
AD
1895 if (adev->is_atom_fw) {
1896 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
1897 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1898 } else {
1899 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
1900 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1901 }
048765ad
AR
1902}
1903
d38ceaf9
AD
1904/**
1905 * amdgpu_device_init - initialize the driver
1906 *
1907 * @adev: amdgpu_device pointer
 1908 * @ddev: drm dev pointer
1909 * @pdev: pci dev pointer
1910 * @flags: driver flags
1911 *
1912 * Initializes the driver info and hw (all asics).
1913 * Returns 0 for success or an error on failure.
1914 * Called at driver startup.
1915 */
1916int amdgpu_device_init(struct amdgpu_device *adev,
1917 struct drm_device *ddev,
1918 struct pci_dev *pdev,
1919 uint32_t flags)
1920{
1921 int r, i;
1922 bool runtime = false;
95844d20 1923 u32 max_MBps;
d38ceaf9
AD
1924
1925 adev->shutdown = false;
1926 adev->dev = &pdev->dev;
1927 adev->ddev = ddev;
1928 adev->pdev = pdev;
1929 adev->flags = flags;
2f7d10b3 1930 adev->asic_type = flags & AMD_ASIC_MASK;
d38ceaf9
AD
1931 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
1932 adev->mc.gtt_size = 512 * 1024 * 1024;
1933 adev->accel_working = false;
1934 adev->num_rings = 0;
1935 adev->mman.buffer_funcs = NULL;
1936 adev->mman.buffer_funcs_ring = NULL;
1937 adev->vm_manager.vm_pte_funcs = NULL;
2d55e45a 1938 adev->vm_manager.vm_pte_num_rings = 0;
d38ceaf9 1939 adev->gart.gart_funcs = NULL;
f54d1867 1940 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
d38ceaf9
AD
1941
1942 adev->smc_rreg = &amdgpu_invalid_rreg;
1943 adev->smc_wreg = &amdgpu_invalid_wreg;
1944 adev->pcie_rreg = &amdgpu_invalid_rreg;
1945 adev->pcie_wreg = &amdgpu_invalid_wreg;
36b9a952
HR
1946 adev->pciep_rreg = &amdgpu_invalid_rreg;
1947 adev->pciep_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
1948 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
1949 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
1950 adev->didt_rreg = &amdgpu_invalid_rreg;
1951 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
1952 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
1953 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
1954 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
1955 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1956
ccdbb20a 1957
3e39ab90
AD
1958 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1959 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
1960 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
1961
 1962 /* mutex initializations are all done here so we
 1963 * can recall functions without having locking issues */
d38ceaf9 1964 atomic_set(&adev->irq.ih.lock, 0);
0e5ca0d1 1965 mutex_init(&adev->firmware.mutex);
d38ceaf9
AD
1966 mutex_init(&adev->pm.mutex);
1967 mutex_init(&adev->gfx.gpu_clock_mutex);
1968 mutex_init(&adev->srbm_mutex);
1969 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9
AD
1970 mutex_init(&adev->mn_lock);
1971 hash_init(adev->mn_hash);
1972
1973 amdgpu_check_arguments(adev);
1974
1975 /* Registers mapping */
1976 /* TODO: block userspace mapping of io register */
1977 spin_lock_init(&adev->mmio_idx_lock);
1978 spin_lock_init(&adev->smc_idx_lock);
1979 spin_lock_init(&adev->pcie_idx_lock);
1980 spin_lock_init(&adev->uvd_ctx_idx_lock);
1981 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 1982 spin_lock_init(&adev->gc_cac_idx_lock);
d38ceaf9 1983 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 1984 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 1985
0c4e7fa5
CZ
1986 INIT_LIST_HEAD(&adev->shadow_list);
1987 mutex_init(&adev->shadow_list_lock);
1988
5c1354bd
CZ
1989 INIT_LIST_HEAD(&adev->gtt_list);
1990 spin_lock_init(&adev->gtt_list_lock);
1991
da69c161
KW
1992 if (adev->asic_type >= CHIP_BONAIRE) {
1993 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
1994 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
1995 } else {
1996 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
1997 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
1998 }
d38ceaf9 1999
d38ceaf9
AD
2000 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2001 if (adev->rmmio == NULL) {
2002 return -ENOMEM;
2003 }
2004 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2005 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2006
da69c161
KW
2007 if (adev->asic_type >= CHIP_BONAIRE)
2008 /* doorbell bar mapping */
2009 amdgpu_doorbell_init(adev);
d38ceaf9
AD
2010
2011 /* io port mapping */
2012 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2013 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2014 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2015 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2016 break;
2017 }
2018 }
2019 if (adev->rio_mem == NULL)
b64a18c5 2020 DRM_INFO("PCI I/O BAR is not found.\n");
d38ceaf9
AD
2021
2022 /* early init functions */
2023 r = amdgpu_early_init(adev);
2024 if (r)
2025 return r;
2026
 2027 /* if we have more than one VGA card, then disable the amdgpu VGA resources */
2028 /* this will fail for cards that aren't VGA class devices, just
2029 * ignore it */
2030 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
2031
2032 if (amdgpu_runtime_pm == 1)
2033 runtime = true;
e9bef455 2034 if (amdgpu_device_is_px(ddev))
d38ceaf9 2035 runtime = true;
84c8b22e
LW
2036 if (!pci_is_thunderbolt_attached(adev->pdev))
2037 vga_switcheroo_register_client(adev->pdev,
2038 &amdgpu_switcheroo_ops, runtime);
d38ceaf9
AD
2039 if (runtime)
2040 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2041
2042 /* Read BIOS */
83ba126a
AD
2043 if (!amdgpu_get_bios(adev)) {
2044 r = -EINVAL;
2045 goto failed;
2046 }
f7e9e9fe 2047
d38ceaf9 2048 r = amdgpu_atombios_init(adev);
2c1a2784
AD
2049 if (r) {
2050 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
83ba126a 2051 goto failed;
2c1a2784 2052 }
d38ceaf9 2053
4e99a44e
ML
 2054 /* detect whether we have an SR-IOV capable vbios */
2055 amdgpu_device_detect_sriov_bios(adev);
048765ad 2056
d38ceaf9 2057 /* Post card if necessary */
bec86378 2058 if (amdgpu_vpost_needed(adev)) {
d38ceaf9 2059 if (!adev->bios) {
bec86378 2060 dev_err(adev->dev, "no vBIOS found\n");
83ba126a
AD
2061 r = -EINVAL;
2062 goto failed;
d38ceaf9 2063 }
bec86378 2064 DRM_INFO("GPU posting now...\n");
4e99a44e
ML
2065 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2066 if (r) {
2067 dev_err(adev->dev, "gpu post error!\n");
2068 goto failed;
2069 }
2070 } else {
2071 DRM_INFO("GPU post is not needed\n");
d38ceaf9
AD
2072 }
2073
a5bde2f9
AD
2074 if (!adev->is_atom_fw) {
2075 /* Initialize clocks */
2076 r = amdgpu_atombios_get_clock_info(adev);
2077 if (r) {
2078 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
 2079 goto failed;
2080 }
2081 /* init i2c buses */
2082 amdgpu_atombios_i2c_init(adev);
2c1a2784 2083 }
d38ceaf9
AD
2084
2085 /* Fence driver */
2086 r = amdgpu_fence_driver_init(adev);
2c1a2784
AD
2087 if (r) {
2088 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
83ba126a 2089 goto failed;
2c1a2784 2090 }
d38ceaf9
AD
2091
2092 /* init the mode config */
2093 drm_mode_config_init(adev->ddev);
2094
2095 r = amdgpu_init(adev);
2096 if (r) {
2c1a2784 2097 dev_err(adev->dev, "amdgpu_init failed\n");
d38ceaf9 2098 amdgpu_fini(adev);
83ba126a 2099 goto failed;
d38ceaf9
AD
2100 }
2101
2102 adev->accel_working = true;
2103
95844d20
MO
2104 /* Initialize the buffer migration limit. */
2105 if (amdgpu_moverate >= 0)
2106 max_MBps = amdgpu_moverate;
2107 else
2108 max_MBps = 8; /* Allow 8 MB/s. */
2109 /* Get a log2 for easy divisions. */
2110 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2111
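	/*
	 * Illustrative sketch, not part of the driver: ilog2() rounds down,
	 * so the default of 8 MB/s becomes log2_max_MBps = 3, and a
	 * non-power-of-two moverate such as 100 MB/s is treated as
	 * 2^6 = 64 MB/s.  Assuming the consumer approximates 1 MB/s as
	 * 1 byte/us, an elapsed-time budget can then be derived with a
	 * single shift:
	 *
	 *	static u64 example_us_to_bytes(struct amdgpu_device *adev, u64 us)
	 *	{
	 *		return us << adev->mm_stats.log2_max_MBps;
	 *	}
	 */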
d38ceaf9
AD
2112 r = amdgpu_ib_pool_init(adev);
2113 if (r) {
2114 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
83ba126a 2115 goto failed;
d38ceaf9
AD
2116 }
2117
2118 r = amdgpu_ib_ring_tests(adev);
2119 if (r)
2120 DRM_ERROR("ib ring test failed (%d).\n", r);
2121
9bc92b9c
ML
2122 amdgpu_fbdev_init(adev);
2123
d38ceaf9 2124 r = amdgpu_gem_debugfs_init(adev);
3f14e623 2125 if (r)
d38ceaf9 2126 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
d38ceaf9
AD
2127
2128 r = amdgpu_debugfs_regs_init(adev);
3f14e623 2129 if (r)
d38ceaf9 2130 DRM_ERROR("registering register debugfs failed (%d).\n", r);
d38ceaf9 2131
50ab2533 2132 r = amdgpu_debugfs_firmware_init(adev);
3f14e623 2133 if (r)
50ab2533 2134 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
50ab2533 2135
d38ceaf9
AD
2136 if ((amdgpu_testing & 1)) {
2137 if (adev->accel_working)
2138 amdgpu_test_moves(adev);
2139 else
2140 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2141 }
d38ceaf9
AD
2142 if (amdgpu_benchmarking) {
2143 if (adev->accel_working)
2144 amdgpu_benchmark(adev, amdgpu_benchmarking);
2145 else
2146 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2147 }
2148
2149 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2150 * explicit gating rather than handling it automatically.
2151 */
2152 r = amdgpu_late_init(adev);
2c1a2784
AD
2153 if (r) {
2154 dev_err(adev->dev, "amdgpu_late_init failed\n");
83ba126a 2155 goto failed;
2c1a2784 2156 }
d38ceaf9
AD
2157
2158 return 0;
83ba126a
AD
2159
2160failed:
2161 if (runtime)
2162 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2163 return r;
d38ceaf9
AD
2164}
2165
d38ceaf9
AD
2166/**
2167 * amdgpu_device_fini - tear down the driver
2168 *
2169 * @adev: amdgpu_device pointer
2170 *
2171 * Tear down the driver info (all asics).
2172 * Called at driver shutdown.
2173 */
2174void amdgpu_device_fini(struct amdgpu_device *adev)
2175{
2176 int r;
2177
2178 DRM_INFO("amdgpu: finishing device.\n");
2179 adev->shutdown = true;
db2c2a97
PD
2180 if (adev->mode_info.mode_config_initialized)
2181 drm_crtc_force_disable_all(adev->ddev);
d38ceaf9
AD
2182 /* evict vram memory */
2183 amdgpu_bo_evict_vram(adev);
2184 amdgpu_ib_pool_fini(adev);
2185 amdgpu_fence_driver_fini(adev);
2186 amdgpu_fbdev_fini(adev);
2187 r = amdgpu_fini(adev);
d38ceaf9
AD
2188 adev->accel_working = false;
2189 /* free i2c buses */
2190 amdgpu_i2c_fini(adev);
2191 amdgpu_atombios_fini(adev);
2192 kfree(adev->bios);
2193 adev->bios = NULL;
84c8b22e
LW
2194 if (!pci_is_thunderbolt_attached(adev->pdev))
2195 vga_switcheroo_unregister_client(adev->pdev);
83ba126a
AD
2196 if (adev->flags & AMD_IS_PX)
2197 vga_switcheroo_fini_domain_pm_ops(adev->dev);
d38ceaf9
AD
2198 vga_client_register(adev->pdev, NULL, NULL, NULL);
2199 if (adev->rio_mem)
2200 pci_iounmap(adev->pdev, adev->rio_mem);
2201 adev->rio_mem = NULL;
2202 iounmap(adev->rmmio);
2203 adev->rmmio = NULL;
da69c161
KW
2204 if (adev->asic_type >= CHIP_BONAIRE)
2205 amdgpu_doorbell_fini(adev);
d38ceaf9 2206 amdgpu_debugfs_regs_cleanup(adev);
d38ceaf9
AD
2207}
2208
2209
2210/*
2211 * Suspend & resume.
2212 */
2213/**
810ddc3a 2214 * amdgpu_device_suspend - initiate device suspend
d38ceaf9
AD
2215 *
 2216 * @dev: drm dev pointer
 2217 * @suspend: true to put the hw into a low power (D3hot) PCI state
2218 *
2219 * Puts the hw in the suspend state (all asics).
2220 * Returns 0 for success or an error on failure.
2221 * Called at driver suspend.
2222 */
810ddc3a 2223int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
d38ceaf9
AD
2224{
2225 struct amdgpu_device *adev;
2226 struct drm_crtc *crtc;
2227 struct drm_connector *connector;
5ceb54c6 2228 int r;
d38ceaf9
AD
2229
2230 if (dev == NULL || dev->dev_private == NULL) {
2231 return -ENODEV;
2232 }
2233
2234 adev = dev->dev_private;
2235
2236 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2237 return 0;
2238
2239 drm_kms_helper_poll_disable(dev);
2240
2241 /* turn off display hw */
4c7fbc39 2242 drm_modeset_lock_all(dev);
d38ceaf9
AD
2243 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2244 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2245 }
4c7fbc39 2246 drm_modeset_unlock_all(dev);
d38ceaf9 2247
756e6880 2248 /* unpin the front buffers and cursors */
d38ceaf9 2249 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
756e6880 2250 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
d38ceaf9
AD
2251 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2252 struct amdgpu_bo *robj;
2253
756e6880
AD
2254 if (amdgpu_crtc->cursor_bo) {
2255 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2256 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2257 if (r == 0) {
2258 amdgpu_bo_unpin(aobj);
2259 amdgpu_bo_unreserve(aobj);
2260 }
2261 }
2262
d38ceaf9
AD
2263 if (rfb == NULL || rfb->obj == NULL) {
2264 continue;
2265 }
2266 robj = gem_to_amdgpu_bo(rfb->obj);
2267 /* don't unpin kernel fb objects */
2268 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
7a6901d7 2269 r = amdgpu_bo_reserve(robj, true);
d38ceaf9
AD
2270 if (r == 0) {
2271 amdgpu_bo_unpin(robj);
2272 amdgpu_bo_unreserve(robj);
2273 }
2274 }
2275 }
2276 /* evict vram memory */
2277 amdgpu_bo_evict_vram(adev);
2278
5ceb54c6 2279 amdgpu_fence_driver_suspend(adev);
d38ceaf9
AD
2280
2281 r = amdgpu_suspend(adev);
2282
a0a71e49
AD
2283 /* evict remaining vram memory
2284 * This second call to evict vram is to evict the gart page table
2285 * using the CPU.
2286 */
d38ceaf9
AD
2287 amdgpu_bo_evict_vram(adev);
2288
be34d3bf
AD
2289 if (adev->is_atom_fw)
2290 amdgpu_atomfirmware_scratch_regs_save(adev);
2291 else
2292 amdgpu_atombios_scratch_regs_save(adev);
d38ceaf9
AD
2293 pci_save_state(dev->pdev);
2294 if (suspend) {
2295 /* Shut down the device */
2296 pci_disable_device(dev->pdev);
2297 pci_set_power_state(dev->pdev, PCI_D3hot);
74b0b157 2298 } else {
2299 r = amdgpu_asic_reset(adev);
2300 if (r)
2301 DRM_ERROR("amdgpu asic reset failed\n");
d38ceaf9
AD
2302 }
2303
2304 if (fbcon) {
2305 console_lock();
2306 amdgpu_fbdev_set_suspend(adev, 1);
2307 console_unlock();
2308 }
2309 return 0;
2310}
2311
2312/**
810ddc3a 2313 * amdgpu_device_resume - initiate device resume
d38ceaf9
AD
2314 *
 2315 * @dev: drm dev pointer
2316 *
2317 * Bring the hw back to operating state (all asics).
2318 * Returns 0 for success or an error on failure.
2319 * Called at driver resume.
2320 */
810ddc3a 2321int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
d38ceaf9
AD
2322{
2323 struct drm_connector *connector;
2324 struct amdgpu_device *adev = dev->dev_private;
756e6880 2325 struct drm_crtc *crtc;
03161a6e 2326 int r = 0;
d38ceaf9
AD
2327
2328 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2329 return 0;
2330
74b0b157 2331 if (fbcon)
d38ceaf9 2332 console_lock();
74b0b157 2333
d38ceaf9
AD
2334 if (resume) {
2335 pci_set_power_state(dev->pdev, PCI_D0);
2336 pci_restore_state(dev->pdev);
74b0b157 2337 r = pci_enable_device(dev->pdev);
03161a6e
HR
2338 if (r)
2339 goto unlock;
d38ceaf9 2340 }
be34d3bf
AD
2341 if (adev->is_atom_fw)
2342 amdgpu_atomfirmware_scratch_regs_restore(adev);
2343 else
2344 amdgpu_atombios_scratch_regs_restore(adev);
d38ceaf9
AD
2345
2346 /* post card */
c836fec5 2347 if (amdgpu_need_post(adev)) {
74b0b157 2348 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2349 if (r)
2350 DRM_ERROR("amdgpu asic init failed\n");
2351 }
d38ceaf9
AD
2352
2353 r = amdgpu_resume(adev);
e6707218 2354 if (r) {
ca198528 2355 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
03161a6e 2356 goto unlock;
e6707218 2357 }
5ceb54c6
AD
2358 amdgpu_fence_driver_resume(adev);
2359
ca198528
FC
2360 if (resume) {
2361 r = amdgpu_ib_ring_tests(adev);
2362 if (r)
2363 DRM_ERROR("ib ring test failed (%d).\n", r);
2364 }
d38ceaf9
AD
2365
2366 r = amdgpu_late_init(adev);
03161a6e
HR
2367 if (r)
2368 goto unlock;
d38ceaf9 2369
756e6880
AD
2370 /* pin cursors */
2371 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2372 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2373
2374 if (amdgpu_crtc->cursor_bo) {
2375 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2376 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2377 if (r == 0) {
2378 r = amdgpu_bo_pin(aobj,
2379 AMDGPU_GEM_DOMAIN_VRAM,
2380 &amdgpu_crtc->cursor_addr);
2381 if (r != 0)
2382 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2383 amdgpu_bo_unreserve(aobj);
2384 }
2385 }
2386 }
2387
d38ceaf9
AD
2388 /* blat the mode back in */
2389 if (fbcon) {
2390 drm_helper_resume_force_mode(dev);
2391 /* turn on display hw */
4c7fbc39 2392 drm_modeset_lock_all(dev);
d38ceaf9
AD
2393 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2394 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2395 }
4c7fbc39 2396 drm_modeset_unlock_all(dev);
d38ceaf9
AD
2397 }
2398
2399 drm_kms_helper_poll_enable(dev);
23a1a9e5
L
2400
2401 /*
2402 * Most of the connector probing functions try to acquire runtime pm
2403 * refs to ensure that the GPU is powered on when connector polling is
2404 * performed. Since we're calling this from a runtime PM callback,
2405 * trying to acquire rpm refs will cause us to deadlock.
2406 *
2407 * Since we're guaranteed to be holding the rpm lock, it's safe to
2408 * temporarily disable the rpm helpers so this doesn't deadlock us.
2409 */
2410#ifdef CONFIG_PM
2411 dev->dev->power.disable_depth++;
2412#endif
54fb2a5c 2413 drm_helper_hpd_irq_event(dev);
23a1a9e5
L
2414#ifdef CONFIG_PM
2415 dev->dev->power.disable_depth--;
2416#endif
d38ceaf9 2417
03161a6e 2418 if (fbcon)
d38ceaf9 2419 amdgpu_fbdev_set_suspend(adev, 0);
03161a6e
HR
2420
2421unlock:
2422 if (fbcon)
d38ceaf9 2423 console_unlock();
d38ceaf9 2424
03161a6e 2425 return r;
d38ceaf9
AD
2426}
2427
63fbf42f
CZ
2428static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2429{
2430 int i;
2431 bool asic_hang = false;
2432
2433 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2434 if (!adev->ip_blocks[i].status.valid)
63fbf42f 2435 continue;
a1255107
AD
2436 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2437 adev->ip_blocks[i].status.hang =
2438 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2439 if (adev->ip_blocks[i].status.hang) {
2440 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
63fbf42f
CZ
2441 asic_hang = true;
2442 }
2443 }
2444 return asic_hang;
2445}
2446
4d446656 2447static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
d31a501e
CZ
2448{
2449 int i, r = 0;
2450
2451 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2452 if (!adev->ip_blocks[i].status.valid)
d31a501e 2453 continue;
a1255107
AD
2454 if (adev->ip_blocks[i].status.hang &&
2455 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2456 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
d31a501e
CZ
2457 if (r)
2458 return r;
2459 }
2460 }
2461
2462 return 0;
2463}
2464
35d782fe
CZ
2465static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2466{
da146d3b
AD
2467 int i;
2468
2469 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2470 if (!adev->ip_blocks[i].status.valid)
da146d3b 2471 continue;
a1255107
AD
2472 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2473 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2474 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
2475 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
2476 if (adev->ip_blocks[i].status.hang) {
da146d3b
AD
2477 DRM_INFO("Some block need full reset!\n");
2478 return true;
2479 }
2480 }
35d782fe
CZ
2481 }
2482 return false;
2483}
2484
2485static int amdgpu_soft_reset(struct amdgpu_device *adev)
2486{
2487 int i, r = 0;
2488
2489 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2490 if (!adev->ip_blocks[i].status.valid)
35d782fe 2491 continue;
a1255107
AD
2492 if (adev->ip_blocks[i].status.hang &&
2493 adev->ip_blocks[i].version->funcs->soft_reset) {
2494 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
35d782fe
CZ
2495 if (r)
2496 return r;
2497 }
2498 }
2499
2500 return 0;
2501}
2502
2503static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2504{
2505 int i, r = 0;
2506
2507 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2508 if (!adev->ip_blocks[i].status.valid)
35d782fe 2509 continue;
a1255107
AD
2510 if (adev->ip_blocks[i].status.hang &&
2511 adev->ip_blocks[i].version->funcs->post_soft_reset)
2512 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
35d782fe
CZ
2513 if (r)
2514 return r;
2515 }
2516
2517 return 0;
2518}
2519
3ad81f16
CZ
2520bool amdgpu_need_backup(struct amdgpu_device *adev)
2521{
2522 if (adev->flags & AMD_IS_APU)
2523 return false;
2524
2525 return amdgpu_lockup_timeout > 0 ? true : false;
2526}
2527
53cdccd5
CZ
2528static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2529 struct amdgpu_ring *ring,
2530 struct amdgpu_bo *bo,
f54d1867 2531 struct dma_fence **fence)
53cdccd5
CZ
2532{
2533 uint32_t domain;
2534 int r;
2535
23d2e504
RH
2536 if (!bo->shadow)
2537 return 0;
2538
1d284797 2539 r = amdgpu_bo_reserve(bo, true);
23d2e504
RH
2540 if (r)
2541 return r;
2542 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2543 /* if bo has been evicted, then no need to recover */
2544 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
82521316
RH
2545 r = amdgpu_bo_validate(bo->shadow);
2546 if (r) {
2547 DRM_ERROR("bo validate failed!\n");
2548 goto err;
2549 }
2550
2551 r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem);
2552 if (r) {
2553 DRM_ERROR("%p bind failed\n", bo->shadow);
2554 goto err;
2555 }
2556
23d2e504 2557 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
53cdccd5 2558 NULL, fence, true);
23d2e504
RH
2559 if (r) {
2560 DRM_ERROR("recover page table failed!\n");
2561 goto err;
2562 }
2563 }
53cdccd5 2564err:
23d2e504
RH
2565 amdgpu_bo_unreserve(bo);
2566 return r;
53cdccd5
CZ
2567}
2568
a90ad3c2
ML
2569/**
2570 * amdgpu_sriov_gpu_reset - reset the asic
2571 *
2572 * @adev: amdgpu device pointer
 2573 * @voluntary: true if this reset is requested by the guest,
 2574 * false if it is requested by the hypervisor
2575 *
 2576 * Attempt to reset the GPU if it has hung (all asics),
 2577 * for the SR-IOV case.
2578 * Returns 0 for success or an error on failure.
2579 */
2580int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
2581{
2582 int i, r = 0;
2583 int resched;
2584 struct amdgpu_bo *bo, *tmp;
2585 struct amdgpu_ring *ring;
2586 struct dma_fence *fence = NULL, *next = NULL;
2587
147b5983 2588 mutex_lock(&adev->virt.lock_reset);
a90ad3c2 2589 atomic_inc(&adev->gpu_reset_counter);
1fb37a3d 2590 adev->gfx.in_reset = true;
a90ad3c2
ML
2591
2592 /* block TTM */
2593 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2594
2595 /* block scheduler */
2596 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2597 ring = adev->rings[i];
2598
2599 if (!ring || !ring->sched.thread)
2600 continue;
2601
2602 kthread_park(ring->sched.thread);
2603 amd_sched_hw_job_reset(&ring->sched);
2604 }
2605
2606 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2607 amdgpu_fence_driver_force_completion(adev);
2608
2609 /* request to take full control of GPU before re-initialization */
2610 if (voluntary)
2611 amdgpu_virt_reset_gpu(adev);
2612 else
2613 amdgpu_virt_request_full_gpu(adev, true);
2614
2615
2616 /* Resume IP prior to SMC */
e4f0fdcc 2617 amdgpu_sriov_reinit_early(adev);
a90ad3c2
ML
2618
 2619 /* we need to recover the gart prior to running SMC/CP/SDMA resume */
2620 amdgpu_ttm_recover_gart(adev);
2621
2622 /* now we are okay to resume SMC/CP/SDMA */
e4f0fdcc 2623 amdgpu_sriov_reinit_late(adev);
a90ad3c2
ML
2624
2625 amdgpu_irq_gpu_reset_resume_helper(adev);
2626
 2627 r = amdgpu_ib_ring_tests(adev);
 2628 if (r)
 	dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
2629
2630 /* release full control of GPU after ib test */
2631 amdgpu_virt_release_full_gpu(adev, true);
2632
2633 DRM_INFO("recover vram bo from shadow\n");
2634
2635 ring = adev->mman.buffer_funcs_ring;
2636 mutex_lock(&adev->shadow_list_lock);
2637 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
236763d3 2638 next = NULL;
a90ad3c2
ML
2639 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2640 if (fence) {
2641 r = dma_fence_wait(fence, false);
2642 if (r) {
2643 WARN(r, "recovery from shadow isn't completed\n");
2644 break;
2645 }
2646 }
2647
2648 dma_fence_put(fence);
2649 fence = next;
2650 }
2651 mutex_unlock(&adev->shadow_list_lock);
2652
2653 if (fence) {
2654 r = dma_fence_wait(fence, false);
2655 if (r)
2656 WARN(r, "recovery from shadow isn't completed\n");
2657 }
2658 dma_fence_put(fence);
2659
2660 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2661 struct amdgpu_ring *ring = adev->rings[i];
2662 if (!ring || !ring->sched.thread)
2663 continue;
2664
2665 amd_sched_job_recovery(&ring->sched);
2666 kthread_unpark(ring->sched.thread);
2667 }
2668
2669 drm_helper_resume_force_mode(adev->ddev);
2670 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2671 if (r) {
2672 /* bad news, how to tell it to userspace ? */
2673 dev_info(adev->dev, "GPU reset failed\n");
2674 }
2675
1fb37a3d 2676 adev->gfx.in_reset = false;
147b5983 2677 mutex_unlock(&adev->virt.lock_reset);
a90ad3c2
ML
2678 return r;
2679}
2680
d38ceaf9
AD
2681/**
2682 * amdgpu_gpu_reset - reset the asic
2683 *
2684 * @adev: amdgpu device pointer
2685 *
 2686 * Attempt to reset the GPU if it has hung (all asics).
2687 * Returns 0 for success or an error on failure.
2688 */
2689int amdgpu_gpu_reset(struct amdgpu_device *adev)
2690{
d38ceaf9
AD
2691 int i, r;
2692 int resched;
35d782fe 2693 bool need_full_reset;
d38ceaf9 2694
fb140b29 2695 if (amdgpu_sriov_vf(adev))
a90ad3c2 2696 return amdgpu_sriov_gpu_reset(adev, true);
fb140b29 2697
63fbf42f
CZ
2698 if (!amdgpu_check_soft_reset(adev)) {
2699 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2700 return 0;
2701 }
d38ceaf9 2702
d94aed5a 2703 atomic_inc(&adev->gpu_reset_counter);
d38ceaf9 2704
a3c47d6b
CZ
2705 /* block TTM */
2706 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2707
0875dc9e
CZ
2708 /* block scheduler */
2709 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2710 struct amdgpu_ring *ring = adev->rings[i];
2711
51687759 2712 if (!ring || !ring->sched.thread)
0875dc9e
CZ
2713 continue;
2714 kthread_park(ring->sched.thread);
aa1c8900 2715 amd_sched_hw_job_reset(&ring->sched);
0875dc9e 2716 }
2200edac
CZ
2717 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2718 amdgpu_fence_driver_force_completion(adev);
d38ceaf9 2719
35d782fe 2720 need_full_reset = amdgpu_need_full_reset(adev);
d38ceaf9 2721
35d782fe
CZ
2722 if (!need_full_reset) {
2723 amdgpu_pre_soft_reset(adev);
2724 r = amdgpu_soft_reset(adev);
2725 amdgpu_post_soft_reset(adev);
2726 if (r || amdgpu_check_soft_reset(adev)) {
2727 DRM_INFO("soft reset failed, will fallback to full reset!\n");
2728 need_full_reset = true;
2729 }
f1aa7e08
CZ
2730 }
2731
35d782fe 2732 if (need_full_reset) {
35d782fe 2733 r = amdgpu_suspend(adev);
bfa99269 2734
35d782fe
CZ
2735retry:
2736 /* Disable fb access */
2737 if (adev->mode_info.num_crtc) {
2738 struct amdgpu_mode_mc_save save;
2739 amdgpu_display_stop_mc_access(adev, &save);
2740 amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
2741 }
be34d3bf
AD
2742 if (adev->is_atom_fw)
2743 amdgpu_atomfirmware_scratch_regs_save(adev);
2744 else
2745 amdgpu_atombios_scratch_regs_save(adev);
35d782fe 2746 r = amdgpu_asic_reset(adev);
be34d3bf
AD
2747 if (adev->is_atom_fw)
2748 amdgpu_atomfirmware_scratch_regs_restore(adev);
2749 else
2750 amdgpu_atombios_scratch_regs_restore(adev);
35d782fe
CZ
2751 /* post card */
2752 amdgpu_atom_asic_init(adev->mode_info.atom_context);
2753
2754 if (!r) {
2755 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
2756 r = amdgpu_resume(adev);
2757 }
d38ceaf9 2758 }
d38ceaf9 2759 if (!r) {
e72cfd58 2760 amdgpu_irq_gpu_reset_resume_helper(adev);
2c0d7318
CZ
2761 if (need_full_reset && amdgpu_need_backup(adev)) {
2762 r = amdgpu_ttm_recover_gart(adev);
2763 if (r)
2764 DRM_ERROR("gart recovery failed!!!\n");
2765 }
1f465087
CZ
2766 r = amdgpu_ib_ring_tests(adev);
2767 if (r) {
2768 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
40019dc4 2769 r = amdgpu_suspend(adev);
53cdccd5 2770 need_full_reset = true;
40019dc4 2771 goto retry;
1f465087 2772 }
53cdccd5
CZ
2773 /**
 2774 * recover vm page tables, since we cannot depend on VRAM being
 2775 * consistent after a gpu full reset.
2776 */
2777 if (need_full_reset && amdgpu_need_backup(adev)) {
2778 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2779 struct amdgpu_bo *bo, *tmp;
f54d1867 2780 struct dma_fence *fence = NULL, *next = NULL;
53cdccd5
CZ
2781
2782 DRM_INFO("recover vram bo from shadow\n");
2783 mutex_lock(&adev->shadow_list_lock);
2784 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
236763d3 2785 next = NULL;
53cdccd5
CZ
2786 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2787 if (fence) {
f54d1867 2788 r = dma_fence_wait(fence, false);
53cdccd5 2789 if (r) {
1d7b17b0 2790 WARN(r, "recovery from shadow isn't completed\n");
53cdccd5
CZ
2791 break;
2792 }
2793 }
1f465087 2794
f54d1867 2795 dma_fence_put(fence);
53cdccd5
CZ
2796 fence = next;
2797 }
2798 mutex_unlock(&adev->shadow_list_lock);
2799 if (fence) {
f54d1867 2800 r = dma_fence_wait(fence, false);
53cdccd5 2801 if (r)
1d7b17b0 2802 WARN(r, "recovery from shadow isn't completed\n");
53cdccd5 2803 }
f54d1867 2804 dma_fence_put(fence);
53cdccd5 2805 }
d38ceaf9
AD
2806 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2807 struct amdgpu_ring *ring = adev->rings[i];
51687759
CZ
2808
2809 if (!ring || !ring->sched.thread)
d38ceaf9 2810 continue;
53cdccd5 2811
aa1c8900 2812 amd_sched_job_recovery(&ring->sched);
0875dc9e 2813 kthread_unpark(ring->sched.thread);
d38ceaf9 2814 }
d38ceaf9 2815 } else {
2200edac 2816 dev_err(adev->dev, "asic resume failed (%d).\n", r);
d38ceaf9 2817 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
51687759 2818 if (adev->rings[i] && adev->rings[i]->sched.thread) {
0875dc9e 2819 kthread_unpark(adev->rings[i]->sched.thread);
0875dc9e 2820 }
d38ceaf9
AD
2821 }
2822 }
2823
2824 drm_helper_resume_force_mode(adev->ddev);
2825
2826 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2827 if (r) {
2828 /* bad news, how to tell it to userspace ? */
2829 dev_info(adev->dev, "GPU reset failed\n");
2830 }
2831
d38ceaf9
AD
2832 return r;
2833}
2834
d0dd7f0c
AD
2835void amdgpu_get_pcie_info(struct amdgpu_device *adev)
2836{
2837 u32 mask;
2838 int ret;
2839
cd474ba0
AD
2840 if (amdgpu_pcie_gen_cap)
2841 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 2842
cd474ba0
AD
2843 if (amdgpu_pcie_lane_cap)
2844 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 2845
cd474ba0
AD
2846 /* covers APUs as well */
2847 if (pci_is_root_bus(adev->pdev->bus)) {
2848 if (adev->pm.pcie_gen_mask == 0)
2849 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2850 if (adev->pm.pcie_mlw_mask == 0)
2851 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 2852 return;
cd474ba0 2853 }
d0dd7f0c 2854
cd474ba0
AD
2855 if (adev->pm.pcie_gen_mask == 0) {
2856 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
2857 if (!ret) {
2858 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
2859 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
2860 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
2861
2862 if (mask & DRM_PCIE_SPEED_25)
2863 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
2864 if (mask & DRM_PCIE_SPEED_50)
2865 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
2866 if (mask & DRM_PCIE_SPEED_80)
2867 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
2868 } else {
2869 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2870 }
2871 }
2872 if (adev->pm.pcie_mlw_mask == 0) {
2873 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
2874 if (!ret) {
2875 switch (mask) {
2876 case 32:
2877 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
2878 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2879 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2880 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2881 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2882 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2883 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2884 break;
2885 case 16:
2886 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2887 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2888 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2889 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2890 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2891 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2892 break;
2893 case 12:
2894 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2895 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2896 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2897 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2898 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2899 break;
2900 case 8:
2901 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2902 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2903 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2904 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2905 break;
2906 case 4:
2907 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2908 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2909 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2910 break;
2911 case 2:
2912 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2913 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2914 break;
2915 case 1:
2916 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
2917 break;
2918 default:
2919 break;
2920 }
2921 } else {
2922 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c
AD
2923 }
2924 }
2925}
d38ceaf9
AD
2926
2927/*
2928 * Debugfs
2929 */
2930int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
06ab6832 2931 const struct drm_info_list *files,
d38ceaf9
AD
2932 unsigned nfiles)
2933{
2934 unsigned i;
2935
2936 for (i = 0; i < adev->debugfs_count; i++) {
2937 if (adev->debugfs[i].files == files) {
2938 /* Already registered */
2939 return 0;
2940 }
2941 }
2942
2943 i = adev->debugfs_count + 1;
2944 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
2945 DRM_ERROR("Reached maximum number of debugfs components.\n");
2946 DRM_ERROR("Report so we increase "
2947 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
2948 return -EINVAL;
2949 }
2950 adev->debugfs[adev->debugfs_count].files = files;
2951 adev->debugfs[adev->debugfs_count].num_files = nfiles;
2952 adev->debugfs_count = i;
2953#if defined(CONFIG_DEBUG_FS)
d38ceaf9
AD
2954 drm_debugfs_create_files(files, nfiles,
2955 adev->ddev->primary->debugfs_root,
2956 adev->ddev->primary);
2957#endif
2958 return 0;
2959}
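
#if 0
/*
 * Illustrative sketch only (not built): how an IP block might register a
 * debugfs file through amdgpu_debugfs_add_files().  The "foo" names are
 * hypothetical; the drm_info_list/drm_info_node plumbing is the standard
 * DRM debugfs mechanism used elsewhere in this file.
 */
static int amdgpu_debugfs_foo_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "accel_working: %d\n", adev->accel_working);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_foo_list[] = {
	{ "amdgpu_foo_info", amdgpu_debugfs_foo_info, 0, NULL },
};

static int amdgpu_debugfs_foo_init(struct amdgpu_device *adev)
{
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_foo_list,
					ARRAY_SIZE(amdgpu_debugfs_foo_list));
}
#endif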
2960
d38ceaf9
AD
2961#if defined(CONFIG_DEBUG_FS)
2962
2963static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
2964 size_t size, loff_t *pos)
2965{
45063097 2966 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
2967 ssize_t result = 0;
2968 int r;
bd12267d 2969 bool pm_pg_lock, use_bank;
56628159 2970 unsigned instance_bank, sh_bank, se_bank;
d38ceaf9
AD
2971
2972 if (size & 0x3 || *pos & 0x3)
2973 return -EINVAL;
2974
bd12267d
TSD
2975 /* are we reading registers for which a PG lock is necessary? */
2976 pm_pg_lock = (*pos >> 23) & 1;
2977
56628159
TSD
2978 if (*pos & (1ULL << 62)) {
2979 se_bank = (*pos >> 24) & 0x3FF;
2980 sh_bank = (*pos >> 34) & 0x3FF;
2981 instance_bank = (*pos >> 44) & 0x3FF;
32977f93
TSD
2982
2983 if (se_bank == 0x3FF)
2984 se_bank = 0xFFFFFFFF;
2985 if (sh_bank == 0x3FF)
2986 sh_bank = 0xFFFFFFFF;
2987 if (instance_bank == 0x3FF)
2988 instance_bank = 0xFFFFFFFF;
56628159 2989 use_bank = 1;
56628159
TSD
2990 } else {
2991 use_bank = 0;
2992 }
2993
801a6aa9 2994 *pos &= (1UL << 22) - 1;
bd12267d 2995
56628159 2996 if (use_bank) {
32977f93
TSD
2997 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
2998 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
56628159
TSD
2999 return -EINVAL;
3000 mutex_lock(&adev->grbm_idx_mutex);
3001 amdgpu_gfx_select_se_sh(adev, se_bank,
3002 sh_bank, instance_bank);
3003 }
3004
bd12267d
TSD
3005 if (pm_pg_lock)
3006 mutex_lock(&adev->pm.mutex);
3007
d38ceaf9
AD
3008 while (size) {
3009 uint32_t value;
3010
3011 if (*pos > adev->rmmio_size)
56628159 3012 goto end;
d38ceaf9
AD
3013
3014 value = RREG32(*pos >> 2);
3015 r = put_user(value, (uint32_t *)buf);
56628159
TSD
3016 if (r) {
3017 result = r;
3018 goto end;
3019 }
d38ceaf9
AD
3020
3021 result += 4;
3022 buf += 4;
3023 *pos += 4;
3024 size -= 4;
3025 }
3026
56628159
TSD
3027end:
3028 if (use_bank) {
3029 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3030 mutex_unlock(&adev->grbm_idx_mutex);
3031 }
3032
bd12267d
TSD
3033 if (pm_pg_lock)
3034 mutex_unlock(&adev->pm.mutex);
3035
d38ceaf9
AD
3036 return result;
3037}
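
/*
 * Illustrative sketch (not part of the driver): composing the file offset
 * for amdgpu_regs from userspace.  The bit layout mirrors the decode above:
 * bits 0-21 hold the MMIO byte offset, bit 23 requests the PM/PG lock, and
 * bit 62 enables explicit SE/SH/instance banking with the bank numbers in
 * bits 24-33, 34-43 and 44-53 (0x3FF selects broadcast).  The register
 * offset and file descriptor below are placeholders.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	static uint32_t example_read_banked_reg(int fd, uint32_t byte_off,
 *						uint64_t se, uint64_t sh,
 *						uint64_t instance)
 *	{
 *		uint64_t pos = (byte_off & ((1ULL << 22) - 1)) |
 *			       (1ULL << 62) |		// use explicit banking
 *			       (se << 24) | (sh << 34) | (instance << 44);
 *		uint32_t value = 0;
 *
 *		// fd was opened on .../amdgpu_regs beforehand
 *		if (pread(fd, &value, sizeof(value), pos) != sizeof(value))
 *			return 0;
 *		return value;
 *	}
 */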
3038
3039static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
3040 size_t size, loff_t *pos)
3041{
45063097 3042 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
3043 ssize_t result = 0;
3044 int r;
394fdde2
TSD
3045 bool pm_pg_lock, use_bank;
3046 unsigned instance_bank, sh_bank, se_bank;
d38ceaf9
AD
3047
3048 if (size & 0x3 || *pos & 0x3)
3049 return -EINVAL;
3050
394fdde2
TSD
 3051 /* are we writing registers for which a PG lock is necessary? */
3052 pm_pg_lock = (*pos >> 23) & 1;
3053
3054 if (*pos & (1ULL << 62)) {
3055 se_bank = (*pos >> 24) & 0x3FF;
3056 sh_bank = (*pos >> 34) & 0x3FF;
3057 instance_bank = (*pos >> 44) & 0x3FF;
3058
3059 if (se_bank == 0x3FF)
3060 se_bank = 0xFFFFFFFF;
3061 if (sh_bank == 0x3FF)
3062 sh_bank = 0xFFFFFFFF;
3063 if (instance_bank == 0x3FF)
3064 instance_bank = 0xFFFFFFFF;
3065 use_bank = 1;
3066 } else {
3067 use_bank = 0;
3068 }
3069
801a6aa9 3070 *pos &= (1UL << 22) - 1;
394fdde2
TSD
3071
3072 if (use_bank) {
3073 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3074 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
3075 return -EINVAL;
3076 mutex_lock(&adev->grbm_idx_mutex);
3077 amdgpu_gfx_select_se_sh(adev, se_bank,
3078 sh_bank, instance_bank);
3079 }
3080
3081 if (pm_pg_lock)
3082 mutex_lock(&adev->pm.mutex);
3083
d38ceaf9
AD
3084 while (size) {
3085 uint32_t value;
3086
3087 if (*pos > adev->rmmio_size)
3088 return result;
3089
3090 r = get_user(value, (uint32_t *)buf);
3091 if (r)
3092 return r;
3093
3094 WREG32(*pos >> 2, value);
3095
3096 result += 4;
3097 buf += 4;
3098 *pos += 4;
3099 size -= 4;
3100 }
3101
394fdde2
TSD
3102 if (use_bank) {
3103 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3104 mutex_unlock(&adev->grbm_idx_mutex);
3105 }
3106
3107 if (pm_pg_lock)
3108 mutex_unlock(&adev->pm.mutex);
3109
d38ceaf9
AD
3110 return result;
3111}
3112
adcec288
TSD
3113static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
3114 size_t size, loff_t *pos)
3115{
45063097 3116 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3117 ssize_t result = 0;
3118 int r;
3119
3120 if (size & 0x3 || *pos & 0x3)
3121 return -EINVAL;
3122
3123 while (size) {
3124 uint32_t value;
3125
3126 value = RREG32_PCIE(*pos >> 2);
3127 r = put_user(value, (uint32_t *)buf);
3128 if (r)
3129 return r;
3130
3131 result += 4;
3132 buf += 4;
3133 *pos += 4;
3134 size -= 4;
3135 }
3136
3137 return result;
3138}
3139
3140static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
3141 size_t size, loff_t *pos)
3142{
45063097 3143 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3144 ssize_t result = 0;
3145 int r;
3146
3147 if (size & 0x3 || *pos & 0x3)
3148 return -EINVAL;
3149
3150 while (size) {
3151 uint32_t value;
3152
3153 r = get_user(value, (uint32_t *)buf);
3154 if (r)
3155 return r;
3156
3157 WREG32_PCIE(*pos >> 2, value);
3158
3159 result += 4;
3160 buf += 4;
3161 *pos += 4;
3162 size -= 4;
3163 }
3164
3165 return result;
3166}
3167
3168static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
3169 size_t size, loff_t *pos)
3170{
45063097 3171 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3172 ssize_t result = 0;
3173 int r;
3174
3175 if (size & 0x3 || *pos & 0x3)
3176 return -EINVAL;
3177
3178 while (size) {
3179 uint32_t value;
3180
3181 value = RREG32_DIDT(*pos >> 2);
3182 r = put_user(value, (uint32_t *)buf);
3183 if (r)
3184 return r;
3185
3186 result += 4;
3187 buf += 4;
3188 *pos += 4;
3189 size -= 4;
3190 }
3191
3192 return result;
3193}
3194
3195static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
3196 size_t size, loff_t *pos)
3197{
45063097 3198 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3199 ssize_t result = 0;
3200 int r;
3201
3202 if (size & 0x3 || *pos & 0x3)
3203 return -EINVAL;
3204
3205 while (size) {
3206 uint32_t value;
3207
3208 r = get_user(value, (uint32_t *)buf);
3209 if (r)
3210 return r;
3211
3212 WREG32_DIDT(*pos >> 2, value);
3213
3214 result += 4;
3215 buf += 4;
3216 *pos += 4;
3217 size -= 4;
3218 }
3219
3220 return result;
3221}
3222
3223static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
3224 size_t size, loff_t *pos)
3225{
45063097 3226 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3227 ssize_t result = 0;
3228 int r;
3229
3230 if (size & 0x3 || *pos & 0x3)
3231 return -EINVAL;
3232
3233 while (size) {
3234 uint32_t value;
3235
6fc0deaf 3236 value = RREG32_SMC(*pos);
adcec288
TSD
3237 r = put_user(value, (uint32_t *)buf);
3238 if (r)
3239 return r;
3240
3241 result += 4;
3242 buf += 4;
3243 *pos += 4;
3244 size -= 4;
3245 }
3246
3247 return result;
3248}
3249
3250static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
3251 size_t size, loff_t *pos)
3252{
45063097 3253 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3254 ssize_t result = 0;
3255 int r;
3256
3257 if (size & 0x3 || *pos & 0x3)
3258 return -EINVAL;
3259
3260 while (size) {
3261 uint32_t value;
3262
3263 r = get_user(value, (uint32_t *)buf);
3264 if (r)
3265 return r;
3266
6fc0deaf 3267 WREG32_SMC(*pos, value);
adcec288
TSD
3268
3269 result += 4;
3270 buf += 4;
3271 *pos += 4;
3272 size -= 4;
3273 }
3274
3275 return result;
3276}
3277
1e051413
TSD
3278static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
3279 size_t size, loff_t *pos)
3280{
45063097 3281 struct amdgpu_device *adev = file_inode(f)->i_private;
1e051413
TSD
3282 ssize_t result = 0;
3283 int r;
3284 uint32_t *config, no_regs = 0;
3285
3286 if (size & 0x3 || *pos & 0x3)
3287 return -EINVAL;
3288
ecab7668 3289 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
1e051413
TSD
3290 if (!config)
3291 return -ENOMEM;
3292
3293 /* version, increment each time something is added */
9a999359 3294 config[no_regs++] = 3;
1e051413
TSD
3295 config[no_regs++] = adev->gfx.config.max_shader_engines;
3296 config[no_regs++] = adev->gfx.config.max_tile_pipes;
3297 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
3298 config[no_regs++] = adev->gfx.config.max_sh_per_se;
3299 config[no_regs++] = adev->gfx.config.max_backends_per_se;
3300 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
3301 config[no_regs++] = adev->gfx.config.max_gprs;
3302 config[no_regs++] = adev->gfx.config.max_gs_threads;
3303 config[no_regs++] = adev->gfx.config.max_hw_contexts;
3304 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
3305 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
3306 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
3307 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
3308 config[no_regs++] = adev->gfx.config.num_tile_pipes;
3309 config[no_regs++] = adev->gfx.config.backend_enable_mask;
3310 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
3311 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
3312 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
3313 config[no_regs++] = adev->gfx.config.num_gpus;
3314 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
3315 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
3316 config[no_regs++] = adev->gfx.config.gb_addr_config;
3317 config[no_regs++] = adev->gfx.config.num_rbs;
3318
89a8f309
TSD
3319 /* rev==1 */
3320 config[no_regs++] = adev->rev_id;
3321 config[no_regs++] = adev->pg_flags;
3322 config[no_regs++] = adev->cg_flags;
3323
e9f11dc8
TSD
3324 /* rev==2 */
3325 config[no_regs++] = adev->family;
3326 config[no_regs++] = adev->external_rev_id;
3327
9a999359
TSD
3328 /* rev==3 */
3329 config[no_regs++] = adev->pdev->device;
3330 config[no_regs++] = adev->pdev->revision;
3331 config[no_regs++] = adev->pdev->subsystem_device;
3332 config[no_regs++] = adev->pdev->subsystem_vendor;
3333
1e051413
TSD
3334 while (size && (*pos < no_regs * 4)) {
3335 uint32_t value;
3336
3337 value = config[*pos >> 2];
3338 r = put_user(value, (uint32_t *)buf);
3339 if (r) {
3340 kfree(config);
3341 return r;
3342 }
3343
3344 result += 4;
3345 buf += 4;
3346 *pos += 4;
3347 size -= 4;
3348 }
3349
3350 kfree(config);
3351 return result;
3352}
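
/*
 * Illustrative sketch (not part of the driver): reading amdgpu_gca_config
 * from userspace.  The file is a flat array of u32s in the order built
 * above; element 0 is the layout version (3 here) and, for example,
 * element 1 is max_shader_engines.  The debugfs path is a placeholder for
 * whichever DRM minor the device was assigned.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static void example_dump_gca_config(void)
 *	{
 *		uint32_t cfg[64] = { 0 };
 *		int fd = open("/sys/kernel/debug/dri/0/amdgpu_gca_config",
 *			      O_RDONLY);
 *
 *		if (fd < 0)
 *			return;
 *		if (read(fd, cfg, sizeof(cfg)) >= 8)
 *			printf("version %u, max SEs %u\n", cfg[0], cfg[1]);
 *		close(fd);
 *	}
 */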
3353
f2cdaf20
TSD
3354static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
3355 size_t size, loff_t *pos)
3356{
45063097 3357 struct amdgpu_device *adev = file_inode(f)->i_private;
9f8df7d7
TSD
3358 int idx, x, outsize, r, valuesize;
3359 uint32_t values[16];
f2cdaf20 3360
9f8df7d7 3361 if (size & 3 || *pos & 0x3)
f2cdaf20
TSD
3362 return -EINVAL;
3363
3cbc614f
SP
3364 if (amdgpu_dpm == 0)
3365 return -EINVAL;
3366
f2cdaf20
TSD
3367 /* convert offset to sensor number */
3368 idx = *pos >> 2;
3369
9f8df7d7 3370 valuesize = sizeof(values);
f2cdaf20 3371 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
9f8df7d7 3372 r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
3cbc614f
SP
3373 else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
3374 r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
3375 &valuesize);
f2cdaf20
TSD
3376 else
3377 return -EINVAL;
3378
9f8df7d7
TSD
3379 if (size > valuesize)
3380 return -EINVAL;
3381
3382 outsize = 0;
3383 x = 0;
3384 if (!r) {
3385 while (size) {
3386 r = put_user(values[x++], (int32_t *)buf);
3387 buf += 4;
3388 size -= 4;
3389 outsize += 4;
3390 }
3391 }
f2cdaf20 3392
9f8df7d7 3393 return !r ? outsize : r;
f2cdaf20 3394}
1e051413 3395
273d7aa1
TSD
3396static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
3397 size_t size, loff_t *pos)
3398{
3399 struct amdgpu_device *adev = f->f_inode->i_private;
3400 int r, x;
3401 ssize_t result=0;
472259f0 3402 uint32_t offset, se, sh, cu, wave, simd, data[32];
273d7aa1
TSD
3403
3404 if (size & 3 || *pos & 3)
3405 return -EINVAL;
3406
3407 /* decode offset */
3408 offset = (*pos & 0x7F);
3409 se = ((*pos >> 7) & 0xFF);
3410 sh = ((*pos >> 15) & 0xFF);
3411 cu = ((*pos >> 23) & 0xFF);
3412 wave = ((*pos >> 31) & 0xFF);
3413 simd = ((*pos >> 37) & 0xFF);
273d7aa1
TSD
3414
3415 /* switch to the specific se/sh/cu */
3416 mutex_lock(&adev->grbm_idx_mutex);
3417 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3418
3419 x = 0;
472259f0
TSD
3420 if (adev->gfx.funcs->read_wave_data)
3421 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
273d7aa1
TSD
3422
3423 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3424 mutex_unlock(&adev->grbm_idx_mutex);
3425
5ecfb3b8
TSD
3426 if (!x)
3427 return -EINVAL;
3428
472259f0 3429 while (size && (offset < x * 4)) {
273d7aa1
TSD
3430 uint32_t value;
3431
472259f0 3432 value = data[offset >> 2];
273d7aa1
TSD
3433 r = put_user(value, (uint32_t *)buf);
3434 if (r)
3435 return r;
3436
3437 result += 4;
3438 buf += 4;
472259f0 3439 offset += 4;
273d7aa1
TSD
3440 size -= 4;
3441 }
3442
3443 return result;
3444}
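
/*
 * Illustrative sketch (not part of the driver): composing the amdgpu_wave
 * file offset.  Mirroring the decode above: bits 0-6 hold the byte offset
 * into the returned wave data, then se at bit 7, sh at bit 15, cu at
 * bit 23, wave at bit 31 and simd at bit 37 (each masked with 0xFF).
 *
 *	static uint64_t example_wave_pos(uint64_t se, uint64_t sh, uint64_t cu,
 *					 uint64_t wave, uint64_t simd)
 *	{
 *		return (se << 7) | (sh << 15) | (cu << 23) |
 *		       (wave << 31) | (simd << 37);
 *	}
 */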
3445
c5a60ce8
TSD
3446static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
3447 size_t size, loff_t *pos)
3448{
3449 struct amdgpu_device *adev = f->f_inode->i_private;
3450 int r;
3451 ssize_t result = 0;
3452 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
3453
3454 if (size & 3 || *pos & 3)
3455 return -EINVAL;
3456
3457 /* decode offset */
3458 offset = (*pos & 0xFFF); /* in dwords */
3459 se = ((*pos >> 12) & 0xFF);
3460 sh = ((*pos >> 20) & 0xFF);
3461 cu = ((*pos >> 28) & 0xFF);
3462 wave = ((*pos >> 36) & 0xFF);
3463 simd = ((*pos >> 44) & 0xFF);
3464 thread = ((*pos >> 52) & 0xFF);
3465 bank = ((*pos >> 60) & 1);
3466
3467 data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
3468 if (!data)
3469 return -ENOMEM;
3470
3471 /* switch to the specific se/sh/cu */
3472 mutex_lock(&adev->grbm_idx_mutex);
3473 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3474
3475 if (bank == 0) {
3476 if (adev->gfx.funcs->read_wave_vgprs)
3477 adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
3478 } else {
3479 if (adev->gfx.funcs->read_wave_sgprs)
3480 adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
3481 }
3482
3483 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3484 mutex_unlock(&adev->grbm_idx_mutex);
3485
3486 while (size) {
3487 uint32_t value;
3488
3489 value = data[offset++];
3490 r = put_user(value, (uint32_t *)buf);
3491 if (r) {
3492 result = r;
3493 goto err;
3494 }
3495
3496 result += 4;
3497 buf += 4;
3498 size -= 4;
3499 }
3500
3501err:
3502 kfree(data);
3503 return result;
3504}
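
/*
 * Illustrative sketch (not part of the driver): composing the amdgpu_gpr
 * file offset, mirroring the decode above.  Bits 0-11 hold the register
 * offset in dwords, then se at bit 12, sh at bit 20, cu at bit 28, wave at
 * bit 36, simd at bit 44, thread at bit 52 (each masked with 0xFF), and
 * bit 60 selects SGPRs (1) instead of VGPRs (0).
 *
 *	static uint64_t example_gpr_pos(uint64_t dw, uint64_t se, uint64_t sh,
 *					uint64_t cu, uint64_t wave,
 *					uint64_t simd, uint64_t thread,
 *					uint64_t sgprs)
 *	{
 *		return (dw & 0xFFF) | (se << 12) | (sh << 20) | (cu << 28) |
 *		       (wave << 36) | (simd << 44) | (thread << 52) |
 *		       (sgprs << 60);
 *	}
 */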
3505
d38ceaf9
AD
3506static const struct file_operations amdgpu_debugfs_regs_fops = {
3507 .owner = THIS_MODULE,
3508 .read = amdgpu_debugfs_regs_read,
3509 .write = amdgpu_debugfs_regs_write,
3510 .llseek = default_llseek
3511};
adcec288
TSD
3512static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3513 .owner = THIS_MODULE,
3514 .read = amdgpu_debugfs_regs_didt_read,
3515 .write = amdgpu_debugfs_regs_didt_write,
3516 .llseek = default_llseek
3517};
3518static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3519 .owner = THIS_MODULE,
3520 .read = amdgpu_debugfs_regs_pcie_read,
3521 .write = amdgpu_debugfs_regs_pcie_write,
3522 .llseek = default_llseek
3523};
3524static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3525 .owner = THIS_MODULE,
3526 .read = amdgpu_debugfs_regs_smc_read,
3527 .write = amdgpu_debugfs_regs_smc_write,
3528 .llseek = default_llseek
3529};
3530
1e051413
TSD
3531static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3532 .owner = THIS_MODULE,
3533 .read = amdgpu_debugfs_gca_config_read,
3534 .llseek = default_llseek
3535};
3536
f2cdaf20
TSD
3537static const struct file_operations amdgpu_debugfs_sensors_fops = {
3538 .owner = THIS_MODULE,
3539 .read = amdgpu_debugfs_sensor_read,
3540 .llseek = default_llseek
3541};
3542
273d7aa1
TSD
3543static const struct file_operations amdgpu_debugfs_wave_fops = {
3544 .owner = THIS_MODULE,
3545 .read = amdgpu_debugfs_wave_read,
3546 .llseek = default_llseek
3547};
c5a60ce8
TSD
3548static const struct file_operations amdgpu_debugfs_gpr_fops = {
3549 .owner = THIS_MODULE,
3550 .read = amdgpu_debugfs_gpr_read,
3551 .llseek = default_llseek
3552};
273d7aa1 3553
adcec288
TSD
3554static const struct file_operations *debugfs_regs[] = {
3555 &amdgpu_debugfs_regs_fops,
3556 &amdgpu_debugfs_regs_didt_fops,
3557 &amdgpu_debugfs_regs_pcie_fops,
3558 &amdgpu_debugfs_regs_smc_fops,
1e051413 3559 &amdgpu_debugfs_gca_config_fops,
f2cdaf20 3560 &amdgpu_debugfs_sensors_fops,
273d7aa1 3561 &amdgpu_debugfs_wave_fops,
c5a60ce8 3562 &amdgpu_debugfs_gpr_fops,
adcec288
TSD
3563};
3564
3565static const char *debugfs_regs_names[] = {
3566 "amdgpu_regs",
3567 "amdgpu_regs_didt",
3568 "amdgpu_regs_pcie",
3569 "amdgpu_regs_smc",
1e051413 3570 "amdgpu_gca_config",
f2cdaf20 3571 "amdgpu_sensors",
273d7aa1 3572 "amdgpu_wave",
c5a60ce8 3573 "amdgpu_gpr",
adcec288 3574};
d38ceaf9
AD
3575
3576static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3577{
3578 struct drm_minor *minor = adev->ddev->primary;
3579 struct dentry *ent, *root = minor->debugfs_root;
adcec288
TSD
3580 unsigned i, j;
3581
3582 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3583 ent = debugfs_create_file(debugfs_regs_names[i],
3584 S_IFREG | S_IRUGO, root,
3585 adev, debugfs_regs[i]);
3586 if (IS_ERR(ent)) {
3587 for (j = 0; j < i; j++) {
 3588 debugfs_remove(adev->debugfs_regs[j]);
 3589 adev->debugfs_regs[j] = NULL;
3590 }
3591 return PTR_ERR(ent);
3592 }
d38ceaf9 3593
adcec288
TSD
3594 if (!i)
3595 i_size_write(ent->d_inode, adev->rmmio_size);
3596 adev->debugfs_regs[i] = ent;
3597 }
d38ceaf9
AD
3598
3599 return 0;
3600}
3601
3602static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
3603{
adcec288
TSD
3604 unsigned i;
3605
3606 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3607 if (adev->debugfs_regs[i]) {
3608 debugfs_remove(adev->debugfs_regs[i]);
3609 adev->debugfs_regs[i] = NULL;
3610 }
3611 }
d38ceaf9
AD
3612}
3613
3614int amdgpu_debugfs_init(struct drm_minor *minor)
3615{
3616 return 0;
3617}
7cebc728
AK
3618#else
3619static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3620{
3621 return 0;
3622}
3623static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
d38ceaf9 3624#endif