drm/amdgpu: Use kmalloc_array() in amdgpu_debugfs_gca_config_read()
[linux-2.6-block.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_device.c
CommitLineData
d38ceaf9
AD
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
0875dc9e 28#include <linux/kthread.h>
d38ceaf9
AD
29#include <linux/console.h>
30#include <linux/slab.h>
31#include <linux/debugfs.h>
32#include <drm/drmP.h>
33#include <drm/drm_crtc_helper.h>
34#include <drm/amdgpu_drm.h>
35#include <linux/vgaarb.h>
36#include <linux/vga_switcheroo.h>
37#include <linux/efi.h>
38#include "amdgpu.h"
f4b373f4 39#include "amdgpu_trace.h"
d38ceaf9
AD
40#include "amdgpu_i2c.h"
41#include "atom.h"
42#include "amdgpu_atombios.h"
d0dd7f0c 43#include "amd_pcie.h"
33f34802
KW
44#ifdef CONFIG_DRM_AMDGPU_SI
45#include "si.h"
46#endif
a2e73f56
AD
47#ifdef CONFIG_DRM_AMDGPU_CIK
48#include "cik.h"
49#endif
aaa36a97 50#include "vi.h"
d38ceaf9 51#include "bif/bif_4_1_d.h"
9accf2fd 52#include <linux/pci.h>
d38ceaf9
AD
53
54static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
55static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
56
57static const char *amdgpu_asic_name[] = {
da69c161
KW
58 "TAHITI",
59 "PITCAIRN",
60 "VERDE",
61 "OLAND",
62 "HAINAN",
d38ceaf9
AD
63 "BONAIRE",
64 "KAVERI",
65 "KABINI",
66 "HAWAII",
67 "MULLINS",
68 "TOPAZ",
69 "TONGA",
48299f95 70 "FIJI",
d38ceaf9 71 "CARRIZO",
139f4917 72 "STONEY",
2cc0c0b5
FC
73 "POLARIS10",
74 "POLARIS11",
d38ceaf9
AD
75 "LAST",
76};
77
78bool amdgpu_device_is_px(struct drm_device *dev)
79{
80 struct amdgpu_device *adev = dev->dev_private;
81
2f7d10b3 82 if (adev->flags & AMD_IS_PX)
d38ceaf9
AD
83 return true;
84 return false;
85}
86
87/*
88 * MMIO register access helper functions.
89 */
90uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
91 bool always_indirect)
92{
f4b373f4
TSD
93 uint32_t ret;
94
d38ceaf9 95 if ((reg * 4) < adev->rmmio_size && !always_indirect)
f4b373f4 96 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
d38ceaf9
AD
97 else {
98 unsigned long flags;
d38ceaf9
AD
99
100 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
101 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
102 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
103 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
d38ceaf9 104 }
f4b373f4
TSD
105 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
106 return ret;
d38ceaf9
AD
107}
108
109void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
110 bool always_indirect)
111{
f4b373f4
TSD
112 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
113
d38ceaf9
AD
114 if ((reg * 4) < adev->rmmio_size && !always_indirect)
115 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
116 else {
117 unsigned long flags;
118
119 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
120 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
121 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
122 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
123 }
124}
125
126u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
127{
128 if ((reg * 4) < adev->rio_mem_size)
129 return ioread32(adev->rio_mem + (reg * 4));
130 else {
131 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
132 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
133 }
134}
135
136void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
137{
138
139 if ((reg * 4) < adev->rio_mem_size)
140 iowrite32(v, adev->rio_mem + (reg * 4));
141 else {
142 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
143 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
144 }
145}
146
147/**
148 * amdgpu_mm_rdoorbell - read a doorbell dword
149 *
150 * @adev: amdgpu_device pointer
151 * @index: doorbell index
152 *
153 * Returns the value in the doorbell aperture at the
154 * requested doorbell index (CIK).
155 */
156u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
157{
158 if (index < adev->doorbell.num_doorbells) {
159 return readl(adev->doorbell.ptr + index);
160 } else {
161 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
162 return 0;
163 }
164}
165
166/**
167 * amdgpu_mm_wdoorbell - write a doorbell dword
168 *
169 * @adev: amdgpu_device pointer
170 * @index: doorbell index
171 * @v: value to write
172 *
173 * Writes @v to the doorbell aperture at the
174 * requested doorbell index (CIK).
175 */
176void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
177{
178 if (index < adev->doorbell.num_doorbells) {
179 writel(v, adev->doorbell.ptr + index);
180 } else {
181 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
182 }
183}
184
185/**
186 * amdgpu_invalid_rreg - dummy reg read function
187 *
188 * @adev: amdgpu device pointer
189 * @reg: offset of register
190 *
191 * Dummy register read function. Used for register blocks
192 * that certain asics don't have (all asics).
193 * Returns the value in the register.
194 */
195static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
196{
197 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
198 BUG();
199 return 0;
200}
201
202/**
203 * amdgpu_invalid_wreg - dummy reg write function
204 *
205 * @adev: amdgpu device pointer
206 * @reg: offset of register
207 * @v: value to write to the register
208 *
209 * Dummy register read function. Used for register blocks
210 * that certain asics don't have (all asics).
211 */
212static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
213{
214 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
215 reg, v);
216 BUG();
217}
218
219/**
220 * amdgpu_block_invalid_rreg - dummy reg read function
221 *
222 * @adev: amdgpu device pointer
223 * @block: offset of instance
224 * @reg: offset of register
225 *
226 * Dummy register read function. Used for register blocks
227 * that certain asics don't have (all asics).
228 * Returns the value in the register.
229 */
230static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
231 uint32_t block, uint32_t reg)
232{
233 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
234 reg, block);
235 BUG();
236 return 0;
237}
238
239/**
240 * amdgpu_block_invalid_wreg - dummy reg write function
241 *
242 * @adev: amdgpu device pointer
243 * @block: offset of instance
244 * @reg: offset of register
245 * @v: value to write to the register
246 *
247 * Dummy register read function. Used for register blocks
248 * that certain asics don't have (all asics).
249 */
250static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
251 uint32_t block,
252 uint32_t reg, uint32_t v)
253{
254 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
255 reg, block, v);
256 BUG();
257}
258
259static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
260{
261 int r;
262
263 if (adev->vram_scratch.robj == NULL) {
264 r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
857d913d
AD
265 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
266 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
72d7668b 267 NULL, NULL, &adev->vram_scratch.robj);
d38ceaf9
AD
268 if (r) {
269 return r;
270 }
271 }
272
273 r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
274 if (unlikely(r != 0))
275 return r;
276 r = amdgpu_bo_pin(adev->vram_scratch.robj,
277 AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
278 if (r) {
279 amdgpu_bo_unreserve(adev->vram_scratch.robj);
280 return r;
281 }
282 r = amdgpu_bo_kmap(adev->vram_scratch.robj,
283 (void **)&adev->vram_scratch.ptr);
284 if (r)
285 amdgpu_bo_unpin(adev->vram_scratch.robj);
286 amdgpu_bo_unreserve(adev->vram_scratch.robj);
287
288 return r;
289}
290
291static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
292{
293 int r;
294
295 if (adev->vram_scratch.robj == NULL) {
296 return;
297 }
298 r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
299 if (likely(r == 0)) {
300 amdgpu_bo_kunmap(adev->vram_scratch.robj);
301 amdgpu_bo_unpin(adev->vram_scratch.robj);
302 amdgpu_bo_unreserve(adev->vram_scratch.robj);
303 }
304 amdgpu_bo_unref(&adev->vram_scratch.robj);
305}
306
307/**
308 * amdgpu_program_register_sequence - program an array of registers.
309 *
310 * @adev: amdgpu_device pointer
311 * @registers: pointer to the register array
312 * @array_size: size of the register array
313 *
314 * Programs an array or registers with and and or masks.
315 * This is a helper for setting golden registers.
316 */
317void amdgpu_program_register_sequence(struct amdgpu_device *adev,
318 const u32 *registers,
319 const u32 array_size)
320{
321 u32 tmp, reg, and_mask, or_mask;
322 int i;
323
324 if (array_size % 3)
325 return;
326
327 for (i = 0; i < array_size; i +=3) {
328 reg = registers[i + 0];
329 and_mask = registers[i + 1];
330 or_mask = registers[i + 2];
331
332 if (and_mask == 0xffffffff) {
333 tmp = or_mask;
334 } else {
335 tmp = RREG32(reg);
336 tmp &= ~and_mask;
337 tmp |= or_mask;
338 }
339 WREG32(reg, tmp);
340 }
341}
342
343void amdgpu_pci_config_reset(struct amdgpu_device *adev)
344{
345 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
346}
347
348/*
349 * GPU doorbell aperture helpers function.
350 */
351/**
352 * amdgpu_doorbell_init - Init doorbell driver information.
353 *
354 * @adev: amdgpu_device pointer
355 *
356 * Init doorbell driver information (CIK)
357 * Returns 0 on success, error on failure.
358 */
359static int amdgpu_doorbell_init(struct amdgpu_device *adev)
360{
361 /* doorbell bar mapping */
362 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
363 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
364
edf600da 365 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
d38ceaf9
AD
366 AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
367 if (adev->doorbell.num_doorbells == 0)
368 return -EINVAL;
369
370 adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32));
371 if (adev->doorbell.ptr == NULL) {
372 return -ENOMEM;
373 }
374 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base);
375 DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size);
376
377 return 0;
378}
379
380/**
381 * amdgpu_doorbell_fini - Tear down doorbell driver information.
382 *
383 * @adev: amdgpu_device pointer
384 *
385 * Tear down doorbell driver information (CIK)
386 */
387static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
388{
389 iounmap(adev->doorbell.ptr);
390 adev->doorbell.ptr = NULL;
391}
392
393/**
394 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
395 * setup amdkfd
396 *
397 * @adev: amdgpu_device pointer
398 * @aperture_base: output returning doorbell aperture base physical address
399 * @aperture_size: output returning doorbell aperture size in bytes
400 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
401 *
402 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
403 * takes doorbells required for its own rings and reports the setup to amdkfd.
404 * amdgpu reserved doorbells are at the start of the doorbell aperture.
405 */
406void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
407 phys_addr_t *aperture_base,
408 size_t *aperture_size,
409 size_t *start_offset)
410{
411 /*
412 * The first num_doorbells are used by amdgpu.
413 * amdkfd takes whatever's left in the aperture.
414 */
415 if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
416 *aperture_base = adev->doorbell.base;
417 *aperture_size = adev->doorbell.size;
418 *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
419 } else {
420 *aperture_base = 0;
421 *aperture_size = 0;
422 *start_offset = 0;
423 }
424}
425
426/*
427 * amdgpu_wb_*()
428 * Writeback is the the method by which the the GPU updates special pages
429 * in memory with the status of certain GPU events (fences, ring pointers,
430 * etc.).
431 */
432
433/**
434 * amdgpu_wb_fini - Disable Writeback and free memory
435 *
436 * @adev: amdgpu_device pointer
437 *
438 * Disables Writeback and frees the Writeback memory (all asics).
439 * Used at driver shutdown.
440 */
441static void amdgpu_wb_fini(struct amdgpu_device *adev)
442{
443 if (adev->wb.wb_obj) {
444 if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) {
445 amdgpu_bo_kunmap(adev->wb.wb_obj);
446 amdgpu_bo_unpin(adev->wb.wb_obj);
447 amdgpu_bo_unreserve(adev->wb.wb_obj);
448 }
449 amdgpu_bo_unref(&adev->wb.wb_obj);
450 adev->wb.wb = NULL;
451 adev->wb.wb_obj = NULL;
452 }
453}
454
455/**
456 * amdgpu_wb_init- Init Writeback driver info and allocate memory
457 *
458 * @adev: amdgpu_device pointer
459 *
460 * Disables Writeback and frees the Writeback memory (all asics).
461 * Used at driver startup.
462 * Returns 0 on success or an -error on failure.
463 */
464static int amdgpu_wb_init(struct amdgpu_device *adev)
465{
466 int r;
467
468 if (adev->wb.wb_obj == NULL) {
469 r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
72d7668b
CK
470 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
471 &adev->wb.wb_obj);
d38ceaf9
AD
472 if (r) {
473 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
474 return r;
475 }
476 r = amdgpu_bo_reserve(adev->wb.wb_obj, false);
477 if (unlikely(r != 0)) {
478 amdgpu_wb_fini(adev);
479 return r;
480 }
481 r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT,
482 &adev->wb.gpu_addr);
483 if (r) {
484 amdgpu_bo_unreserve(adev->wb.wb_obj);
485 dev_warn(adev->dev, "(%d) pin WB bo failed\n", r);
486 amdgpu_wb_fini(adev);
487 return r;
488 }
489 r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb);
490 amdgpu_bo_unreserve(adev->wb.wb_obj);
491 if (r) {
492 dev_warn(adev->dev, "(%d) map WB bo failed\n", r);
493 amdgpu_wb_fini(adev);
494 return r;
495 }
496
497 adev->wb.num_wb = AMDGPU_MAX_WB;
498 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
499
500 /* clear wb memory */
501 memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE);
502 }
503
504 return 0;
505}
506
507/**
508 * amdgpu_wb_get - Allocate a wb entry
509 *
510 * @adev: amdgpu_device pointer
511 * @wb: wb index
512 *
513 * Allocate a wb slot for use by the driver (all asics).
514 * Returns 0 on success or -EINVAL on failure.
515 */
516int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
517{
518 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
519 if (offset < adev->wb.num_wb) {
520 __set_bit(offset, adev->wb.used);
521 *wb = offset;
522 return 0;
523 } else {
524 return -EINVAL;
525 }
526}
527
528/**
529 * amdgpu_wb_free - Free a wb entry
530 *
531 * @adev: amdgpu_device pointer
532 * @wb: wb index
533 *
534 * Free a wb slot allocated for use by the driver (all asics)
535 */
536void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
537{
538 if (wb < adev->wb.num_wb)
539 __clear_bit(wb, adev->wb.used);
540}
541
542/**
543 * amdgpu_vram_location - try to find VRAM location
544 * @adev: amdgpu device structure holding all necessary informations
545 * @mc: memory controller structure holding memory informations
546 * @base: base address at which to put VRAM
547 *
548 * Function will place try to place VRAM at base address provided
549 * as parameter (which is so far either PCI aperture address or
550 * for IGP TOM base address).
551 *
552 * If there is not enough space to fit the unvisible VRAM in the 32bits
553 * address space then we limit the VRAM size to the aperture.
554 *
555 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
556 * this shouldn't be a problem as we are using the PCI aperture as a reference.
557 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
558 * not IGP.
559 *
560 * Note: we use mc_vram_size as on some board we need to program the mc to
561 * cover the whole aperture even if VRAM size is inferior to aperture size
562 * Novell bug 204882 + along with lots of ubuntu ones
563 *
564 * Note: when limiting vram it's safe to overwritte real_vram_size because
565 * we are not in case where real_vram_size is inferior to mc_vram_size (ie
566 * note afected by bogus hw of Novell bug 204882 + along with lots of ubuntu
567 * ones)
568 *
569 * Note: IGP TOM addr should be the same as the aperture addr, we don't
570 * explicitly check for that thought.
571 *
572 * FIXME: when reducing VRAM size align new size on power of 2.
573 */
574void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
575{
576 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
577
578 mc->vram_start = base;
579 if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
580 dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
581 mc->real_vram_size = mc->aper_size;
582 mc->mc_vram_size = mc->aper_size;
583 }
584 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
585 if (limit && limit < mc->real_vram_size)
586 mc->real_vram_size = limit;
587 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
588 mc->mc_vram_size >> 20, mc->vram_start,
589 mc->vram_end, mc->real_vram_size >> 20);
590}
591
592/**
593 * amdgpu_gtt_location - try to find GTT location
594 * @adev: amdgpu device structure holding all necessary informations
595 * @mc: memory controller structure holding memory informations
596 *
597 * Function will place try to place GTT before or after VRAM.
598 *
599 * If GTT size is bigger than space left then we ajust GTT size.
600 * Thus function will never fails.
601 *
602 * FIXME: when reducing GTT size align new size on power of 2.
603 */
604void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
605{
606 u64 size_af, size_bf;
607
608 size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
609 size_bf = mc->vram_start & ~mc->gtt_base_align;
610 if (size_bf > size_af) {
611 if (mc->gtt_size > size_bf) {
612 dev_warn(adev->dev, "limiting GTT\n");
613 mc->gtt_size = size_bf;
614 }
615 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
616 } else {
617 if (mc->gtt_size > size_af) {
618 dev_warn(adev->dev, "limiting GTT\n");
619 mc->gtt_size = size_af;
620 }
621 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
622 }
623 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
624 dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
625 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
626}
627
628/*
629 * GPU helpers function.
630 */
631/**
632 * amdgpu_card_posted - check if the hw has already been initialized
633 *
634 * @adev: amdgpu_device pointer
635 *
636 * Check if the asic has been initialized (all asics).
637 * Used at driver startup.
638 * Returns true if initialized or false if not.
639 */
640bool amdgpu_card_posted(struct amdgpu_device *adev)
641{
642 uint32_t reg;
643
644 /* then check MEM_SIZE, in case the crtcs are off */
645 reg = RREG32(mmCONFIG_MEMSIZE);
646
647 if (reg)
648 return true;
649
650 return false;
651
652}
653
d38ceaf9
AD
654/**
655 * amdgpu_dummy_page_init - init dummy page used by the driver
656 *
657 * @adev: amdgpu_device pointer
658 *
659 * Allocate the dummy page used by the driver (all asics).
660 * This dummy page is used by the driver as a filler for gart entries
661 * when pages are taken out of the GART
662 * Returns 0 on sucess, -ENOMEM on failure.
663 */
664int amdgpu_dummy_page_init(struct amdgpu_device *adev)
665{
666 if (adev->dummy_page.page)
667 return 0;
668 adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
669 if (adev->dummy_page.page == NULL)
670 return -ENOMEM;
671 adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
672 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
673 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
674 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
675 __free_page(adev->dummy_page.page);
676 adev->dummy_page.page = NULL;
677 return -ENOMEM;
678 }
679 return 0;
680}
681
682/**
683 * amdgpu_dummy_page_fini - free dummy page used by the driver
684 *
685 * @adev: amdgpu_device pointer
686 *
687 * Frees the dummy page used by the driver (all asics).
688 */
689void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
690{
691 if (adev->dummy_page.page == NULL)
692 return;
693 pci_unmap_page(adev->pdev, adev->dummy_page.addr,
694 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
695 __free_page(adev->dummy_page.page);
696 adev->dummy_page.page = NULL;
697}
698
699
700/* ATOM accessor methods */
701/*
702 * ATOM is an interpreted byte code stored in tables in the vbios. The
703 * driver registers callbacks to access registers and the interpreter
704 * in the driver parses the tables and executes then to program specific
705 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
706 * atombios.h, and atom.c
707 */
708
709/**
710 * cail_pll_read - read PLL register
711 *
712 * @info: atom card_info pointer
713 * @reg: PLL register offset
714 *
715 * Provides a PLL register accessor for the atom interpreter (r4xx+).
716 * Returns the value of the PLL register.
717 */
718static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
719{
720 return 0;
721}
722
723/**
724 * cail_pll_write - write PLL register
725 *
726 * @info: atom card_info pointer
727 * @reg: PLL register offset
728 * @val: value to write to the pll register
729 *
730 * Provides a PLL register accessor for the atom interpreter (r4xx+).
731 */
732static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
733{
734
735}
736
737/**
738 * cail_mc_read - read MC (Memory Controller) register
739 *
740 * @info: atom card_info pointer
741 * @reg: MC register offset
742 *
743 * Provides an MC register accessor for the atom interpreter (r4xx+).
744 * Returns the value of the MC register.
745 */
746static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
747{
748 return 0;
749}
750
751/**
752 * cail_mc_write - write MC (Memory Controller) register
753 *
754 * @info: atom card_info pointer
755 * @reg: MC register offset
756 * @val: value to write to the pll register
757 *
758 * Provides a MC register accessor for the atom interpreter (r4xx+).
759 */
760static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
761{
762
763}
764
765/**
766 * cail_reg_write - write MMIO register
767 *
768 * @info: atom card_info pointer
769 * @reg: MMIO register offset
770 * @val: value to write to the pll register
771 *
772 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
773 */
774static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
775{
776 struct amdgpu_device *adev = info->dev->dev_private;
777
778 WREG32(reg, val);
779}
780
781/**
782 * cail_reg_read - read MMIO register
783 *
784 * @info: atom card_info pointer
785 * @reg: MMIO register offset
786 *
787 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
788 * Returns the value of the MMIO register.
789 */
790static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
791{
792 struct amdgpu_device *adev = info->dev->dev_private;
793 uint32_t r;
794
795 r = RREG32(reg);
796 return r;
797}
798
799/**
800 * cail_ioreg_write - write IO register
801 *
802 * @info: atom card_info pointer
803 * @reg: IO register offset
804 * @val: value to write to the pll register
805 *
806 * Provides a IO register accessor for the atom interpreter (r4xx+).
807 */
808static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
809{
810 struct amdgpu_device *adev = info->dev->dev_private;
811
812 WREG32_IO(reg, val);
813}
814
815/**
816 * cail_ioreg_read - read IO register
817 *
818 * @info: atom card_info pointer
819 * @reg: IO register offset
820 *
821 * Provides an IO register accessor for the atom interpreter (r4xx+).
822 * Returns the value of the IO register.
823 */
824static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
825{
826 struct amdgpu_device *adev = info->dev->dev_private;
827 uint32_t r;
828
829 r = RREG32_IO(reg);
830 return r;
831}
832
833/**
834 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
835 *
836 * @adev: amdgpu_device pointer
837 *
838 * Frees the driver info and register access callbacks for the ATOM
839 * interpreter (r4xx+).
840 * Called at driver shutdown.
841 */
842static void amdgpu_atombios_fini(struct amdgpu_device *adev)
843{
89e0ec9f 844 if (adev->mode_info.atom_context) {
d38ceaf9 845 kfree(adev->mode_info.atom_context->scratch);
89e0ec9f
ML
846 kfree(adev->mode_info.atom_context->iio);
847 }
d38ceaf9
AD
848 kfree(adev->mode_info.atom_context);
849 adev->mode_info.atom_context = NULL;
850 kfree(adev->mode_info.atom_card_info);
851 adev->mode_info.atom_card_info = NULL;
852}
853
854/**
855 * amdgpu_atombios_init - init the driver info and callbacks for atombios
856 *
857 * @adev: amdgpu_device pointer
858 *
859 * Initializes the driver info and register access callbacks for the
860 * ATOM interpreter (r4xx+).
861 * Returns 0 on sucess, -ENOMEM on failure.
862 * Called at driver startup.
863 */
864static int amdgpu_atombios_init(struct amdgpu_device *adev)
865{
866 struct card_info *atom_card_info =
867 kzalloc(sizeof(struct card_info), GFP_KERNEL);
868
869 if (!atom_card_info)
870 return -ENOMEM;
871
872 adev->mode_info.atom_card_info = atom_card_info;
873 atom_card_info->dev = adev->ddev;
874 atom_card_info->reg_read = cail_reg_read;
875 atom_card_info->reg_write = cail_reg_write;
876 /* needed for iio ops */
877 if (adev->rio_mem) {
878 atom_card_info->ioreg_read = cail_ioreg_read;
879 atom_card_info->ioreg_write = cail_ioreg_write;
880 } else {
881 DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
882 atom_card_info->ioreg_read = cail_reg_read;
883 atom_card_info->ioreg_write = cail_reg_write;
884 }
885 atom_card_info->mc_read = cail_mc_read;
886 atom_card_info->mc_write = cail_mc_write;
887 atom_card_info->pll_read = cail_pll_read;
888 atom_card_info->pll_write = cail_pll_write;
889
890 adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
891 if (!adev->mode_info.atom_context) {
892 amdgpu_atombios_fini(adev);
893 return -ENOMEM;
894 }
895
896 mutex_init(&adev->mode_info.atom_context->mutex);
897 amdgpu_atombios_scratch_regs_init(adev);
898 amdgpu_atom_allocate_fb_scratch(adev->mode_info.atom_context);
899 return 0;
900}
901
902/* if we get transitioned to only one device, take VGA back */
903/**
904 * amdgpu_vga_set_decode - enable/disable vga decode
905 *
906 * @cookie: amdgpu_device pointer
907 * @state: enable/disable vga decode
908 *
909 * Enable/disable vga decode (all asics).
910 * Returns VGA resource flags.
911 */
912static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
913{
914 struct amdgpu_device *adev = cookie;
915 amdgpu_asic_set_vga_state(adev, state);
916 if (state)
917 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
918 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
919 else
920 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
921}
922
923/**
924 * amdgpu_check_pot_argument - check that argument is a power of two
925 *
926 * @arg: value to check
927 *
928 * Validates that a certain argument is a power of two (all asics).
929 * Returns true if argument is valid.
930 */
931static bool amdgpu_check_pot_argument(int arg)
932{
933 return (arg & (arg - 1)) == 0;
934}
935
936/**
937 * amdgpu_check_arguments - validate module params
938 *
939 * @adev: amdgpu_device pointer
940 *
941 * Validates certain module parameters and updates
942 * the associated values used by the driver (all asics).
943 */
944static void amdgpu_check_arguments(struct amdgpu_device *adev)
945{
5b011235
CZ
946 if (amdgpu_sched_jobs < 4) {
947 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
948 amdgpu_sched_jobs);
949 amdgpu_sched_jobs = 4;
950 } else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)){
951 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
952 amdgpu_sched_jobs);
953 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
954 }
d38ceaf9
AD
955
956 if (amdgpu_gart_size != -1) {
c4e1a13a 957 /* gtt size must be greater or equal to 32M */
d38ceaf9
AD
958 if (amdgpu_gart_size < 32) {
959 dev_warn(adev->dev, "gart size (%d) too small\n",
960 amdgpu_gart_size);
961 amdgpu_gart_size = -1;
d38ceaf9
AD
962 }
963 }
964
965 if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
966 dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
967 amdgpu_vm_size);
8dacc127 968 amdgpu_vm_size = 8;
d38ceaf9
AD
969 }
970
971 if (amdgpu_vm_size < 1) {
972 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
973 amdgpu_vm_size);
8dacc127 974 amdgpu_vm_size = 8;
d38ceaf9
AD
975 }
976
977 /*
978 * Max GPUVM size for Cayman, SI and CI are 40 bits.
979 */
980 if (amdgpu_vm_size > 1024) {
981 dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
982 amdgpu_vm_size);
8dacc127 983 amdgpu_vm_size = 8;
d38ceaf9
AD
984 }
985
986 /* defines number of bits in page table versus page directory,
987 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
988 * page table and the remaining bits are in the page directory */
989 if (amdgpu_vm_block_size == -1) {
990
991 /* Total bits covered by PD + PTs */
992 unsigned bits = ilog2(amdgpu_vm_size) + 18;
993
994 /* Make sure the PD is 4K in size up to 8GB address space.
995 Above that split equal between PD and PTs */
996 if (amdgpu_vm_size <= 8)
997 amdgpu_vm_block_size = bits - 9;
998 else
999 amdgpu_vm_block_size = (bits + 3) / 2;
1000
1001 } else if (amdgpu_vm_block_size < 9) {
1002 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1003 amdgpu_vm_block_size);
1004 amdgpu_vm_block_size = 9;
1005 }
1006
1007 if (amdgpu_vm_block_size > 24 ||
1008 (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
1009 dev_warn(adev->dev, "VM page table size (%d) too large\n",
1010 amdgpu_vm_block_size);
1011 amdgpu_vm_block_size = 9;
1012 }
1013}
1014
1015/**
1016 * amdgpu_switcheroo_set_state - set switcheroo state
1017 *
1018 * @pdev: pci dev pointer
1694467b 1019 * @state: vga_switcheroo state
d38ceaf9
AD
1020 *
1021 * Callback for the switcheroo driver. Suspends or resumes the
1022 * the asics before or after it is powered up using ACPI methods.
1023 */
1024static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1025{
1026 struct drm_device *dev = pci_get_drvdata(pdev);
1027
1028 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1029 return;
1030
1031 if (state == VGA_SWITCHEROO_ON) {
1032 unsigned d3_delay = dev->pdev->d3_delay;
1033
1034 printk(KERN_INFO "amdgpu: switched on\n");
1035 /* don't suspend or resume card normally */
1036 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1037
810ddc3a 1038 amdgpu_device_resume(dev, true, true);
d38ceaf9
AD
1039
1040 dev->pdev->d3_delay = d3_delay;
1041
1042 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1043 drm_kms_helper_poll_enable(dev);
1044 } else {
1045 printk(KERN_INFO "amdgpu: switched off\n");
1046 drm_kms_helper_poll_disable(dev);
1047 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
810ddc3a 1048 amdgpu_device_suspend(dev, true, true);
d38ceaf9
AD
1049 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1050 }
1051}
1052
1053/**
1054 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1055 *
1056 * @pdev: pci dev pointer
1057 *
1058 * Callback for the switcheroo driver. Check of the switcheroo
1059 * state can be changed.
1060 * Returns true if the state can be changed, false if not.
1061 */
1062static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1063{
1064 struct drm_device *dev = pci_get_drvdata(pdev);
1065
1066 /*
1067 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1068 * locking inversion with the driver load path. And the access here is
1069 * completely racy anyway. So don't bother with locking for now.
1070 */
1071 return dev->open_count == 0;
1072}
1073
1074static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1075 .set_gpu_state = amdgpu_switcheroo_set_state,
1076 .reprobe = NULL,
1077 .can_switch = amdgpu_switcheroo_can_switch,
1078};
1079
1080int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
5fc3aeeb 1081 enum amd_ip_block_type block_type,
1082 enum amd_clockgating_state state)
d38ceaf9
AD
1083{
1084 int i, r = 0;
1085
1086 for (i = 0; i < adev->num_ip_blocks; i++) {
9ecbe7f5
AD
1087 if (!adev->ip_block_status[i].valid)
1088 continue;
d38ceaf9 1089 if (adev->ip_blocks[i].type == block_type) {
5fc3aeeb 1090 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
d38ceaf9
AD
1091 state);
1092 if (r)
1093 return r;
a225bf1c 1094 break;
d38ceaf9
AD
1095 }
1096 }
1097 return r;
1098}
1099
1100int amdgpu_set_powergating_state(struct amdgpu_device *adev,
5fc3aeeb 1101 enum amd_ip_block_type block_type,
1102 enum amd_powergating_state state)
d38ceaf9
AD
1103{
1104 int i, r = 0;
1105
1106 for (i = 0; i < adev->num_ip_blocks; i++) {
9ecbe7f5
AD
1107 if (!adev->ip_block_status[i].valid)
1108 continue;
d38ceaf9 1109 if (adev->ip_blocks[i].type == block_type) {
5fc3aeeb 1110 r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
d38ceaf9
AD
1111 state);
1112 if (r)
1113 return r;
a225bf1c 1114 break;
d38ceaf9
AD
1115 }
1116 }
1117 return r;
1118}
1119
5dbbb60b
AD
1120int amdgpu_wait_for_idle(struct amdgpu_device *adev,
1121 enum amd_ip_block_type block_type)
1122{
1123 int i, r;
1124
1125 for (i = 0; i < adev->num_ip_blocks; i++) {
9ecbe7f5
AD
1126 if (!adev->ip_block_status[i].valid)
1127 continue;
5dbbb60b
AD
1128 if (adev->ip_blocks[i].type == block_type) {
1129 r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
1130 if (r)
1131 return r;
1132 break;
1133 }
1134 }
1135 return 0;
1136
1137}
1138
1139bool amdgpu_is_idle(struct amdgpu_device *adev,
1140 enum amd_ip_block_type block_type)
1141{
1142 int i;
1143
1144 for (i = 0; i < adev->num_ip_blocks; i++) {
9ecbe7f5
AD
1145 if (!adev->ip_block_status[i].valid)
1146 continue;
5dbbb60b
AD
1147 if (adev->ip_blocks[i].type == block_type)
1148 return adev->ip_blocks[i].funcs->is_idle((void *)adev);
1149 }
1150 return true;
1151
1152}
1153
d38ceaf9
AD
1154const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
1155 struct amdgpu_device *adev,
5fc3aeeb 1156 enum amd_ip_block_type type)
d38ceaf9
AD
1157{
1158 int i;
1159
1160 for (i = 0; i < adev->num_ip_blocks; i++)
1161 if (adev->ip_blocks[i].type == type)
1162 return &adev->ip_blocks[i];
1163
1164 return NULL;
1165}
1166
1167/**
1168 * amdgpu_ip_block_version_cmp
1169 *
1170 * @adev: amdgpu_device pointer
5fc3aeeb 1171 * @type: enum amd_ip_block_type
d38ceaf9
AD
1172 * @major: major version
1173 * @minor: minor version
1174 *
1175 * return 0 if equal or greater
1176 * return 1 if smaller or the ip_block doesn't exist
1177 */
1178int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
5fc3aeeb 1179 enum amd_ip_block_type type,
d38ceaf9
AD
1180 u32 major, u32 minor)
1181{
1182 const struct amdgpu_ip_block_version *ip_block;
1183 ip_block = amdgpu_get_ip_block(adev, type);
1184
1185 if (ip_block && ((ip_block->major > major) ||
1186 ((ip_block->major == major) &&
1187 (ip_block->minor >= minor))))
1188 return 0;
1189
1190 return 1;
1191}
1192
9accf2fd
ED
1193static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev)
1194{
1195 adev->enable_virtual_display = false;
1196
1197 if (amdgpu_virtual_display) {
1198 struct drm_device *ddev = adev->ddev;
1199 const char *pci_address_name = pci_name(ddev->pdev);
1200 char *pciaddstr, *pciaddstr_tmp, *pciaddname;
1201
1202 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1203 pciaddstr_tmp = pciaddstr;
1204 while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) {
1205 if (!strcmp(pci_address_name, pciaddname)) {
1206 adev->enable_virtual_display = true;
1207 break;
1208 }
1209 }
1210
1211 DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n",
1212 amdgpu_virtual_display, pci_address_name,
1213 adev->enable_virtual_display);
1214
1215 kfree(pciaddstr);
1216 }
1217}
1218
d38ceaf9
AD
1219static int amdgpu_early_init(struct amdgpu_device *adev)
1220{
aaa36a97 1221 int i, r;
d38ceaf9 1222
9accf2fd 1223 amdgpu_whether_enable_virtual_display(adev);
a6be7570 1224
d38ceaf9 1225 switch (adev->asic_type) {
aaa36a97
AD
1226 case CHIP_TOPAZ:
1227 case CHIP_TONGA:
48299f95 1228 case CHIP_FIJI:
2cc0c0b5
FC
1229 case CHIP_POLARIS11:
1230 case CHIP_POLARIS10:
aaa36a97 1231 case CHIP_CARRIZO:
39bb0c92
SL
1232 case CHIP_STONEY:
1233 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
aaa36a97
AD
1234 adev->family = AMDGPU_FAMILY_CZ;
1235 else
1236 adev->family = AMDGPU_FAMILY_VI;
1237
1238 r = vi_set_ip_blocks(adev);
1239 if (r)
1240 return r;
1241 break;
33f34802
KW
1242#ifdef CONFIG_DRM_AMDGPU_SI
1243 case CHIP_VERDE:
1244 case CHIP_TAHITI:
1245 case CHIP_PITCAIRN:
1246 case CHIP_OLAND:
1247 case CHIP_HAINAN:
295d0daf 1248 adev->family = AMDGPU_FAMILY_SI;
33f34802
KW
1249 r = si_set_ip_blocks(adev);
1250 if (r)
1251 return r;
1252 break;
1253#endif
a2e73f56
AD
1254#ifdef CONFIG_DRM_AMDGPU_CIK
1255 case CHIP_BONAIRE:
1256 case CHIP_HAWAII:
1257 case CHIP_KAVERI:
1258 case CHIP_KABINI:
1259 case CHIP_MULLINS:
1260 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1261 adev->family = AMDGPU_FAMILY_CI;
1262 else
1263 adev->family = AMDGPU_FAMILY_KV;
1264
1265 r = cik_set_ip_blocks(adev);
1266 if (r)
1267 return r;
1268 break;
1269#endif
d38ceaf9
AD
1270 default:
1271 /* FIXME: not supported yet */
1272 return -EINVAL;
1273 }
1274
8faf0e08
AD
1275 adev->ip_block_status = kcalloc(adev->num_ip_blocks,
1276 sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
1277 if (adev->ip_block_status == NULL)
d8d090b7 1278 return -ENOMEM;
d38ceaf9
AD
1279
1280 if (adev->ip_blocks == NULL) {
1281 DRM_ERROR("No IP blocks found!\n");
1282 return r;
1283 }
1284
1285 for (i = 0; i < adev->num_ip_blocks; i++) {
1286 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1287 DRM_ERROR("disabled ip block: %d\n", i);
8faf0e08 1288 adev->ip_block_status[i].valid = false;
d38ceaf9
AD
1289 } else {
1290 if (adev->ip_blocks[i].funcs->early_init) {
5fc3aeeb 1291 r = adev->ip_blocks[i].funcs->early_init((void *)adev);
2c1a2784 1292 if (r == -ENOENT) {
8faf0e08 1293 adev->ip_block_status[i].valid = false;
2c1a2784 1294 } else if (r) {
88a907d6 1295 DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
d38ceaf9 1296 return r;
2c1a2784 1297 } else {
8faf0e08 1298 adev->ip_block_status[i].valid = true;
2c1a2784 1299 }
974e6b64 1300 } else {
8faf0e08 1301 adev->ip_block_status[i].valid = true;
d38ceaf9 1302 }
d38ceaf9
AD
1303 }
1304 }
1305
395d1fb9
NH
1306 adev->cg_flags &= amdgpu_cg_mask;
1307 adev->pg_flags &= amdgpu_pg_mask;
1308
d38ceaf9
AD
1309 return 0;
1310}
1311
1312static int amdgpu_init(struct amdgpu_device *adev)
1313{
1314 int i, r;
1315
1316 for (i = 0; i < adev->num_ip_blocks; i++) {
8faf0e08 1317 if (!adev->ip_block_status[i].valid)
d38ceaf9 1318 continue;
5fc3aeeb 1319 r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
2c1a2784 1320 if (r) {
822b2cef 1321 DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
d38ceaf9 1322 return r;
2c1a2784 1323 }
8faf0e08 1324 adev->ip_block_status[i].sw = true;
d38ceaf9 1325 /* need to do gmc hw init early so we can allocate gpu mem */
5fc3aeeb 1326 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
d38ceaf9 1327 r = amdgpu_vram_scratch_init(adev);
2c1a2784
AD
1328 if (r) {
1329 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
d38ceaf9 1330 return r;
2c1a2784 1331 }
5fc3aeeb 1332 r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
2c1a2784
AD
1333 if (r) {
1334 DRM_ERROR("hw_init %d failed %d\n", i, r);
d38ceaf9 1335 return r;
2c1a2784 1336 }
d38ceaf9 1337 r = amdgpu_wb_init(adev);
2c1a2784
AD
1338 if (r) {
1339 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
d38ceaf9 1340 return r;
2c1a2784 1341 }
8faf0e08 1342 adev->ip_block_status[i].hw = true;
d38ceaf9
AD
1343 }
1344 }
1345
1346 for (i = 0; i < adev->num_ip_blocks; i++) {
8faf0e08 1347 if (!adev->ip_block_status[i].sw)
d38ceaf9
AD
1348 continue;
1349 /* gmc hw init is done early */
5fc3aeeb 1350 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
d38ceaf9 1351 continue;
5fc3aeeb 1352 r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
2c1a2784 1353 if (r) {
822b2cef 1354 DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
d38ceaf9 1355 return r;
2c1a2784 1356 }
8faf0e08 1357 adev->ip_block_status[i].hw = true;
d38ceaf9
AD
1358 }
1359
1360 return 0;
1361}
1362
1363static int amdgpu_late_init(struct amdgpu_device *adev)
1364{
1365 int i = 0, r;
1366
1367 for (i = 0; i < adev->num_ip_blocks; i++) {
8faf0e08 1368 if (!adev->ip_block_status[i].valid)
d38ceaf9 1369 continue;
d932f37c
RZ
1370 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_UVD ||
1371 adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_VCE)
1372 continue;
d38ceaf9 1373 /* enable clockgating to save power */
5fc3aeeb 1374 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
1375 AMD_CG_STATE_GATE);
2c1a2784 1376 if (r) {
822b2cef 1377 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
d38ceaf9 1378 return r;
2c1a2784 1379 }
d38ceaf9 1380 if (adev->ip_blocks[i].funcs->late_init) {
5fc3aeeb 1381 r = adev->ip_blocks[i].funcs->late_init((void *)adev);
2c1a2784 1382 if (r) {
822b2cef 1383 DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
d38ceaf9 1384 return r;
2c1a2784 1385 }
d38ceaf9
AD
1386 }
1387 }
1388
1389 return 0;
1390}
1391
1392static int amdgpu_fini(struct amdgpu_device *adev)
1393{
1394 int i, r;
1395
1396 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
8faf0e08 1397 if (!adev->ip_block_status[i].hw)
d38ceaf9 1398 continue;
5fc3aeeb 1399 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
d38ceaf9
AD
1400 amdgpu_wb_fini(adev);
1401 amdgpu_vram_scratch_fini(adev);
1402 }
1403 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
5fc3aeeb 1404 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
1405 AMD_CG_STATE_UNGATE);
2c1a2784 1406 if (r) {
822b2cef 1407 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
d38ceaf9 1408 return r;
2c1a2784 1409 }
5fc3aeeb 1410 r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
d38ceaf9 1411 /* XXX handle errors */
2c1a2784 1412 if (r) {
822b2cef 1413 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
2c1a2784 1414 }
8faf0e08 1415 adev->ip_block_status[i].hw = false;
d38ceaf9
AD
1416 }
1417
1418 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
8faf0e08 1419 if (!adev->ip_block_status[i].sw)
d38ceaf9 1420 continue;
5fc3aeeb 1421 r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
d38ceaf9 1422 /* XXX handle errors */
2c1a2784 1423 if (r) {
822b2cef 1424 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
2c1a2784 1425 }
8faf0e08
AD
1426 adev->ip_block_status[i].sw = false;
1427 adev->ip_block_status[i].valid = false;
d38ceaf9
AD
1428 }
1429
a6dcfd9c
ML
1430 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1431 if (adev->ip_blocks[i].funcs->late_fini)
1432 adev->ip_blocks[i].funcs->late_fini((void *)adev);
1433 }
1434
d38ceaf9
AD
1435 return 0;
1436}
1437
1438static int amdgpu_suspend(struct amdgpu_device *adev)
1439{
1440 int i, r;
1441
c5a93a28
FC
1442 /* ungate SMC block first */
1443 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1444 AMD_CG_STATE_UNGATE);
1445 if (r) {
1446 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r);
1447 }
1448
d38ceaf9 1449 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
8faf0e08 1450 if (!adev->ip_block_status[i].valid)
d38ceaf9
AD
1451 continue;
1452 /* ungate blocks so that suspend can properly shut them down */
c5a93a28
FC
1453 if (i != AMD_IP_BLOCK_TYPE_SMC) {
1454 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
1455 AMD_CG_STATE_UNGATE);
1456 if (r) {
822b2cef 1457 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
c5a93a28 1458 }
2c1a2784 1459 }
d38ceaf9
AD
1460 /* XXX handle errors */
1461 r = adev->ip_blocks[i].funcs->suspend(adev);
1462 /* XXX handle errors */
2c1a2784 1463 if (r) {
822b2cef 1464 DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
2c1a2784 1465 }
d38ceaf9
AD
1466 }
1467
1468 return 0;
1469}
1470
1471static int amdgpu_resume(struct amdgpu_device *adev)
1472{
1473 int i, r;
1474
1475 for (i = 0; i < adev->num_ip_blocks; i++) {
8faf0e08 1476 if (!adev->ip_block_status[i].valid)
d38ceaf9
AD
1477 continue;
1478 r = adev->ip_blocks[i].funcs->resume(adev);
2c1a2784 1479 if (r) {
822b2cef 1480 DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
d38ceaf9 1481 return r;
2c1a2784 1482 }
d38ceaf9
AD
1483 }
1484
1485 return 0;
1486}
1487
048765ad
AR
1488static bool amdgpu_device_is_virtual(void)
1489{
1490#ifdef CONFIG_X86
1491 return boot_cpu_has(X86_FEATURE_HYPERVISOR);
1492#else
1493 return false;
1494#endif
1495}
1496
d38ceaf9
AD
1497/**
1498 * amdgpu_device_init - initialize the driver
1499 *
1500 * @adev: amdgpu_device pointer
1501 * @pdev: drm dev pointer
1502 * @pdev: pci dev pointer
1503 * @flags: driver flags
1504 *
1505 * Initializes the driver info and hw (all asics).
1506 * Returns 0 for success or an error on failure.
1507 * Called at driver startup.
1508 */
1509int amdgpu_device_init(struct amdgpu_device *adev,
1510 struct drm_device *ddev,
1511 struct pci_dev *pdev,
1512 uint32_t flags)
1513{
1514 int r, i;
1515 bool runtime = false;
95844d20 1516 u32 max_MBps;
d38ceaf9
AD
1517
1518 adev->shutdown = false;
1519 adev->dev = &pdev->dev;
1520 adev->ddev = ddev;
1521 adev->pdev = pdev;
1522 adev->flags = flags;
2f7d10b3 1523 adev->asic_type = flags & AMD_ASIC_MASK;
d38ceaf9
AD
1524 adev->is_atom_bios = false;
1525 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
1526 adev->mc.gtt_size = 512 * 1024 * 1024;
1527 adev->accel_working = false;
1528 adev->num_rings = 0;
1529 adev->mman.buffer_funcs = NULL;
1530 adev->mman.buffer_funcs_ring = NULL;
1531 adev->vm_manager.vm_pte_funcs = NULL;
2d55e45a 1532 adev->vm_manager.vm_pte_num_rings = 0;
d38ceaf9
AD
1533 adev->gart.gart_funcs = NULL;
1534 adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
1535
1536 adev->smc_rreg = &amdgpu_invalid_rreg;
1537 adev->smc_wreg = &amdgpu_invalid_wreg;
1538 adev->pcie_rreg = &amdgpu_invalid_rreg;
1539 adev->pcie_wreg = &amdgpu_invalid_wreg;
36b9a952
HR
1540 adev->pciep_rreg = &amdgpu_invalid_rreg;
1541 adev->pciep_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
1542 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
1543 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
1544 adev->didt_rreg = &amdgpu_invalid_rreg;
1545 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
1546 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
1547 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
1548 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
1549 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1550
ccdbb20a 1551
3e39ab90
AD
1552 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1553 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
1554 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
1555
1556 /* mutex initialization are all done here so we
1557 * can recall function without having locking issues */
8d0a7cea 1558 mutex_init(&adev->vm_manager.lock);
d38ceaf9 1559 atomic_set(&adev->irq.ih.lock, 0);
d38ceaf9
AD
1560 mutex_init(&adev->pm.mutex);
1561 mutex_init(&adev->gfx.gpu_clock_mutex);
1562 mutex_init(&adev->srbm_mutex);
1563 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9
AD
1564 mutex_init(&adev->mn_lock);
1565 hash_init(adev->mn_hash);
1566
1567 amdgpu_check_arguments(adev);
1568
1569 /* Registers mapping */
1570 /* TODO: block userspace mapping of io register */
1571 spin_lock_init(&adev->mmio_idx_lock);
1572 spin_lock_init(&adev->smc_idx_lock);
1573 spin_lock_init(&adev->pcie_idx_lock);
1574 spin_lock_init(&adev->uvd_ctx_idx_lock);
1575 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 1576 spin_lock_init(&adev->gc_cac_idx_lock);
d38ceaf9 1577 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 1578 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 1579
0c4e7fa5
CZ
1580 INIT_LIST_HEAD(&adev->shadow_list);
1581 mutex_init(&adev->shadow_list_lock);
1582
5c1354bd
CZ
1583 INIT_LIST_HEAD(&adev->gtt_list);
1584 spin_lock_init(&adev->gtt_list_lock);
1585
da69c161
KW
1586 if (adev->asic_type >= CHIP_BONAIRE) {
1587 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
1588 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
1589 } else {
1590 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
1591 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
1592 }
5c1354bd 1593
d38ceaf9
AD
1594 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
1595 if (adev->rmmio == NULL) {
1596 return -ENOMEM;
1597 }
1598 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
1599 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
1600
da69c161
KW
1601 if (adev->asic_type >= CHIP_BONAIRE)
1602 /* doorbell bar mapping */
1603 amdgpu_doorbell_init(adev);
d38ceaf9
AD
1604
1605 /* io port mapping */
1606 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1607 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
1608 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
1609 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
1610 break;
1611 }
1612 }
1613 if (adev->rio_mem == NULL)
1614 DRM_ERROR("Unable to find PCI I/O BAR\n");
1615
1616 /* early init functions */
1617 r = amdgpu_early_init(adev);
1618 if (r)
1619 return r;
1620
1621 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
1622 /* this will fail for cards that aren't VGA class devices, just
1623 * ignore it */
1624 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
1625
1626 if (amdgpu_runtime_pm == 1)
1627 runtime = true;
e9bef455 1628 if (amdgpu_device_is_px(ddev))
d38ceaf9
AD
1629 runtime = true;
1630 vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
1631 if (runtime)
1632 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
1633
1634 /* Read BIOS */
83ba126a
AD
1635 if (!amdgpu_get_bios(adev)) {
1636 r = -EINVAL;
1637 goto failed;
1638 }
d38ceaf9
AD
1639 /* Must be an ATOMBIOS */
1640 if (!adev->is_atom_bios) {
1641 dev_err(adev->dev, "Expecting atombios for GPU\n");
83ba126a
AD
1642 r = -EINVAL;
1643 goto failed;
d38ceaf9
AD
1644 }
1645 r = amdgpu_atombios_init(adev);
2c1a2784
AD
1646 if (r) {
1647 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
83ba126a 1648 goto failed;
2c1a2784 1649 }
d38ceaf9 1650
7e471e6f
AD
1651 /* See if the asic supports SR-IOV */
1652 adev->virtualization.supports_sr_iov =
1653 amdgpu_atombios_has_gpu_virtualization_table(adev);
1654
048765ad
AR
1655 /* Check if we are executing in a virtualized environment */
1656 adev->virtualization.is_virtual = amdgpu_device_is_virtual();
1657 adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
1658
d38ceaf9 1659 /* Post card if necessary */
048765ad
AR
1660 if (!amdgpu_card_posted(adev) ||
1661 (adev->virtualization.is_virtual &&
48a70e1c 1662 !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
d38ceaf9
AD
1663 if (!adev->bios) {
1664 dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
83ba126a
AD
1665 r = -EINVAL;
1666 goto failed;
d38ceaf9
AD
1667 }
1668 DRM_INFO("GPU not posted. posting now...\n");
1669 amdgpu_atom_asic_init(adev->mode_info.atom_context);
1670 }
1671
1672 /* Initialize clocks */
1673 r = amdgpu_atombios_get_clock_info(adev);
2c1a2784
AD
1674 if (r) {
1675 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
83ba126a 1676 goto failed;
2c1a2784 1677 }
d38ceaf9
AD
1678 /* init i2c buses */
1679 amdgpu_atombios_i2c_init(adev);
1680
1681 /* Fence driver */
1682 r = amdgpu_fence_driver_init(adev);
2c1a2784
AD
1683 if (r) {
1684 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
83ba126a 1685 goto failed;
2c1a2784 1686 }
d38ceaf9
AD
1687
1688 /* init the mode config */
1689 drm_mode_config_init(adev->ddev);
1690
1691 r = amdgpu_init(adev);
1692 if (r) {
2c1a2784 1693 dev_err(adev->dev, "amdgpu_init failed\n");
d38ceaf9 1694 amdgpu_fini(adev);
83ba126a 1695 goto failed;
d38ceaf9
AD
1696 }
1697
1698 adev->accel_working = true;
1699
95844d20
MO
1700 /* Initialize the buffer migration limit. */
1701 if (amdgpu_moverate >= 0)
1702 max_MBps = amdgpu_moverate;
1703 else
1704 max_MBps = 8; /* Allow 8 MB/s. */
1705 /* Get a log2 for easy divisions. */
1706 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
1707
d38ceaf9
AD
1708 amdgpu_fbdev_init(adev);
1709
1710 r = amdgpu_ib_pool_init(adev);
1711 if (r) {
1712 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
83ba126a 1713 goto failed;
d38ceaf9
AD
1714 }
1715
1716 r = amdgpu_ib_ring_tests(adev);
1717 if (r)
1718 DRM_ERROR("ib ring test failed (%d).\n", r);
1719
1720 r = amdgpu_gem_debugfs_init(adev);
1721 if (r) {
1722 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
1723 }
1724
1725 r = amdgpu_debugfs_regs_init(adev);
1726 if (r) {
1727 DRM_ERROR("registering register debugfs failed (%d).\n", r);
1728 }
1729
50ab2533
HR
1730 r = amdgpu_debugfs_firmware_init(adev);
1731 if (r) {
1732 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
1733 return r;
1734 }
1735
d38ceaf9
AD
1736 if ((amdgpu_testing & 1)) {
1737 if (adev->accel_working)
1738 amdgpu_test_moves(adev);
1739 else
1740 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
1741 }
1742 if ((amdgpu_testing & 2)) {
1743 if (adev->accel_working)
1744 amdgpu_test_syncing(adev);
1745 else
1746 DRM_INFO("amdgpu: acceleration disabled, skipping sync tests\n");
1747 }
1748 if (amdgpu_benchmarking) {
1749 if (adev->accel_working)
1750 amdgpu_benchmark(adev, amdgpu_benchmarking);
1751 else
1752 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
1753 }
1754
1755 /* enable clockgating, etc. after ib tests, etc. since some blocks require
1756 * explicit gating rather than handling it automatically.
1757 */
1758 r = amdgpu_late_init(adev);
2c1a2784
AD
1759 if (r) {
1760 dev_err(adev->dev, "amdgpu_late_init failed\n");
83ba126a 1761 goto failed;
2c1a2784 1762 }
d38ceaf9
AD
1763
1764 return 0;
83ba126a
AD
1765
1766failed:
1767 if (runtime)
1768 vga_switcheroo_fini_domain_pm_ops(adev->dev);
1769 return r;
d38ceaf9
AD
1770}
1771
1772static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);
1773
1774/**
1775 * amdgpu_device_fini - tear down the driver
1776 *
1777 * @adev: amdgpu_device pointer
1778 *
1779 * Tear down the driver info (all asics).
1780 * Called at driver shutdown.
1781 */
1782void amdgpu_device_fini(struct amdgpu_device *adev)
1783{
1784 int r;
1785
1786 DRM_INFO("amdgpu: finishing device.\n");
1787 adev->shutdown = true;
1788 /* evict vram memory */
1789 amdgpu_bo_evict_vram(adev);
1790 amdgpu_ib_pool_fini(adev);
1791 amdgpu_fence_driver_fini(adev);
84b89bdc 1792 drm_crtc_force_disable_all(adev->ddev);
d38ceaf9
AD
1793 amdgpu_fbdev_fini(adev);
1794 r = amdgpu_fini(adev);
8faf0e08
AD
1795 kfree(adev->ip_block_status);
1796 adev->ip_block_status = NULL;
d38ceaf9
AD
1797 adev->accel_working = false;
1798 /* free i2c buses */
1799 amdgpu_i2c_fini(adev);
1800 amdgpu_atombios_fini(adev);
1801 kfree(adev->bios);
1802 adev->bios = NULL;
1803 vga_switcheroo_unregister_client(adev->pdev);
1804 if (adev->flags & AMD_IS_PX)
1805 vga_switcheroo_fini_domain_pm_ops(adev->dev);
1806 vga_client_register(adev->pdev, NULL, NULL, NULL);
1807 if (adev->rio_mem)
1808 pci_iounmap(adev->pdev, adev->rio_mem);
1809 adev->rio_mem = NULL;
1810 iounmap(adev->rmmio);
1811 adev->rmmio = NULL;
1812 if (adev->asic_type >= CHIP_BONAIRE)
1813 amdgpu_doorbell_fini(adev);
1814 amdgpu_debugfs_regs_cleanup(adev);
1815 amdgpu_debugfs_remove_files(adev);
1816}
1817
1818
1819/*
1820 * Suspend & resume.
1821 */
1822/**
810ddc3a 1823 * amdgpu_device_suspend - initiate device suspend
1824 *
 1825 * @dev: drm dev pointer
 1826 * @suspend: put the pci device into a low power state (D3hot)
1827 *
1828 * Puts the hw in the suspend state (all asics).
1829 * Returns 0 for success or an error on failure.
1830 * Called at driver suspend.
1831 */
810ddc3a 1832int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
1833{
1834 struct amdgpu_device *adev;
1835 struct drm_crtc *crtc;
1836 struct drm_connector *connector;
5ceb54c6 1837 int r;
1838
1839 if (dev == NULL || dev->dev_private == NULL) {
1840 return -ENODEV;
1841 }
1842
1843 adev = dev->dev_private;
1844
e313de7e 1845 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1846 return 0;
1847
1848 drm_kms_helper_poll_disable(dev);
1849
1850 /* turn off display hw */
4c7fbc39 1851 drm_modeset_lock_all(dev);
1852 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1853 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1854 }
4c7fbc39 1855 drm_modeset_unlock_all(dev);
d38ceaf9 1856
756e6880 1857 /* unpin the front buffers and cursors */
d38ceaf9 1858 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
756e6880 1859 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1860 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
1861 struct amdgpu_bo *robj;
1862
1863 if (amdgpu_crtc->cursor_bo) {
1864 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
1865 r = amdgpu_bo_reserve(aobj, false);
1866 if (r == 0) {
1867 amdgpu_bo_unpin(aobj);
1868 amdgpu_bo_unreserve(aobj);
1869 }
1870 }
1871
1872 if (rfb == NULL || rfb->obj == NULL) {
1873 continue;
1874 }
1875 robj = gem_to_amdgpu_bo(rfb->obj);
1876 /* don't unpin kernel fb objects */
1877 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
1878 r = amdgpu_bo_reserve(robj, false);
1879 if (r == 0) {
1880 amdgpu_bo_unpin(robj);
1881 amdgpu_bo_unreserve(robj);
1882 }
1883 }
1884 }
1885 /* evict vram memory */
1886 amdgpu_bo_evict_vram(adev);
1887
5ceb54c6 1888 amdgpu_fence_driver_suspend(adev);
1889
1890 r = amdgpu_suspend(adev);
1891
1892 /* evict remaining vram memory */
1893 amdgpu_bo_evict_vram(adev);
1894
1895 pci_save_state(dev->pdev);
1896 if (suspend) {
1897 /* Shut down the device */
1898 pci_disable_device(dev->pdev);
1899 pci_set_power_state(dev->pdev, PCI_D3hot);
74b0b157 1900 } else {
1901 r = amdgpu_asic_reset(adev);
1902 if (r)
1903 DRM_ERROR("amdgpu asic reset failed\n");
1904 }
1905
1906 if (fbcon) {
1907 console_lock();
1908 amdgpu_fbdev_set_suspend(adev, 1);
1909 console_unlock();
1910 }
1911 return 0;
1912}
1913
1914/**
810ddc3a 1915 * amdgpu_device_resume - initiate device resume
1916 *
 1917 * @dev: drm dev pointer
1918 *
1919 * Bring the hw back to operating state (all asics).
1920 * Returns 0 for success or an error on failure.
1921 * Called at driver resume.
1922 */
810ddc3a 1923int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
1924{
1925 struct drm_connector *connector;
1926 struct amdgpu_device *adev = dev->dev_private;
756e6880 1927 struct drm_crtc *crtc;
1928 int r;
1929
e313de7e 1930 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1931 return 0;
1932
74b0b157 1933 if (fbcon)
d38ceaf9 1934 console_lock();
74b0b157 1935
1936 if (resume) {
1937 pci_set_power_state(dev->pdev, PCI_D0);
1938 pci_restore_state(dev->pdev);
74b0b157 1939 r = pci_enable_device(dev->pdev);
1940 if (r) {
1941 if (fbcon)
1942 console_unlock();
74b0b157 1943 return r;
1944 }
1945 }
1946
1947 /* post card */
74b0b157 1948 if (!amdgpu_card_posted(adev) || !resume) {
1949 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
1950 if (r)
1951 DRM_ERROR("amdgpu asic init failed\n");
1952 }
1953
1954 r = amdgpu_resume(adev);
1955 if (r)
1956 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
d38ceaf9 1957
1958 amdgpu_fence_driver_resume(adev);
1959
1960 if (resume) {
1961 r = amdgpu_ib_ring_tests(adev);
1962 if (r)
1963 DRM_ERROR("ib ring test failed (%d).\n", r);
1964 }
1965
1966 r = amdgpu_late_init(adev);
1967 if (r)
1968 return r;
1969
1970 /* pin cursors */
1971 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1972 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1973
1974 if (amdgpu_crtc->cursor_bo) {
1975 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
1976 r = amdgpu_bo_reserve(aobj, false);
1977 if (r == 0) {
1978 r = amdgpu_bo_pin(aobj,
1979 AMDGPU_GEM_DOMAIN_VRAM,
1980 &amdgpu_crtc->cursor_addr);
1981 if (r != 0)
1982 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
1983 amdgpu_bo_unreserve(aobj);
1984 }
1985 }
1986 }
1987
1988 /* blat the mode back in */
1989 if (fbcon) {
1990 drm_helper_resume_force_mode(dev);
1991 /* turn on display hw */
4c7fbc39 1992 drm_modeset_lock_all(dev);
1993 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1994 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1995 }
4c7fbc39 1996 drm_modeset_unlock_all(dev);
1997 }
1998
1999 drm_kms_helper_poll_enable(dev);
2000
2001 /*
2002 * Most of the connector probing functions try to acquire runtime pm
2003 * refs to ensure that the GPU is powered on when connector polling is
2004 * performed. Since we're calling this from a runtime PM callback,
2005 * trying to acquire rpm refs will cause us to deadlock.
2006 *
2007 * Since we're guaranteed to be holding the rpm lock, it's safe to
2008 * temporarily disable the rpm helpers so this doesn't deadlock us.
2009 */
2010#ifdef CONFIG_PM
2011 dev->dev->power.disable_depth++;
2012#endif
54fb2a5c 2013 drm_helper_hpd_irq_event(dev);
2014#ifdef CONFIG_PM
2015 dev->dev->power.disable_depth--;
2016#endif
2017
2018 if (fbcon) {
2019 amdgpu_fbdev_set_suspend(adev, 0);
2020 console_unlock();
2021 }
2022
2023 return 0;
2024}
2025
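/*
 * Hang detection: each IP block that implements ->check_soft_reset()
 * inspects its own hardware state and records the result in
 * adev->ip_block_status[i].hang; this helper returns true if any block
 * reported a hang, which gates the GPU reset path further below.
 */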
2026static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2027{
2028 int i;
2029 bool asic_hang = false;
2030
2031 for (i = 0; i < adev->num_ip_blocks; i++) {
2032 if (!adev->ip_block_status[i].valid)
2033 continue;
2034 if (adev->ip_blocks[i].funcs->check_soft_reset)
2035 adev->ip_blocks[i].funcs->check_soft_reset(adev);
2036 if (adev->ip_block_status[i].hang) {
 2037 DRM_INFO("IP block:%d is hung!\n", i);
2038 asic_hang = true;
2039 }
2040 }
2041 return asic_hang;
2042}
2043
2044int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
2045{
2046 int i, r = 0;
2047
2048 for (i = 0; i < adev->num_ip_blocks; i++) {
2049 if (!adev->ip_block_status[i].valid)
2050 continue;
2051 if (adev->ip_block_status[i].hang &&
2052 adev->ip_blocks[i].funcs->pre_soft_reset) {
2053 r = adev->ip_blocks[i].funcs->pre_soft_reset(adev);
2054 if (r)
2055 return r;
2056 }
2057 }
2058
2059 return 0;
2060}
2061
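/*
 * A hang in the GMC, SMC, ACP or DCE blocks cannot be cleared by a
 * per-block soft reset, so a hang in any of them escalates straight to
 * a full ASIC reset.
 */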
2062static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2063{
2064 if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang ||
35d782fe 2065 adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang ||
2066 adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang ||
2067 adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) {
 2068 DRM_INFO("Some blocks need a full reset!\n");
2069 return true;
2070 }
2071 return false;
2072}
2073
2074static int amdgpu_soft_reset(struct amdgpu_device *adev)
2075{
2076 int i, r = 0;
2077
2078 for (i = 0; i < adev->num_ip_blocks; i++) {
2079 if (!adev->ip_block_status[i].valid)
2080 continue;
2081 if (adev->ip_block_status[i].hang &&
2082 adev->ip_blocks[i].funcs->soft_reset) {
2083 r = adev->ip_blocks[i].funcs->soft_reset(adev);
2084 if (r)
2085 return r;
2086 }
2087 }
2088
2089 return 0;
2090}
2091
2092static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2093{
2094 int i, r = 0;
2095
2096 for (i = 0; i < adev->num_ip_blocks; i++) {
2097 if (!adev->ip_block_status[i].valid)
2098 continue;
2099 if (adev->ip_block_status[i].hang &&
2100 adev->ip_blocks[i].funcs->post_soft_reset)
2101 r = adev->ip_blocks[i].funcs->post_soft_reset(adev);
2102 if (r)
2103 return r;
2104 }
2105
2106 return 0;
2107}
2108
2109bool amdgpu_need_backup(struct amdgpu_device *adev)
2110{
2111 if (adev->flags & AMD_IS_APU)
2112 return false;
2113
 2114 return amdgpu_lockup_timeout > 0;
2115}
2116
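/*
 * Shadow-BO recovery: a VRAM buffer that still has its GTT shadow is
 * copied back from the shadow after a full reset, since VRAM contents
 * cannot be trusted at that point. The copy is queued on @ring and the
 * resulting fence is handed back so the caller can pipeline the waits.
 */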
2117static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2118 struct amdgpu_ring *ring,
2119 struct amdgpu_bo *bo,
2120 struct fence **fence)
2121{
2122 uint32_t domain;
2123 int r;
2124
2125 if (!bo->shadow)
2126 return 0;
2127
2128 r = amdgpu_bo_reserve(bo, false);
2129 if (r)
2130 return r;
2131 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2132 /* if bo has been evicted, then no need to recover */
2133 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
2134 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
2135 NULL, fence, true);
2136 if (r) {
 2137 DRM_ERROR("recovering page table failed!\n");
2138 goto err;
2139 }
2140 }
2141err:
2142 amdgpu_bo_unreserve(bo);
2143 return r;
2144}
2145
2146/**
2147 * amdgpu_gpu_reset - reset the asic
2148 *
2149 * @adev: amdgpu device pointer
2150 *
2151 * Attempt the reset the GPU if it has hung (all asics).
2152 * Returns 0 for success or an error on failure.
2153 */
2154int amdgpu_gpu_reset(struct amdgpu_device *adev)
2155{
2156 int i, r;
2157 int resched;
35d782fe 2158 bool need_full_reset;
d38ceaf9 2159
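	/*
	 * Overall recovery sequence (as implemented below): verify a hang is
	 * really present, block TTM and park the scheduler threads, force
	 * completion of the now-meaningless hardware fences, try a per-block
	 * soft reset first and fall back to a full ASIC reset (suspend,
	 * reset, resume), then recover GART and shadowed VRAM buffers,
	 * re-run the IB tests and restart the schedulers.
	 */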
2160 if (!amdgpu_check_soft_reset(adev)) {
2161 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2162 return 0;
2163 }
2164
d94aed5a 2165 atomic_inc(&adev->gpu_reset_counter);
d38ceaf9 2166
2167 /* block TTM */
2168 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2169
2170 /* block scheduler */
2171 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2172 struct amdgpu_ring *ring = adev->rings[i];
2173
2174 if (!ring)
2175 continue;
2176 kthread_park(ring->sched.thread);
aa1c8900 2177 amd_sched_hw_job_reset(&ring->sched);
0875dc9e 2178 }
2179 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2180 amdgpu_fence_driver_force_completion(adev);
d38ceaf9 2181
35d782fe 2182 need_full_reset = amdgpu_need_full_reset(adev);
d38ceaf9 2183
2184 if (!need_full_reset) {
2185 amdgpu_pre_soft_reset(adev);
2186 r = amdgpu_soft_reset(adev);
2187 amdgpu_post_soft_reset(adev);
2188 if (r || amdgpu_check_soft_reset(adev)) {
 2189 DRM_INFO("soft reset failed, falling back to full reset!\n");
2190 need_full_reset = true;
2191 }
2192 }
2193
2194 if (need_full_reset) {
2195 /* save scratch */
2196 amdgpu_atombios_scratch_regs_save(adev);
2197 r = amdgpu_suspend(adev);
bfa99269 2198
35d782fe
CZ
2199retry:
2200 /* Disable fb access */
2201 if (adev->mode_info.num_crtc) {
2202 struct amdgpu_mode_mc_save save;
2203 amdgpu_display_stop_mc_access(adev, &save);
2204 amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
2205 }
2206
2207 r = amdgpu_asic_reset(adev);
2208 /* post card */
2209 amdgpu_atom_asic_init(adev->mode_info.atom_context);
2210
2211 if (!r) {
2212 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
2213 r = amdgpu_resume(adev);
2214 }
2215 /* restore scratch */
2216 amdgpu_atombios_scratch_regs_restore(adev);
d38ceaf9 2217 }
d38ceaf9 2218 if (!r) {
e72cfd58 2219 amdgpu_irq_gpu_reset_resume_helper(adev);
2220 if (need_full_reset && amdgpu_need_backup(adev)) {
2221 r = amdgpu_ttm_recover_gart(adev);
2222 if (r)
2223 DRM_ERROR("gart recovery failed!!!\n");
2224 }
2225 r = amdgpu_ib_ring_tests(adev);
2226 if (r) {
2227 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
40019dc4 2228 r = amdgpu_suspend(adev);
53cdccd5 2229 need_full_reset = true;
40019dc4 2230 goto retry;
1f465087 2231 }
 2232 /*
 2233 * Recover VM page tables, since we cannot rely on VRAM being
 2234 * consistent after a full GPU reset.
 2235 */
2236 if (need_full_reset && amdgpu_need_backup(adev)) {
2237 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2238 struct amdgpu_bo *bo, *tmp;
2239 struct fence *fence = NULL, *next = NULL;
2240
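			/*
			 * The restore copies are pipelined: each iteration kicks off
			 * the copy for the current BO (fence returned in 'next') while
			 * waiting on the fence of the previous copy; the last fence is
			 * waited on after the loop.
			 */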
2241 DRM_INFO("recover vram bo from shadow\n");
2242 mutex_lock(&adev->shadow_list_lock);
2243 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2244 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2245 if (fence) {
2246 r = fence_wait(fence, false);
2247 if (r) {
 2248 WARN(r, "recovery from shadow isn't completed\n");
2249 break;
2250 }
2251 }
1f465087 2252
2253 fence_put(fence);
2254 fence = next;
2255 }
2256 mutex_unlock(&adev->shadow_list_lock);
2257 if (fence) {
2258 r = fence_wait(fence, false);
2259 if (r)
 2260 WARN(r, "recovery from shadow isn't completed\n");
2261 }
2262 fence_put(fence);
2263 }
2264 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2265 struct amdgpu_ring *ring = adev->rings[i];
2266 if (!ring)
2267 continue;
53cdccd5 2268
aa1c8900 2269 amd_sched_job_recovery(&ring->sched);
0875dc9e 2270 kthread_unpark(ring->sched.thread);
d38ceaf9 2271 }
d38ceaf9 2272 } else {
2200edac 2273 dev_err(adev->dev, "asic resume failed (%d).\n", r);
d38ceaf9 2274 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2275 if (adev->rings[i]) {
2276 kthread_unpark(adev->rings[i]->sched.thread);
0875dc9e 2277 }
2278 }
2279 }
2280
2281 drm_helper_resume_force_mode(adev->ddev);
2282
2283 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2284 if (r) {
2285 /* bad news, how to tell it to userspace ? */
2286 dev_info(adev->dev, "GPU reset failed\n");
2287 }
2288
2289 return r;
2290}
2291
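/*
 * PCIe capability masks: the amdgpu_pcie_gen_cap / amdgpu_pcie_lane_cap
 * module parameters override detection entirely; devices on the root bus
 * (i.e. APUs) fall back to the defaults; everything else is derived from
 * the DRM PCIe speed/width helpers below.
 */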
2292void amdgpu_get_pcie_info(struct amdgpu_device *adev)
2293{
2294 u32 mask;
2295 int ret;
2296
2297 if (amdgpu_pcie_gen_cap)
2298 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 2299
2300 if (amdgpu_pcie_lane_cap)
2301 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 2302
2303 /* covers APUs as well */
2304 if (pci_is_root_bus(adev->pdev->bus)) {
2305 if (adev->pm.pcie_gen_mask == 0)
2306 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2307 if (adev->pm.pcie_mlw_mask == 0)
2308 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 2309 return;
cd474ba0 2310 }
d0dd7f0c 2311
2312 if (adev->pm.pcie_gen_mask == 0) {
2313 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
2314 if (!ret) {
2315 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
2316 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
2317 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
2318
2319 if (mask & DRM_PCIE_SPEED_25)
2320 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
2321 if (mask & DRM_PCIE_SPEED_50)
2322 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
2323 if (mask & DRM_PCIE_SPEED_80)
2324 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
2325 } else {
2326 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2327 }
2328 }
2329 if (adev->pm.pcie_mlw_mask == 0) {
2330 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
2331 if (!ret) {
2332 switch (mask) {
2333 case 32:
2334 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
2335 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2336 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2337 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2338 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2339 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2340 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2341 break;
2342 case 16:
2343 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2344 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2345 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2346 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2347 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2348 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2349 break;
2350 case 12:
2351 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2352 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2353 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2354 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2355 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2356 break;
2357 case 8:
2358 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2359 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2360 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2361 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2362 break;
2363 case 4:
2364 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2365 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2366 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2367 break;
2368 case 2:
2369 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2370 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2371 break;
2372 case 1:
2373 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
2374 break;
2375 default:
2376 break;
2377 }
2378 } else {
2379 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
2380 }
2381 }
2382}
2383
2384/*
2385 * Debugfs
2386 */
2387int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
06ab6832 2388 const struct drm_info_list *files,
2389 unsigned nfiles)
2390{
2391 unsigned i;
2392
2393 for (i = 0; i < adev->debugfs_count; i++) {
2394 if (adev->debugfs[i].files == files) {
2395 /* Already registered */
2396 return 0;
2397 }
2398 }
2399
2400 i = adev->debugfs_count + 1;
2401 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
2402 DRM_ERROR("Reached maximum number of debugfs components.\n");
2403 DRM_ERROR("Report so we increase "
2404 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
2405 return -EINVAL;
2406 }
2407 adev->debugfs[adev->debugfs_count].files = files;
2408 adev->debugfs[adev->debugfs_count].num_files = nfiles;
2409 adev->debugfs_count = i;
2410#if defined(CONFIG_DEBUG_FS)
2411 drm_debugfs_create_files(files, nfiles,
2412 adev->ddev->control->debugfs_root,
2413 adev->ddev->control);
2414 drm_debugfs_create_files(files, nfiles,
2415 adev->ddev->primary->debugfs_root,
2416 adev->ddev->primary);
2417#endif
2418 return 0;
2419}
2420
2421static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev)
2422{
2423#if defined(CONFIG_DEBUG_FS)
2424 unsigned i;
2425
2426 for (i = 0; i < adev->debugfs_count; i++) {
2427 drm_debugfs_remove_files(adev->debugfs[i].files,
2428 adev->debugfs[i].num_files,
2429 adev->ddev->control);
2430 drm_debugfs_remove_files(adev->debugfs[i].files,
2431 adev->debugfs[i].num_files,
2432 adev->ddev->primary);
2433 }
2434#endif
2435}
2436
2437#if defined(CONFIG_DEBUG_FS)
2438
2439static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
2440 size_t size, loff_t *pos)
2441{
2442 struct amdgpu_device *adev = f->f_inode->i_private;
2443 ssize_t result = 0;
2444 int r;
bd12267d 2445 bool pm_pg_lock, use_bank;
56628159 2446 unsigned instance_bank, sh_bank, se_bank;
2447
2448 if (size & 0x3 || *pos & 0x3)
2449 return -EINVAL;
2450
2451 /* are we reading registers for which a PG lock is necessary? */
2452 pm_pg_lock = (*pos >> 23) & 1;
2453
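	/*
	 * The file offset encodes more than the register address (see the
	 * decode below): bit 62 enables banked (SE/SH/instance) access,
	 * bits 44..53 select the instance, bits 34..43 the SH, bits 24..33
	 * the SE, bit 23 requests the PM mutex, and the low bits (masked
	 * with 0x3FFFF) are the MMIO byte offset. As a hypothetical example,
	 * pread()ing 4 bytes at offset (reg * 4) with the high bits clear
	 * returns a plain RREG32(reg).
	 */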
2454 if (*pos & (1ULL << 62)) {
2455 se_bank = (*pos >> 24) & 0x3FF;
2456 sh_bank = (*pos >> 34) & 0x3FF;
2457 instance_bank = (*pos >> 44) & 0x3FF;
 2458 use_bank = true;
2459 } else {
 2460 use_bank = false;
2461 }
2462
2463 *pos &= 0x3FFFF;
2464
2465 if (use_bank) {
2466 if (sh_bank >= adev->gfx.config.max_sh_per_se ||
2467 se_bank >= adev->gfx.config.max_shader_engines)
2468 return -EINVAL;
2469 mutex_lock(&adev->grbm_idx_mutex);
2470 amdgpu_gfx_select_se_sh(adev, se_bank,
2471 sh_bank, instance_bank);
2472 }
2473
2474 if (pm_pg_lock)
2475 mutex_lock(&adev->pm.mutex);
2476
2477 while (size) {
2478 uint32_t value;
2479
2480 if (*pos > adev->rmmio_size)
56628159 2481 goto end;
2482
2483 value = RREG32(*pos >> 2);
2484 r = put_user(value, (uint32_t *)buf);
2485 if (r) {
2486 result = r;
2487 goto end;
2488 }
2489
2490 result += 4;
2491 buf += 4;
2492 *pos += 4;
2493 size -= 4;
2494 }
2495
2496end:
2497 if (use_bank) {
2498 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2499 mutex_unlock(&adev->grbm_idx_mutex);
2500 }
2501
2502 if (pm_pg_lock)
2503 mutex_unlock(&adev->pm.mutex);
2504
2505 return result;
2506}
2507
2508static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
2509 size_t size, loff_t *pos)
2510{
2511 struct amdgpu_device *adev = f->f_inode->i_private;
2512 ssize_t result = 0;
2513 int r;
2514
2515 if (size & 0x3 || *pos & 0x3)
2516 return -EINVAL;
2517
2518 while (size) {
2519 uint32_t value;
2520
2521 if (*pos > adev->rmmio_size)
2522 return result;
2523
2524 r = get_user(value, (uint32_t *)buf);
2525 if (r)
2526 return r;
2527
2528 WREG32(*pos >> 2, value);
2529
2530 result += 4;
2531 buf += 4;
2532 *pos += 4;
2533 size -= 4;
2534 }
2535
2536 return result;
2537}
2538
2539static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
2540 size_t size, loff_t *pos)
2541{
2542 struct amdgpu_device *adev = f->f_inode->i_private;
2543 ssize_t result = 0;
2544 int r;
2545
2546 if (size & 0x3 || *pos & 0x3)
2547 return -EINVAL;
2548
2549 while (size) {
2550 uint32_t value;
2551
2552 value = RREG32_PCIE(*pos >> 2);
2553 r = put_user(value, (uint32_t *)buf);
2554 if (r)
2555 return r;
2556
2557 result += 4;
2558 buf += 4;
2559 *pos += 4;
2560 size -= 4;
2561 }
2562
2563 return result;
2564}
2565
2566static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
2567 size_t size, loff_t *pos)
2568{
2569 struct amdgpu_device *adev = f->f_inode->i_private;
2570 ssize_t result = 0;
2571 int r;
2572
2573 if (size & 0x3 || *pos & 0x3)
2574 return -EINVAL;
2575
2576 while (size) {
2577 uint32_t value;
2578
2579 r = get_user(value, (uint32_t *)buf);
2580 if (r)
2581 return r;
2582
2583 WREG32_PCIE(*pos >> 2, value);
2584
2585 result += 4;
2586 buf += 4;
2587 *pos += 4;
2588 size -= 4;
2589 }
2590
2591 return result;
2592}
2593
2594static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
2595 size_t size, loff_t *pos)
2596{
2597 struct amdgpu_device *adev = f->f_inode->i_private;
2598 ssize_t result = 0;
2599 int r;
2600
2601 if (size & 0x3 || *pos & 0x3)
2602 return -EINVAL;
2603
2604 while (size) {
2605 uint32_t value;
2606
2607 value = RREG32_DIDT(*pos >> 2);
2608 r = put_user(value, (uint32_t *)buf);
2609 if (r)
2610 return r;
2611
2612 result += 4;
2613 buf += 4;
2614 *pos += 4;
2615 size -= 4;
2616 }
2617
2618 return result;
2619}
2620
2621static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
2622 size_t size, loff_t *pos)
2623{
2624 struct amdgpu_device *adev = f->f_inode->i_private;
2625 ssize_t result = 0;
2626 int r;
2627
2628 if (size & 0x3 || *pos & 0x3)
2629 return -EINVAL;
2630
2631 while (size) {
2632 uint32_t value;
2633
2634 r = get_user(value, (uint32_t *)buf);
2635 if (r)
2636 return r;
2637
2638 WREG32_DIDT(*pos >> 2, value);
2639
2640 result += 4;
2641 buf += 4;
2642 *pos += 4;
2643 size -= 4;
2644 }
2645
2646 return result;
2647}
2648
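/*
 * Note: unlike the MMIO/PCIE/DIDT files above, which treat the file offset
 * as a dword index (*pos >> 2), the SMC file below passes the byte offset
 * straight through to RREG32_SMC()/WREG32_SMC().
 */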
2649static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
2650 size_t size, loff_t *pos)
2651{
2652 struct amdgpu_device *adev = f->f_inode->i_private;
2653 ssize_t result = 0;
2654 int r;
2655
2656 if (size & 0x3 || *pos & 0x3)
2657 return -EINVAL;
2658
2659 while (size) {
2660 uint32_t value;
2661
6fc0deaf 2662 value = RREG32_SMC(*pos);
2663 r = put_user(value, (uint32_t *)buf);
2664 if (r)
2665 return r;
2666
2667 result += 4;
2668 buf += 4;
2669 *pos += 4;
2670 size -= 4;
2671 }
2672
2673 return result;
2674}
2675
2676static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
2677 size_t size, loff_t *pos)
2678{
2679 struct amdgpu_device *adev = f->f_inode->i_private;
2680 ssize_t result = 0;
2681 int r;
2682
2683 if (size & 0x3 || *pos & 0x3)
2684 return -EINVAL;
2685
2686 while (size) {
2687 uint32_t value;
2688
2689 r = get_user(value, (uint32_t *)buf);
2690 if (r)
2691 return r;
2692
6fc0deaf 2693 WREG32_SMC(*pos, value);
2694
2695 result += 4;
2696 buf += 4;
2697 *pos += 4;
2698 size -= 4;
2699 }
2700
2701 return result;
2702}
2703
2704static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
2705 size_t size, loff_t *pos)
2706{
2707 struct amdgpu_device *adev = f->f_inode->i_private;
2708 ssize_t result = 0;
2709 int r;
2710 uint32_t *config, no_regs = 0;
2711
2712 if (size & 0x3 || *pos & 0x3)
2713 return -EINVAL;
2714
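	/*
	 * Layout of the blob handed back to userspace: dword 0 is a version
	 * that is bumped whenever fields are appended, followed by the gfx
	 * config fields; rev 1 appended rev_id/pg_flags/cg_flags and rev 2
	 * appended family/external_rev_id (see below).
	 */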
ecab7668 2715 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
2716 if (!config)
2717 return -ENOMEM;
2718
2719 /* version, increment each time something is added */
e9f11dc8 2720 config[no_regs++] = 2;
2721 config[no_regs++] = adev->gfx.config.max_shader_engines;
2722 config[no_regs++] = adev->gfx.config.max_tile_pipes;
2723 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
2724 config[no_regs++] = adev->gfx.config.max_sh_per_se;
2725 config[no_regs++] = adev->gfx.config.max_backends_per_se;
2726 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
2727 config[no_regs++] = adev->gfx.config.max_gprs;
2728 config[no_regs++] = adev->gfx.config.max_gs_threads;
2729 config[no_regs++] = adev->gfx.config.max_hw_contexts;
2730 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
2731 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
2732 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
2733 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
2734 config[no_regs++] = adev->gfx.config.num_tile_pipes;
2735 config[no_regs++] = adev->gfx.config.backend_enable_mask;
2736 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
2737 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
2738 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
2739 config[no_regs++] = adev->gfx.config.num_gpus;
2740 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
2741 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
2742 config[no_regs++] = adev->gfx.config.gb_addr_config;
2743 config[no_regs++] = adev->gfx.config.num_rbs;
2744
2745 /* rev==1 */
2746 config[no_regs++] = adev->rev_id;
2747 config[no_regs++] = adev->pg_flags;
2748 config[no_regs++] = adev->cg_flags;
2749
2750 /* rev==2 */
2751 config[no_regs++] = adev->family;
2752 config[no_regs++] = adev->external_rev_id;
2753
2754 while (size && (*pos < no_regs * 4)) {
2755 uint32_t value;
2756
2757 value = config[*pos >> 2];
2758 r = put_user(value, (uint32_t *)buf);
2759 if (r) {
2760 kfree(config);
2761 return r;
2762 }
2763
2764 result += 4;
2765 buf += 4;
2766 *pos += 4;
2767 size -= 4;
2768 }
2769
2770 kfree(config);
2771 return result;
2772}
2773
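/*
 * Each sensor is exposed as a single 32-bit value: the file offset selects
 * the sensor index (idx = *pos >> 2) and the value is fetched through the
 * powerplay read_sensor() callback, so reads must be exactly 4 bytes.
 */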
2774static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
2775 size_t size, loff_t *pos)
2776{
2777 struct amdgpu_device *adev = f->f_inode->i_private;
2778 int idx, r;
2779 int32_t value;
2780
2781 if (size != 4 || *pos & 0x3)
2782 return -EINVAL;
2783
2784 /* convert offset to sensor number */
2785 idx = *pos >> 2;
2786
2787 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
2788 r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &value);
2789 else
2790 return -EINVAL;
2791
2792 if (!r)
2793 r = put_user(value, (int32_t *)buf);
2794
2795 return !r ? 4 : r;
2796}
1e051413 2797
2798static const struct file_operations amdgpu_debugfs_regs_fops = {
2799 .owner = THIS_MODULE,
2800 .read = amdgpu_debugfs_regs_read,
2801 .write = amdgpu_debugfs_regs_write,
2802 .llseek = default_llseek
2803};
2804static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
2805 .owner = THIS_MODULE,
2806 .read = amdgpu_debugfs_regs_didt_read,
2807 .write = amdgpu_debugfs_regs_didt_write,
2808 .llseek = default_llseek
2809};
2810static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
2811 .owner = THIS_MODULE,
2812 .read = amdgpu_debugfs_regs_pcie_read,
2813 .write = amdgpu_debugfs_regs_pcie_write,
2814 .llseek = default_llseek
2815};
2816static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
2817 .owner = THIS_MODULE,
2818 .read = amdgpu_debugfs_regs_smc_read,
2819 .write = amdgpu_debugfs_regs_smc_write,
2820 .llseek = default_llseek
2821};
2822
2823static const struct file_operations amdgpu_debugfs_gca_config_fops = {
2824 .owner = THIS_MODULE,
2825 .read = amdgpu_debugfs_gca_config_read,
2826 .llseek = default_llseek
2827};
2828
2829static const struct file_operations amdgpu_debugfs_sensors_fops = {
2830 .owner = THIS_MODULE,
2831 .read = amdgpu_debugfs_sensor_read,
2832 .llseek = default_llseek
2833};
2834
2835static const struct file_operations *debugfs_regs[] = {
2836 &amdgpu_debugfs_regs_fops,
2837 &amdgpu_debugfs_regs_didt_fops,
2838 &amdgpu_debugfs_regs_pcie_fops,
2839 &amdgpu_debugfs_regs_smc_fops,
1e051413 2840 &amdgpu_debugfs_gca_config_fops,
f2cdaf20 2841 &amdgpu_debugfs_sensors_fops,
2842};
2843
2844static const char *debugfs_regs_names[] = {
2845 "amdgpu_regs",
2846 "amdgpu_regs_didt",
2847 "amdgpu_regs_pcie",
2848 "amdgpu_regs_smc",
1e051413 2849 "amdgpu_gca_config",
f2cdaf20 2850 "amdgpu_sensors",
adcec288 2851};
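/*
 * These files are created per device under the DRM primary minor's debugfs
 * directory, typically /sys/kernel/debug/dri/<minor>/amdgpu_regs and so on
 * (path given as a conventional example; it depends on where debugfs is
 * mounted).
 */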
2852
2853static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
2854{
2855 struct drm_minor *minor = adev->ddev->primary;
2856 struct dentry *ent, *root = minor->debugfs_root;
2857 unsigned i, j;
2858
2859 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
2860 ent = debugfs_create_file(debugfs_regs_names[i],
2861 S_IFREG | S_IRUGO, root,
2862 adev, debugfs_regs[i]);
2863 if (IS_ERR(ent)) {
2864 for (j = 0; j < i; j++) {
 2865 debugfs_remove(adev->debugfs_regs[j]);
 2866 adev->debugfs_regs[j] = NULL;
2867 }
2868 return PTR_ERR(ent);
2869 }
d38ceaf9 2870
2871 if (!i)
2872 i_size_write(ent->d_inode, adev->rmmio_size);
2873 adev->debugfs_regs[i] = ent;
2874 }
2875
2876 return 0;
2877}
2878
2879static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
2880{
2881 unsigned i;
2882
2883 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
2884 if (adev->debugfs_regs[i]) {
2885 debugfs_remove(adev->debugfs_regs[i]);
2886 adev->debugfs_regs[i] = NULL;
2887 }
2888 }
2889}
2890
2891int amdgpu_debugfs_init(struct drm_minor *minor)
2892{
2893 return 0;
2894}
2895
2896void amdgpu_debugfs_cleanup(struct drm_minor *minor)
2897{
2898}
2899#else
2900static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
2901{
2902 return 0;
2903}
2904static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
d38ceaf9 2905#endif