drm/amdgpu: don't return error for debugfs failed
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"LAST",
};

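/**
 * amdgpu_device_is_px - check if the device is a PX (hybrid graphics) part
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with HG/PX power control,
 * false if not.
 */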
bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
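/**
 * amdgpu_mm_rreg - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @always_indirect: force indirect access via MM_INDEX/MM_DATA
 *
 * Returns the 32 bit value from the offset specified.
 */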
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			bool always_indirect)
{
	uint32_t ret;

	if (amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_rreg(adev, reg);
	}

	if ((reg * 4) < adev->rmmio_size && !always_indirect)
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

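/**
 * amdgpu_mm_wreg - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @always_indirect: force indirect access via MM_INDEX/MM_DATA
 *
 * Writes the value specified to the offset specified.
 */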
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    bool always_indirect)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_wreg(adev, reg, v);
	}

	if ((reg * 4) < adev->rmmio_size && !always_indirect)
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
}

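/**
 * amdgpu_io_rreg - read an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 *
 * Returns the 32 bit value from the offset specified, using the
 * MM_INDEX/MM_DATA pair for offsets beyond the mapped IO range.
 */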
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

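/**
 * amdgpu_io_wreg - write to an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 *
 * Writes the value specified to the offset specified, using the
 * MM_INDEX/MM_DATA pair for offsets beyond the mapped IO range.
 */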
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

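/**
 * amdgpu_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates, pins and maps a single page of VRAM for use as a
 * scratch area by the driver (all asics).
 * Returns 0 on success, negative error code on failure.
 */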
static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL, &adev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(adev->vram_scratch.robj,
			  AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
		return r;
	}
	r = amdgpu_bo_kmap(adev->vram_scratch.robj,
				(void **)&adev->vram_scratch.ptr);
	if (r)
		amdgpu_bo_unpin(adev->vram_scratch.robj);
	amdgpu_bo_unreserve(adev->vram_scratch.robj);

	return r;
}

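/**
 * amdgpu_vram_scratch_fini - free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Unmaps, unpins and frees the VRAM scratch page (all asics).
 * Used at driver shutdown.
 */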
static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		return;
	}
	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		amdgpu_bo_kunmap(adev->vram_scratch.robj);
		amdgpu_bo_unpin(adev->vram_scratch.robj);
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
	}
	amdgpu_bo_unref(&adev->vram_scratch.robj);
}

/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

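/**
 * amdgpu_pci_config_reset - reset the asic via PCI config space
 *
 * @adev: amdgpu_device pointer
 *
 * Writes the reset magic value (AMDGPU_ASIC_RESET_DATA) to PCI config
 * space register 0x7c to reset the asic.
 */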
void amdgpu_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32));
	if (adev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size);

	return 0;
}

/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				phys_addr_t *aperture_base,
				size_t *aperture_size,
				size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

/*
 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages
 * in memory with the status of certain GPU events (fences, ring pointers,
 * etc.).
 */

/**
 * amdgpu_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates the writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
	}

	return 0;
}

/**
 * amdgpu_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either PCI aperture address or
 * for IGP TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is smaller than the aperture
 * size (Novell bug 204882, along with lots of Ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882, along with lots
 * of Ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gtt_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left, then we adjust the
 * GTT size. Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_need_post - check if the hw needs to be posted or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup,
 * or if post is needed because a hw reset was performed.
 * Returns true if post is needed, false if not.
 */
bool amdgpu_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}
	/* then check MEM_SIZE, in case the crtcs are off */
	reg = RREG32(mmCONFIG_MEMSIZE);

	if (reg)
		return false;

	return true;
}

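/**
 * amdgpu_vpost_needed - check whether the driver should post the asic
 *
 * @adev: amdgpu_device pointer
 *
 * SR-IOV VFs never post. In whole GPU pass-through, old Fiji SMC
 * firmware (below version 22.15) requires a vPost after VM reboot.
 * Otherwise this defers to amdgpu_need_post().
 * Returns true if a vPost should be executed, false if not.
 */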
static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old smc fw still needs the driver to do a vPost, otherwise the gpu
		 * hangs. smc fw versions above 22.15 don't have this flaw, so we force
		 * vPost to be executed for smc versions below 22.15
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}
	return amdgpu_need_post(adev);
}

/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page)
		return 0;
	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (adev->dummy_page.page == NULL)
		return -ENOMEM;
	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(adev->dummy_page.page);
		adev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page == NULL)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(adev->dummy_page.page);
	adev->dummy_page.page = NULL;
}

/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios.  The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.).  See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the MMIO register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32(reg, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the IO register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32_IO(reg, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg);
	return r;
}

/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
	if (adev->mode_info.atom_context) {
		kfree(adev->mode_info.atom_context->scratch);
		kfree(adev->mode_info.atom_context->iio);
	}
	kfree(adev->mode_info.atom_context);
	adev->mode_info.atom_context = NULL;
	kfree(adev->mode_info.atom_card_info);
	adev->mode_info.atom_card_info = NULL;
}

/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	adev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = adev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (adev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
	if (!adev->mode_info.atom_context) {
		amdgpu_atombios_fini(adev);
		return -ENOMEM;
	}

	mutex_init(&adev->mode_info.atom_context->mutex);
	amdgpu_atombios_scratch_regs_init(adev);
	amdgpu_atom_allocate_fb_scratch(adev->mode_info.atom_context);
	return 0;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 */
static bool amdgpu_check_pot_argument(int arg)
{
	return (arg & (arg - 1)) == 0;
}

/**
 * amdgpu_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1) {
		/* gtt size must be greater or equal to 32M */
		if (amdgpu_gart_size < 32) {
			dev_warn(adev->dev, "gart size (%d) too small\n",
				 amdgpu_gart_size);
			amdgpu_gart_size = -1;
		}
	}

	if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	/*
	 * Max GPUVM size for Cayman, SI and CI is 40 bits.
	 */
	if (amdgpu_vm_size > 1024) {
		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1) {

		/* Total bits covered by PD + PTs */
		unsigned bits = ilog2(amdgpu_vm_size) + 18;

		/* Make sure the PD is 4K in size up to 8GB address space.
		   Above that split equally between PD and PTs */
		if (amdgpu_vm_size <= 8)
			amdgpu_vm_block_size = bits - 9;
		else
			amdgpu_vm_block_size = (bits + 3) / 2;

	} else if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = 9;
	}

	if (amdgpu_vm_block_size > 24 ||
	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
		dev_warn(adev->dev, "VM page table size (%d) too large\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = 9;
	}

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver.  Suspends or resumes the
 * asic before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver.  Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

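/**
 * amdgpu_set_clockgating_state - set the clockgating state of an IP block
 *
 * @adev: amdgpu_device pointer
 * @block_type: type of the IP block to program
 * @state: clockgating state to set (gate or ungate)
 *
 * Walks the valid IP blocks and calls set_clockgating_state on the
 * first block matching @block_type.
 * Returns 0 on success, negative error code on failure.
 */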
int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				  enum amd_ip_block_type block_type,
				  enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     state);
			if (r)
				return r;
			break;
		}
	}
	return r;
}

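/**
 * amdgpu_set_powergating_state - set the powergating state of an IP block
 *
 * @adev: amdgpu_device pointer
 * @block_type: type of the IP block to program
 * @state: powergating state to set (gate or ungate)
 *
 * Walks the valid IP blocks and calls set_powergating_state on the
 * first block matching @block_type.
 * Returns 0 on success, negative error code on failure.
 */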
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				  enum amd_ip_block_type block_type,
				  enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
										     state);
			if (r)
				return r;
			break;
		}
	}
	return r;
}

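/**
 * amdgpu_get_clockgating_state - fetch the current clockgating state
 *
 * @adev: amdgpu_device pointer
 * @flags: output mask of AMD_CG_SUPPORT flags
 *
 * Queries every valid IP block that implements get_clockgating_state
 * and accumulates the enabled clockgating features into @flags.
 */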
void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

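/**
 * amdgpu_wait_for_idle - wait for an IP block to become idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: type of the IP block to wait on
 *
 * Calls the wait_for_idle callback of the first valid IP block
 * matching @block_type.
 * Returns 0 on success, negative error code on failure.
 */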
int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}

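/**
 * amdgpu_is_idle - check whether an IP block is idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: type of the IP block to check
 *
 * Returns true if the first valid IP block matching @block_type is
 * idle, or if no such block exists.
 */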
bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}

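/**
 * amdgpu_get_ip_block - find an IP block by type
 *
 * @adev: amdgpu_device pointer
 * @type: type of the IP block to look up
 *
 * Returns a pointer to the first IP block of the given @type,
 * or NULL if the asic doesn't have one.
 */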
struct amdgpu_ip_block *amdgpu_get_ip_block(struct amdgpu_device *adev,
					    enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the IP block version is equal or greater,
 * 1 if it is smaller or the ip_block doesn't exist.
 */
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

/**
 * amdgpu_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_ip_block_add(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

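/*
 * Parse the amdgpu_virtual_display module parameter: a semicolon
 * separated list of PCI addresses (or "all"), each optionally
 * followed by ",<num_crtc>". If this device's PCI address matches,
 * enable virtual display with the requested number of crtcs (1-6).
 */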
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

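/*
 * amdgpu_early_init - determine the asic family, build the IP block
 * list for the asic and run each block's early_init callback, honoring
 * the amdgpu_ip_block_mask module parameter. For SR-IOV VFs, full GPU
 * access is requested from the host first.
 */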
static int amdgpu_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d\n", i);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}

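/*
 * amdgpu_init - run sw_init for all valid IP blocks and then hw_init.
 * The GMC block is brought up first so that GPU memory (VRAM scratch
 * buffer, writeback pages and, for SR-IOV, the CSA) can be allocated
 * before the remaining blocks initialize.
 */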
static int amdgpu_init(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.sw = true;
		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				return r;
			}
			r = amdgpu_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_wb_init failed %d\n", r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					return r;
				}
			}
		}
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		/* gmc hw init is done early */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}

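/*
 * amdgpu_late_init - run the late_init callback of each valid IP block
 * and enable clockgating to save power. UVD and VCE are skipped here
 * since their gating is handled specially.
 */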
static int amdgpu_late_init(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.late_initialized = true;
		}
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_GATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}

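/*
 * amdgpu_fini - tear down the IP blocks in reverse order: the SMC is
 * disabled first, then each block is ungated and hw_fini'd, followed
 * by sw_fini and finally late_fini.
 */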
static int amdgpu_fini(struct amdgpu_device *adev)
{
	int i, r;

	/* need to disable SMC first */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
			/* XXX handle errors */
			if (r) {
				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
			adev->ip_blocks[i].status.hw = false;
			break;
		}
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_wb_fini(adev);
			amdgpu_vram_scratch_fini(adev);
		}

		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
			adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}

		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}

		adev->ip_blocks[i].status.hw = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
		adev->ip_blocks[i].status.late_initialized = false;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
1563                 amdgpu_virt_release_full_gpu(adev, false);
1564         }
1565
1566         return 0;
1567 }
1568
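/**
 * amdgpu_suspend - run suspend for all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Ungates the SMC block first, then ungates and suspends the
 * remaining IP blocks in reverse init order. Errors from the
 * individual hooks are logged but not propagated.
 * Returns 0.
 */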
1569 int amdgpu_suspend(struct amdgpu_device *adev)
1570 {
1571         int i, r;
1572
1573         if (amdgpu_sriov_vf(adev))
1574                 amdgpu_virt_request_full_gpu(adev, false);
1575
1576         /* ungate SMC block first */
1577         r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1578                                          AMD_CG_STATE_UNGATE);
1579         if (r) {
1580                 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
1581         }
1582
1583         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1584                 if (!adev->ip_blocks[i].status.valid)
1585                         continue;
1586                 /* ungate blocks so that suspend can properly shut them down */
1587                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
1588                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1589                                                                                      AMD_CG_STATE_UNGATE);
1590                         if (r) {
1591                                 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1592                                           adev->ip_blocks[i].version->funcs->name, r);
1593                         }
1594                 }
1596                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
1597                 /* XXX handle errors */
1598                 if (r) {
1599                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
1600                                   adev->ip_blocks[i].version->funcs->name, r);
1601                 }
1602         }
1603
1604         if (amdgpu_sriov_vf(adev))
1605                 amdgpu_virt_release_full_gpu(adev, false);
1606
1607         return 0;
1608 }
1609
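/**
 * amdgpu_sriov_resume_early - resume COMMON, GMC and IH blocks
 *
 * @adev: amdgpu_device pointer
 *
 * First stage of the SRIOV reset path: resume only the COMMON, GMC
 * and IH blocks, which must be up before the GART can be recovered.
 * Returns 0 on success or the failing block's error code.
 */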
1610 static int amdgpu_sriov_resume_early(struct amdgpu_device *adev)
1611 {
1612         int i, r;
1613
1614         for (i = 0; i < adev->num_ip_blocks; i++) {
1615                 if (!adev->ip_blocks[i].status.valid)
1616                         continue;
1617
1618                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1619                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1620                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
1621                         r = adev->ip_blocks[i].version->funcs->resume(adev);
1622                         if (r) {
1623                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
1624                                           adev->ip_blocks[i].version->funcs->name, r);
1625                                 return r;
1626                         }
1627                 }
1628         }
1629
1630         return 0;
1631 }
1632
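/**
 * amdgpu_sriov_resume_late - resume the remaining IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Counterpart of amdgpu_sriov_resume_early(): resume every valid IP
 * block except COMMON, GMC and IH.
 * Returns 0 on success or the failing block's error code.
 */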
1633 static int amdgpu_sriov_resume_late(struct amdgpu_device *adev)
1634 {
1635         int i, r;
1636
1637         for (i = 0; i < adev->num_ip_blocks; i++) {
1638                 if (!adev->ip_blocks[i].status.valid)
1639                         continue;
1640
1641                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1642                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1643                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
1644                         continue;
1645
1646                 r = adev->ip_blocks[i].version->funcs->resume(adev);
1647                 if (r) {
1648                         DRM_ERROR("resume of IP block <%s> failed %d\n",
1649                                   adev->ip_blocks[i].version->funcs->name, r);
1650                         return r;
1651                 }
1652         }
1653
1654         return 0;
1655 }
1656
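/**
 * amdgpu_resume - run resume for all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Calls the resume hook of every valid IP block in init order.
 * Returns 0 on success or the failing block's error code.
 */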
1657 static int amdgpu_resume(struct amdgpu_device *adev)
1658 {
1659         int i, r;
1660
1661         for (i = 0; i < adev->num_ip_blocks; i++) {
1662                 if (!adev->ip_blocks[i].status.valid)
1663                         continue;
1664                 r = adev->ip_blocks[i].version->funcs->resume(adev);
1665                 if (r) {
1666                         DRM_ERROR("resume of IP block <%s> failed %d\n",
1667                                   adev->ip_blocks[i].version->funcs->name, r);
1668                         return r;
1669                 }
1670         }
1671
1672         return 0;
1673 }
1674
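/**
 * amdgpu_device_detect_sriov_bios - check for an SRIOV capable vbios
 *
 * @adev: amdgpu_device pointer
 *
 * Marks the device as having an SRIOV vbios if the atombios image
 * contains a GPU virtualization table.
 */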
1675 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
1676 {
1677         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
1678                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1679 }
1680
1681 /**
1682  * amdgpu_device_init - initialize the driver
1683  *
1684  * @adev: amdgpu_device pointer
1685  * @ddev: drm dev pointer
1686  * @pdev: pci dev pointer
1687  * @flags: driver flags
1688  *
1689  * Initializes the driver info and hw (all asics).
1690  * Returns 0 for success or an error on failure.
1691  * Called at driver startup.
1692  */
1693 int amdgpu_device_init(struct amdgpu_device *adev,
1694                        struct drm_device *ddev,
1695                        struct pci_dev *pdev,
1696                        uint32_t flags)
1697 {
1698         int r, i;
1699         bool runtime = false;
1700         u32 max_MBps;
1701
1702         adev->shutdown = false;
1703         adev->dev = &pdev->dev;
1704         adev->ddev = ddev;
1705         adev->pdev = pdev;
1706         adev->flags = flags;
1707         adev->asic_type = flags & AMD_ASIC_MASK;
1708         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
1709         adev->mc.gtt_size = 512 * 1024 * 1024;
1710         adev->accel_working = false;
1711         adev->num_rings = 0;
1712         adev->mman.buffer_funcs = NULL;
1713         adev->mman.buffer_funcs_ring = NULL;
1714         adev->vm_manager.vm_pte_funcs = NULL;
1715         adev->vm_manager.vm_pte_num_rings = 0;
1716         adev->gart.gart_funcs = NULL;
1717         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
1718
1719         adev->smc_rreg = &amdgpu_invalid_rreg;
1720         adev->smc_wreg = &amdgpu_invalid_wreg;
1721         adev->pcie_rreg = &amdgpu_invalid_rreg;
1722         adev->pcie_wreg = &amdgpu_invalid_wreg;
1723         adev->pciep_rreg = &amdgpu_invalid_rreg;
1724         adev->pciep_wreg = &amdgpu_invalid_wreg;
1725         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
1726         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
1727         adev->didt_rreg = &amdgpu_invalid_rreg;
1728         adev->didt_wreg = &amdgpu_invalid_wreg;
1729         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
1730         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
1731         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
1732         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1733
1735         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1736                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
1737                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
1738
1739         /* mutex initialization is done here so we can
1740          * call these functions again without locking issues */
1741         mutex_init(&adev->vm_manager.lock);
1742         atomic_set(&adev->irq.ih.lock, 0);
1743         mutex_init(&adev->pm.mutex);
1744         mutex_init(&adev->gfx.gpu_clock_mutex);
1745         mutex_init(&adev->srbm_mutex);
1746         mutex_init(&adev->grbm_idx_mutex);
1747         mutex_init(&adev->mn_lock);
1748         hash_init(adev->mn_hash);
1749
1750         amdgpu_check_arguments(adev);
1751
1752         /* Registers mapping */
1753         /* TODO: block userspace mapping of io register */
1754         spin_lock_init(&adev->mmio_idx_lock);
1755         spin_lock_init(&adev->smc_idx_lock);
1756         spin_lock_init(&adev->pcie_idx_lock);
1757         spin_lock_init(&adev->uvd_ctx_idx_lock);
1758         spin_lock_init(&adev->didt_idx_lock);
1759         spin_lock_init(&adev->gc_cac_idx_lock);
1760         spin_lock_init(&adev->audio_endpt_idx_lock);
1761         spin_lock_init(&adev->mm_stats.lock);
1762
1763         INIT_LIST_HEAD(&adev->shadow_list);
1764         mutex_init(&adev->shadow_list_lock);
1765
1766         INIT_LIST_HEAD(&adev->gtt_list);
1767         spin_lock_init(&adev->gtt_list_lock);
1768
1769         if (adev->asic_type >= CHIP_BONAIRE) {
1770                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
1771                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
1772         } else {
1773                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
1774                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
1775         }
1776
1777         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
1778         if (adev->rmmio == NULL) {
1779                 return -ENOMEM;
1780         }
1781         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
1782         DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
1783
1784         if (adev->asic_type >= CHIP_BONAIRE)
1785                 /* doorbell bar mapping */
1786                 amdgpu_doorbell_init(adev);
1787
1788         /* io port mapping */
1789         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1790                 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
1791                         adev->rio_mem_size = pci_resource_len(adev->pdev, i);
1792                         adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
1793                         break;
1794                 }
1795         }
1796         if (adev->rio_mem == NULL)
1797                 DRM_INFO("PCI I/O BAR not found.\n");
1798
1799         /* early init functions */
1800         r = amdgpu_early_init(adev);
1801         if (r)
1802                 return r;
1803
1804         /* if we have more than one VGA card, disable the amdgpu VGA resources */
1805         /* this will fail for cards that aren't VGA class devices, just
1806          * ignore it */
1807         vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
1808
1809         if (amdgpu_runtime_pm == 1)
1810                 runtime = true;
1811         if (amdgpu_device_is_px(ddev))
1812                 runtime = true;
1813         vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
1814         if (runtime)
1815                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
1816
1817         /* Read BIOS */
1818         if (!amdgpu_get_bios(adev)) {
1819                 r = -EINVAL;
1820                 goto failed;
1821         }
1822
1823         r = amdgpu_atombios_init(adev);
1824         if (r) {
1825                 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
1826                 goto failed;
1827         }
1828
1829         /* detect if we are with an SRIOV vbios */
1830         amdgpu_device_detect_sriov_bios(adev);
1831
1832         /* Post card if necessary */
1833         if (amdgpu_vpost_needed(adev)) {
1834                 if (!adev->bios) {
1835                         dev_err(adev->dev, "no vBIOS found\n");
1836                         r = -EINVAL;
1837                         goto failed;
1838                 }
1839                 DRM_INFO("GPU posting now...\n");
1840                 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
1841                 if (r) {
1842                         dev_err(adev->dev, "gpu post error!\n");
1843                         goto failed;
1844                 }
1845         } else {
1846                 DRM_INFO("GPU post is not needed\n");
1847         }
1848
1849         /* Initialize clocks */
1850         r = amdgpu_atombios_get_clock_info(adev);
1851         if (r) {
1852                 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
1853                 goto failed;
1854         }
1855         /* init i2c buses */
1856         amdgpu_atombios_i2c_init(adev);
1857
1858         /* Fence driver */
1859         r = amdgpu_fence_driver_init(adev);
1860         if (r) {
1861                 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
1862                 goto failed;
1863         }
1864
1865         /* init the mode config */
1866         drm_mode_config_init(adev->ddev);
1867
1868         r = amdgpu_init(adev);
1869         if (r) {
1870                 dev_err(adev->dev, "amdgpu_init failed\n");
1871                 amdgpu_fini(adev);
1872                 goto failed;
1873         }
1874
1875         adev->accel_working = true;
1876
1877         /* Initialize the buffer migration limit. */
1878         if (amdgpu_moverate >= 0)
1879                 max_MBps = amdgpu_moverate;
1880         else
1881                 max_MBps = 8; /* Allow 8 MB/s. */
1882         /* Get a log2 for easy divisions. */
1883         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
1884
1885         r = amdgpu_ib_pool_init(adev);
1886         if (r) {
1887                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
1888                 goto failed;
1889         }
1890
1891         r = amdgpu_ib_ring_tests(adev);
1892         if (r)
1893                 DRM_ERROR("ib ring test failed (%d).\n", r);
1894
1895         amdgpu_fbdev_init(adev);
1896
1897         r = amdgpu_gem_debugfs_init(adev);
1898         if (r) {
1899                 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
1900         }
1901
1902         r = amdgpu_debugfs_regs_init(adev);
1903         if (r) {
1904                 DRM_ERROR("registering register debugfs failed (%d).\n", r);
1905         }
1906
1907         r = amdgpu_debugfs_firmware_init(adev);
1908         if (r) {
1909                 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
1910         }
1911
1912         if (amdgpu_testing & 1) {
1913                 if (adev->accel_working)
1914                         amdgpu_test_moves(adev);
1915                 else
1916                         DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
1917         }
1918         if (amdgpu_testing & 2) {
1919                 if (adev->accel_working)
1920                         amdgpu_test_syncing(adev);
1921                 else
1922                         DRM_INFO("amdgpu: acceleration disabled, skipping sync tests\n");
1923         }
1924         if (amdgpu_benchmarking) {
1925                 if (adev->accel_working)
1926                         amdgpu_benchmark(adev, amdgpu_benchmarking);
1927                 else
1928                         DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
1929         }
1930
1931         /* enable clockgating, etc. after ib tests, etc. since some blocks require
1932          * explicit gating rather than handling it automatically.
1933          */
1934         r = amdgpu_late_init(adev);
1935         if (r) {
1936                 dev_err(adev->dev, "amdgpu_late_init failed\n");
1937                 goto failed;
1938         }
1939
1940         return 0;
1941
1942 failed:
1943         if (runtime)
1944                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
1945         return r;
1946 }
1947
1948 /**
1949  * amdgpu_device_fini - tear down the driver
1950  *
1951  * @adev: amdgpu_device pointer
1952  *
1953  * Tear down the driver info (all asics).
1954  * Called at driver shutdown.
1955  */
1956 void amdgpu_device_fini(struct amdgpu_device *adev)
1957 {
1958         int r;
1959
1960         DRM_INFO("amdgpu: finishing device.\n");
1961         adev->shutdown = true;
1962         drm_crtc_force_disable_all(adev->ddev);
1963         /* evict vram memory */
1964         amdgpu_bo_evict_vram(adev);
1965         amdgpu_ib_pool_fini(adev);
1966         amdgpu_fence_driver_fini(adev);
1967         amdgpu_fbdev_fini(adev);
1968         r = amdgpu_fini(adev);
1969         adev->accel_working = false;
1970         /* free i2c buses */
1971         amdgpu_i2c_fini(adev);
1972         amdgpu_atombios_fini(adev);
1973         kfree(adev->bios);
1974         adev->bios = NULL;
1975         vga_switcheroo_unregister_client(adev->pdev);
1976         if (adev->flags & AMD_IS_PX)
1977                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
1978         vga_client_register(adev->pdev, NULL, NULL, NULL);
1979         if (adev->rio_mem)
1980                 pci_iounmap(adev->pdev, adev->rio_mem);
1981         adev->rio_mem = NULL;
1982         iounmap(adev->rmmio);
1983         adev->rmmio = NULL;
1984         if (adev->asic_type >= CHIP_BONAIRE)
1985                 amdgpu_doorbell_fini(adev);
1986         amdgpu_debugfs_regs_cleanup(adev);
1987 }
1988
1989
1990 /*
1991  * Suspend & resume.
1992  */
1993 /**
1994  * amdgpu_device_suspend - initiate device suspend
1995  *
1996  * @dev: drm dev pointer
1997  * @suspend: true if the device is to be put into a low power state
1998  *
1999  * Puts the hw in the suspend state (all asics).
2000  * Returns 0 for success or an error on failure.
2001  * Called at driver suspend.
2002  */
2003 int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
2004 {
2005         struct amdgpu_device *adev;
2006         struct drm_crtc *crtc;
2007         struct drm_connector *connector;
2008         int r;
2009
2010         if (dev == NULL || dev->dev_private == NULL) {
2011                 return -ENODEV;
2012         }
2013
2014         adev = dev->dev_private;
2015
2016         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2017                 return 0;
2018
2019         drm_kms_helper_poll_disable(dev);
2020
2021         /* turn off display hw */
2022         drm_modeset_lock_all(dev);
2023         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2024                 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2025         }
2026         drm_modeset_unlock_all(dev);
2027
2028         /* unpin the front buffers and cursors */
2029         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2030                 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2031                 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2032                 struct amdgpu_bo *robj;
2033
2034                 if (amdgpu_crtc->cursor_bo) {
2035                         struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2036                         r = amdgpu_bo_reserve(aobj, false);
2037                         if (r == 0) {
2038                                 amdgpu_bo_unpin(aobj);
2039                                 amdgpu_bo_unreserve(aobj);
2040                         }
2041                 }
2042
2043                 if (rfb == NULL || rfb->obj == NULL) {
2044                         continue;
2045                 }
2046                 robj = gem_to_amdgpu_bo(rfb->obj);
2047                 /* don't unpin kernel fb objects */
2048                 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
2049                         r = amdgpu_bo_reserve(robj, false);
2050                         if (r == 0) {
2051                                 amdgpu_bo_unpin(robj);
2052                                 amdgpu_bo_unreserve(robj);
2053                         }
2054                 }
2055         }
2056         /* evict vram memory */
2057         amdgpu_bo_evict_vram(adev);
2058
2059         amdgpu_fence_driver_suspend(adev);
2060
2061         r = amdgpu_suspend(adev);
2062
2063         /* evict remaining vram memory
2064          * This second call to evict vram is to evict the gart page table
2065          * using the CPU.
2066          */
2067         amdgpu_bo_evict_vram(adev);
2068
2069         amdgpu_atombios_scratch_regs_save(adev);
2070         pci_save_state(dev->pdev);
2071         if (suspend) {
2072                 /* Shut down the device */
2073                 pci_disable_device(dev->pdev);
2074                 pci_set_power_state(dev->pdev, PCI_D3hot);
2075         } else {
2076                 r = amdgpu_asic_reset(adev);
2077                 if (r)
2078                         DRM_ERROR("amdgpu asic reset failed\n");
2079         }
2080
2081         if (fbcon) {
2082                 console_lock();
2083                 amdgpu_fbdev_set_suspend(adev, 1);
2084                 console_unlock();
2085         }
2086         return 0;
2087 }
2088
2089 /**
2090  * amdgpu_device_resume - initiate device resume
2091  *
2092  * @dev: drm dev pointer
2093  *
2094  * Bring the hw back to operating state (all asics).
2095  * Returns 0 for success or an error on failure.
2096  * Called at driver resume.
2097  */
2098 int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
2099 {
2100         struct drm_connector *connector;
2101         struct amdgpu_device *adev = dev->dev_private;
2102         struct drm_crtc *crtc;
2103         int r;
2104
2105         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2106                 return 0;
2107
2108         if (fbcon)
2109                 console_lock();
2110
2111         if (resume) {
2112                 pci_set_power_state(dev->pdev, PCI_D0);
2113                 pci_restore_state(dev->pdev);
2114                 r = pci_enable_device(dev->pdev);
2115                 if (r) {
2116                         if (fbcon)
2117                                 console_unlock();
2118                         return r;
2119                 }
2120         }
2121         amdgpu_atombios_scratch_regs_restore(adev);
2122
2123         /* post card */
2124         if (amdgpu_need_post(adev)) {
2125                 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2126                 if (r)
2127                         DRM_ERROR("amdgpu asic init failed\n");
2128         }
2129
2130         r = amdgpu_resume(adev);
2131         if (r)
2132                 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
2133
2134         amdgpu_fence_driver_resume(adev);
2135
2136         if (resume) {
2137                 r = amdgpu_ib_ring_tests(adev);
2138                 if (r)
2139                         DRM_ERROR("ib ring test failed (%d).\n", r);
2140         }
2141
2142         r = amdgpu_late_init(adev);
2143         if (r) {
2144                 if (fbcon)
2145                         console_unlock();
2146                 return r;
2147         }
2148
2149         /* pin cursors */
2150         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2151                 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2152
2153                 if (amdgpu_crtc->cursor_bo) {
2154                         struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2155                         r = amdgpu_bo_reserve(aobj, false);
2156                         if (r == 0) {
2157                                 r = amdgpu_bo_pin(aobj,
2158                                                   AMDGPU_GEM_DOMAIN_VRAM,
2159                                                   &amdgpu_crtc->cursor_addr);
2160                                 if (r != 0)
2161                                         DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2162                                 amdgpu_bo_unreserve(aobj);
2163                         }
2164                 }
2165         }
2166
2167         /* blat the mode back in */
2168         if (fbcon) {
2169                 drm_helper_resume_force_mode(dev);
2170                 /* turn on display hw */
2171                 drm_modeset_lock_all(dev);
2172                 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2173                         drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2174                 }
2175                 drm_modeset_unlock_all(dev);
2176         }
2177
2178         drm_kms_helper_poll_enable(dev);
2179
2180         /*
2181          * Most of the connector probing functions try to acquire runtime pm
2182          * refs to ensure that the GPU is powered on when connector polling is
2183          * performed. Since we're calling this from a runtime PM callback,
2184          * trying to acquire rpm refs will cause us to deadlock.
2185          *
2186          * Since we're guaranteed to be holding the rpm lock, it's safe to
2187          * temporarily disable the rpm helpers so this doesn't deadlock us.
2188          */
2189 #ifdef CONFIG_PM
2190         dev->dev->power.disable_depth++;
2191 #endif
2192         drm_helper_hpd_irq_event(dev);
2193 #ifdef CONFIG_PM
2194         dev->dev->power.disable_depth--;
2195 #endif
2196
2197         if (fbcon) {
2198                 amdgpu_fbdev_set_suspend(adev, 0);
2199                 console_unlock();
2200         }
2201
2202         return 0;
2203 }
2204
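/**
 * amdgpu_check_soft_reset - check which IP blocks are hung
 *
 * @adev: amdgpu_device pointer
 *
 * Asks every valid IP block that implements check_soft_reset whether
 * it is hung, records the answer in its status, and returns true if
 * any block reports a hang.
 */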
2205 static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2206 {
2207         int i;
2208         bool asic_hang = false;
2209
2210         for (i = 0; i < adev->num_ip_blocks; i++) {
2211                 if (!adev->ip_blocks[i].status.valid)
2212                         continue;
2213                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2214                         adev->ip_blocks[i].status.hang =
2215                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2216                 if (adev->ip_blocks[i].status.hang) {
2217                         DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
2218                         asic_hang = true;
2219                 }
2220         }
2221         return asic_hang;
2222 }
2223
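/**
 * amdgpu_pre_soft_reset - prepare hung IP blocks for soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * Runs the pre_soft_reset hook of every hung IP block that provides
 * one.
 * Returns 0 on success or the failing hook's error code.
 */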
2224 static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
2225 {
2226         int i, r = 0;
2227
2228         for (i = 0; i < adev->num_ip_blocks; i++) {
2229                 if (!adev->ip_blocks[i].status.valid)
2230                         continue;
2231                 if (adev->ip_blocks[i].status.hang &&
2232                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2233                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
2234                         if (r)
2235                                 return r;
2236                 }
2237         }
2238
2239         return 0;
2240 }
2241
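/**
 * amdgpu_need_full_reset - check whether a full ASIC reset is needed
 *
 * @adev: amdgpu_device pointer
 *
 * A hang in the GMC, SMC, ACP or DCE blocks cannot be cured by a
 * soft reset, so a hang in any of them requires a full reset.
 */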
2242 static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2243 {
2244         int i;
2245
2246         for (i = 0; i < adev->num_ip_blocks; i++) {
2247                 if (!adev->ip_blocks[i].status.valid)
2248                         continue;
2249                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2250                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2251                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
2252                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
2253                         if (adev->ip_blocks[i].status.hang) {
2254                                 DRM_INFO("Some blocks need a full reset!\n");
2255                                 return true;
2256                         }
2257                 }
2258         }
2259         return false;
2260 }
2261
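/**
 * amdgpu_soft_reset - soft reset the hung IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Runs the soft_reset hook of every hung IP block that provides one.
 * Returns 0 on success or the failing hook's error code.
 */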
2262 static int amdgpu_soft_reset(struct amdgpu_device *adev)
2263 {
2264         int i, r = 0;
2265
2266         for (i = 0; i < adev->num_ip_blocks; i++) {
2267                 if (!adev->ip_blocks[i].status.valid)
2268                         continue;
2269                 if (adev->ip_blocks[i].status.hang &&
2270                     adev->ip_blocks[i].version->funcs->soft_reset) {
2271                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
2272                         if (r)
2273                                 return r;
2274                 }
2275         }
2276
2277         return 0;
2278 }
2279
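/**
 * amdgpu_post_soft_reset - clean up hung IP blocks after soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * Runs the post_soft_reset hook of every hung IP block that provides
 * one.
 * Returns 0 on success or the failing hook's error code.
 */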
2280 static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2281 {
2282         int i, r = 0;
2283
2284         for (i = 0; i < adev->num_ip_blocks; i++) {
2285                 if (!adev->ip_blocks[i].status.valid)
2286                         continue;
2287                 if (adev->ip_blocks[i].status.hang &&
2288                     adev->ip_blocks[i].version->funcs->post_soft_reset)
2289                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
2290                 if (r)
2291                         return r;
2292         }
2293
2294         return 0;
2295 }
2296
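/**
 * amdgpu_need_backup - check whether shadow BOs should be kept
 *
 * @adev: amdgpu_device pointer
 *
 * APUs never need VRAM backups; dGPUs need them only when lockup
 * recovery is enabled (amdgpu_lockup_timeout > 0).
 */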
2297 bool amdgpu_need_backup(struct amdgpu_device *adev)
2298 {
2299         if (adev->flags & AMD_IS_APU)
2300                 return false;
2301
2302         return amdgpu_lockup_timeout > 0;
2303 }
2304
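/**
 * amdgpu_recover_vram_from_shadow - restore a VRAM BO from its shadow
 *
 * @adev: amdgpu_device pointer
 * @ring: ring to schedule the copy on
 * @bo: buffer object to restore
 * @fence: fence for the copy, returned to the caller
 *
 * Copies the GTT shadow back into VRAM after a GPU reset. BOs that
 * have no shadow, or were evicted out of VRAM anyway, are skipped.
 */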
2305 static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2306                                            struct amdgpu_ring *ring,
2307                                            struct amdgpu_bo *bo,
2308                                            struct dma_fence **fence)
2309 {
2310         uint32_t domain;
2311         int r;
2312
2313         if (!bo->shadow)
2314                 return 0;
2315
2316         r = amdgpu_bo_reserve(bo, false);
2317         if (r)
2318                 return r;
2319         domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2320         /* if bo has been evicted, then no need to recover */
2321         if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
2322                 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
2323                                                   NULL, fence, true);
2324                 if (r) {
2325                         DRM_ERROR("recover page table failed!\n");
2326                         goto err;
2327                 }
2328         }
2329 err:
2330         amdgpu_bo_unreserve(bo);
2331         return r;
2332 }
2333
2334 /**
2335  * amdgpu_sriov_gpu_reset - reset the asic
2336  *
2337  * @adev: amdgpu device pointer
2338  * @voluntary: true if the reset is requested by the guest,
2339  *             false if it is requested by the hypervisor
2340  *
2341  * Attempt to reset the GPU if it has hung (all asics),
2342  * for the SRIOV case.
2343  * Returns 0 for success or an error on failure.
2344  */
2345 int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
2346 {
2347         int i, r = 0;
2348         int resched;
2349         struct amdgpu_bo *bo, *tmp;
2350         struct amdgpu_ring *ring;
2351         struct dma_fence *fence = NULL, *next = NULL;
2352
2353         mutex_lock(&adev->virt.lock_reset);
2354         atomic_inc(&adev->gpu_reset_counter);
2355
2356         /* block TTM */
2357         resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2358
2359         /* block scheduler */
2360         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2361                 ring = adev->rings[i];
2362
2363                 if (!ring || !ring->sched.thread)
2364                         continue;
2365
2366                 kthread_park(ring->sched.thread);
2367                 amd_sched_hw_job_reset(&ring->sched);
2368         }
2369
2370         /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2371         amdgpu_fence_driver_force_completion(adev);
2372
2373         /* request to take full control of GPU before re-initialization  */
2374         if (voluntary)
2375                 amdgpu_virt_reset_gpu(adev);
2376         else
2377                 amdgpu_virt_request_full_gpu(adev, true);
2378
2380         /* Resume IP prior to SMC */
2381         amdgpu_sriov_resume_early(adev);
2382
2383         /* we need to recover the gart prior to running SMC/CP/SDMA resume */
2384         amdgpu_ttm_recover_gart(adev);
2385
2386         /* now we are okay to resume SMC/CP/SDMA */
2387         amdgpu_sriov_resume_late(adev);
2388
2389         amdgpu_irq_gpu_reset_resume_helper(adev);
2390
2391         r = amdgpu_ib_ring_tests(adev);
2392         if (r)
2393                 dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
2393
2394         /* release full control of GPU after ib test */
2395         amdgpu_virt_release_full_gpu(adev, true);
2396
2397         DRM_INFO("recover vram bo from shadow\n");
2398
2399         ring = adev->mman.buffer_funcs_ring;
2400         mutex_lock(&adev->shadow_list_lock);
2401         list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2402                 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2403                 if (fence) {
2404                         r = dma_fence_wait(fence, false);
2405                         if (r) {
2406                                 WARN(r, "recovery from shadow isn't completed\n");
2407                                 break;
2408                         }
2409                 }
2410
2411                 dma_fence_put(fence);
2412                 fence = next;
2413         }
2414         mutex_unlock(&adev->shadow_list_lock);
2415
2416         if (fence) {
2417                 r = dma_fence_wait(fence, false);
2418                 if (r)
2419                         WARN(r, "recovery from shadow isn't completed\n");
2420         }
2421         dma_fence_put(fence);
2422
2423         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2424                 struct amdgpu_ring *ring = adev->rings[i];
2425                 if (!ring || !ring->sched.thread)
2426                         continue;
2427
2428                 amd_sched_job_recovery(&ring->sched);
2429                 kthread_unpark(ring->sched.thread);
2430         }
2431
2432         drm_helper_resume_force_mode(adev->ddev);
2433         ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2434         if (r) {
2435                 /* bad news, how do we tell userspace? */
2436                 dev_info(adev->dev, "GPU reset failed\n");
2437         }
2438
2439         mutex_unlock(&adev->virt.lock_reset);
2440         return r;
2441 }
2442
2443 /**
2444  * amdgpu_gpu_reset - reset the asic
2445  *
2446  * @adev: amdgpu device pointer
2447  *
2448  * Attempt to reset the GPU if it has hung (all asics).
2449  * Returns 0 for success or an error on failure.
2450  */
2451 int amdgpu_gpu_reset(struct amdgpu_device *adev)
2452 {
2453         int i, r;
2454         int resched;
2455         bool need_full_reset;
2456
2457         if (amdgpu_sriov_vf(adev))
2458                 return amdgpu_sriov_gpu_reset(adev, true);
2459
2460         if (!amdgpu_check_soft_reset(adev)) {
2461                 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2462                 return 0;
2463         }
2464
2465         atomic_inc(&adev->gpu_reset_counter);
2466
2467         /* block TTM */
2468         resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2469
2470         /* block scheduler */
2471         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2472                 struct amdgpu_ring *ring = adev->rings[i];
2473
2474                 if (!ring)
2475                         continue;
2476                 kthread_park(ring->sched.thread);
2477                 amd_sched_hw_job_reset(&ring->sched);
2478         }
2479         /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2480         amdgpu_fence_driver_force_completion(adev);
2481
2482         need_full_reset = amdgpu_need_full_reset(adev);
2483
2484         if (!need_full_reset) {
2485                 amdgpu_pre_soft_reset(adev);
2486                 r = amdgpu_soft_reset(adev);
2487                 amdgpu_post_soft_reset(adev);
2488                 if (r || amdgpu_check_soft_reset(adev)) {
2489                         DRM_INFO("soft reset failed, will fall back to full reset!\n");
2490                         need_full_reset = true;
2491                 }
2492         }
2493
2494         if (need_full_reset) {
2495                 r = amdgpu_suspend(adev);
2496
2497 retry:
2498                 /* Disable fb access */
2499                 if (adev->mode_info.num_crtc) {
2500                         struct amdgpu_mode_mc_save save;
2501                         amdgpu_display_stop_mc_access(adev, &save);
2502                         amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
2503                 }
2504                 amdgpu_atombios_scratch_regs_save(adev);
2505                 r = amdgpu_asic_reset(adev);
2506                 amdgpu_atombios_scratch_regs_restore(adev);
2507                 /* post card */
2508                 amdgpu_atom_asic_init(adev->mode_info.atom_context);
2509
2510                 if (!r) {
2511                         dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
2512                         r = amdgpu_resume(adev);
2513                 }
2514         }
2515         if (!r) {
2516                 amdgpu_irq_gpu_reset_resume_helper(adev);
2517                 if (need_full_reset && amdgpu_need_backup(adev)) {
2518                         r = amdgpu_ttm_recover_gart(adev);
2519                         if (r)
2520                                 DRM_ERROR("gart recovery failed!!!\n");
2521                 }
2522                 r = amdgpu_ib_ring_tests(adev);
2523                 if (r) {
2524                         dev_err(adev->dev, "ib ring test failed (%d).\n", r);
2525                         r = amdgpu_suspend(adev);
2526                         need_full_reset = true;
2527                         goto retry;
2528                 }
2529                 /*
2530                  * recover the vm page tables, since we cannot rely on VRAM
2531                  * being consistent after a full gpu reset.
2532                  */
2533                 if (need_full_reset && amdgpu_need_backup(adev)) {
2534                         struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2535                         struct amdgpu_bo *bo, *tmp;
2536                         struct dma_fence *fence = NULL, *next = NULL;
2537
2538                         DRM_INFO("recover vram bo from shadow\n");
2539                         mutex_lock(&adev->shadow_list_lock);
2540                         list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2541                                 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2542                                 if (fence) {
2543                                         r = dma_fence_wait(fence, false);
2544                                         if (r) {
2545                                                 WARN(r, "recovery from shadow isn't completed\n");
2546                                                 break;
2547                                         }
2548                                 }
2549
2550                                 dma_fence_put(fence);
2551                                 fence = next;
2552                         }
2553                         mutex_unlock(&adev->shadow_list_lock);
2554                         if (fence) {
2555                                 r = dma_fence_wait(fence, false);
2556                                 if (r)
2557                                         WARN(r, "recovery from shadow isn't completed\n");
2558                         }
2559                         dma_fence_put(fence);
2560                 }
2561                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2562                         struct amdgpu_ring *ring = adev->rings[i];
2563                         if (!ring)
2564                                 continue;
2565
2566                         amd_sched_job_recovery(&ring->sched);
2567                         kthread_unpark(ring->sched.thread);
2568                 }
2569         } else {
2570                 dev_err(adev->dev, "asic resume failed (%d).\n", r);
2571                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2572                         if (adev->rings[i]) {
2573                                 kthread_unpark(adev->rings[i]->sched.thread);
2574                         }
2575                 }
2576         }
2577
2578         drm_helper_resume_force_mode(adev->ddev);
2579
2580         ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2581         if (r) {
2582                 /* bad news, how do we tell userspace? */
2583                 dev_info(adev->dev, "GPU reset failed\n");
2584         }
2585
2586         return r;
2587 }
2588
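/**
 * amdgpu_get_pcie_info - determine PCIE generation and link width caps
 *
 * @adev: amdgpu_device pointer
 *
 * Fills in the supported PCIE generation and link width masks, taking
 * them from the module parameters when set and from the platform
 * otherwise; devices on a root bus (including APUs) get the defaults.
 */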
2589 void amdgpu_get_pcie_info(struct amdgpu_device *adev)
2590 {
2591         u32 mask;
2592         int ret;
2593
2594         if (amdgpu_pcie_gen_cap)
2595                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
2596
2597         if (amdgpu_pcie_lane_cap)
2598                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
2599
2600         /* covers APUs as well */
2601         if (pci_is_root_bus(adev->pdev->bus)) {
2602                 if (adev->pm.pcie_gen_mask == 0)
2603                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2604                 if (adev->pm.pcie_mlw_mask == 0)
2605                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
2606                 return;
2607         }
2608
2609         if (adev->pm.pcie_gen_mask == 0) {
2610                 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
2611                 if (!ret) {
2612                         adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
2613                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
2614                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
2615
2616                         if (mask & DRM_PCIE_SPEED_25)
2617                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
2618                         if (mask & DRM_PCIE_SPEED_50)
2619                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
2620                         if (mask & DRM_PCIE_SPEED_80)
2621                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
2622                 } else {
2623                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2624                 }
2625         }
2626         if (adev->pm.pcie_mlw_mask == 0) {
2627                 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
2628                 if (!ret) {
2629                         switch (mask) {
2630                         case 32:
2631                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
2632                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2633                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2634                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2635                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2636                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2637                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2638                                 break;
2639                         case 16:
2640                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2641                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2642                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2643                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2644                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2645                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2646                                 break;
2647                         case 12:
2648                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2649                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2650                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2651                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2652                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2653                                 break;
2654                         case 8:
2655                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2656                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2657                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2658                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2659                                 break;
2660                         case 4:
2661                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2662                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2663                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2664                                 break;
2665                         case 2:
2666                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2667                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2668                                 break;
2669                         case 1:
2670                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
2671                                 break;
2672                         default:
2673                                 break;
2674                         }
2675                 } else {
2676                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
2677                 }
2678         }
2679 }
2680
2681 /*
2682  * Debugfs
2683  */
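/**
 * amdgpu_debugfs_add_files - register a set of debugfs files
 *
 * @adev: amdgpu_device pointer
 * @files: array of file descriptions to register
 * @nfiles: number of entries in @files
 *
 * Records the files in the per-device table, skipping sets that are
 * already registered, and creates them under the primary DRM minor's
 * debugfs directory.
 * Returns 0 on success or -EINVAL if the table is full.
 */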
2684 int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
2685                              const struct drm_info_list *files,
2686                              unsigned nfiles)
2687 {
2688         unsigned i;
2689
2690         for (i = 0; i < adev->debugfs_count; i++) {
2691                 if (adev->debugfs[i].files == files) {
2692                         /* Already registered */
2693                         return 0;
2694                 }
2695         }
2696
2697         i = adev->debugfs_count + 1;
2698         if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
2699                 DRM_ERROR("Reached maximum number of debugfs components.\n");
2700                 DRM_ERROR("Report so we increase "
2701                           "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
2702                 return -EINVAL;
2703         }
2704         adev->debugfs[adev->debugfs_count].files = files;
2705         adev->debugfs[adev->debugfs_count].num_files = nfiles;
2706         adev->debugfs_count = i;
2707 #if defined(CONFIG_DEBUG_FS)
2708         drm_debugfs_create_files(files, nfiles,
2709                                  adev->ddev->primary->debugfs_root,
2710                                  adev->ddev->primary);
2711 #endif
2712         return 0;
2713 }
2714
2715 #if defined(CONFIG_DEBUG_FS)
2716
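/**
 * amdgpu_debugfs_regs_read - read MMIO registers through debugfs
 *
 * The file offset encodes more than the register address: bit 23
 * takes the power management mutex, bit 62 enables SE/SH/instance
 * bank selection (bits 24-33, 34-43 and 44-53 respectively, where a
 * field value of 0x3FF means broadcast), and the low 22 bits are the
 * byte offset of the register to access.
 */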
2717 static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
2718                                         size_t size, loff_t *pos)
2719 {
2720         struct amdgpu_device *adev = file_inode(f)->i_private;
2721         ssize_t result = 0;
2722         int r;
2723         bool pm_pg_lock, use_bank;
2724         unsigned instance_bank, sh_bank, se_bank;
2725
2726         if (size & 0x3 || *pos & 0x3)
2727                 return -EINVAL;
2728
2729         /* are we reading registers for which a PG lock is necessary? */
2730         pm_pg_lock = (*pos >> 23) & 1;
2731
2732         if (*pos & (1ULL << 62)) {
2733                 se_bank = (*pos >> 24) & 0x3FF;
2734                 sh_bank = (*pos >> 34) & 0x3FF;
2735                 instance_bank = (*pos >> 44) & 0x3FF;
2736
2737                 if (se_bank == 0x3FF)
2738                         se_bank = 0xFFFFFFFF;
2739                 if (sh_bank == 0x3FF)
2740                         sh_bank = 0xFFFFFFFF;
2741                 if (instance_bank == 0x3FF)
2742                         instance_bank = 0xFFFFFFFF;
2743                 use_bank = 1;
2744         } else {
2745                 use_bank = 0;
2746         }
2747
2748         *pos &= (1UL << 22) - 1;
2749
2750         if (use_bank) {
2751                 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
2752                     (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
2753                         return -EINVAL;
2754                 mutex_lock(&adev->grbm_idx_mutex);
2755                 amdgpu_gfx_select_se_sh(adev, se_bank,
2756                                         sh_bank, instance_bank);
2757         }
2758
2759         if (pm_pg_lock)
2760                 mutex_lock(&adev->pm.mutex);
2761
2762         while (size) {
2763                 uint32_t value;
2764
2765                 if (*pos > adev->rmmio_size)
2766                         goto end;
2767
2768                 value = RREG32(*pos >> 2);
2769                 r = put_user(value, (uint32_t *)buf);
2770                 if (r) {
2771                         result = r;
2772                         goto end;
2773                 }
2774
2775                 result += 4;
2776                 buf += 4;
2777                 *pos += 4;
2778                 size -= 4;
2779         }
2780
2781 end:
2782         if (use_bank) {
2783                 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2784                 mutex_unlock(&adev->grbm_idx_mutex);
2785         }
2786
2787         if (pm_pg_lock)
2788                 mutex_unlock(&adev->pm.mutex);
2789
2790         return result;
2791 }
2792
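/**
 * amdgpu_debugfs_regs_write - write MMIO registers through debugfs
 *
 * Uses the same *pos encoding as amdgpu_debugfs_regs_read(); values
 * are fetched from the user buffer one dword at a time and written
 * with WREG32().
 */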
2793 static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
2794                                          size_t size, loff_t *pos)
2795 {
2796         struct amdgpu_device *adev = file_inode(f)->i_private;
2797         ssize_t result = 0;
2798         int r;
2799         bool pm_pg_lock, use_bank;
2800         unsigned instance_bank, sh_bank, se_bank;
2801
2802         if (size & 0x3 || *pos & 0x3)
2803                 return -EINVAL;
2804
2805         /* are we writing registers for which a PG lock is necessary? */
2806         pm_pg_lock = (*pos >> 23) & 1;
2807
2808         if (*pos & (1ULL << 62)) {
2809                 se_bank = (*pos >> 24) & 0x3FF;
2810                 sh_bank = (*pos >> 34) & 0x3FF;
2811                 instance_bank = (*pos >> 44) & 0x3FF;
2812
2813                 if (se_bank == 0x3FF)
2814                         se_bank = 0xFFFFFFFF;
2815                 if (sh_bank == 0x3FF)
2816                         sh_bank = 0xFFFFFFFF;
2817                 if (instance_bank == 0x3FF)
2818                         instance_bank = 0xFFFFFFFF;
2819                 use_bank = 1;
2820         } else {
2821                 use_bank = 0;
2822         }
2823
2824         *pos &= (1UL << 22) - 1;
2825
2826         if (use_bank) {
2827                 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
2828                     (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
2829                         return -EINVAL;
2830                 mutex_lock(&adev->grbm_idx_mutex);
2831                 amdgpu_gfx_select_se_sh(adev, se_bank,
2832                                         sh_bank, instance_bank);
2833         }
2834
2835         if (pm_pg_lock)
2836                 mutex_lock(&adev->pm.mutex);
2837
2838         while (size) {
2839                 uint32_t value;
2840
2841                 if (*pos > adev->rmmio_size)
2842                         return result;
2843
2844                 r = get_user(value, (uint32_t *)buf);
2845                 if (r)
2846                         return r;
2847
2848                 WREG32(*pos >> 2, value);
2849
2850                 result += 4;
2851                 buf += 4;
2852                 *pos += 4;
2853                 size -= 4;
2854         }
2855
2856         if (use_bank) {
2857                 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2858                 mutex_unlock(&adev->grbm_idx_mutex);
2859         }
2860
2861         if (pm_pg_lock)
2862                 mutex_unlock(&adev->pm.mutex);
2863
2864         return result;
2865 }
2866
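/*
 * The helpers below expose the PCIE, DIDT and SMC indirect register
 * spaces through debugfs with a common pattern: offsets must be
 * 4-byte aligned and each iteration moves one dword between the user
 * buffer and the register via the matching RREG32_*()/WREG32_*()
 * accessor.
 */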
2867 static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
2868                                         size_t size, loff_t *pos)
2869 {
2870         struct amdgpu_device *adev = file_inode(f)->i_private;
2871         ssize_t result = 0;
2872         int r;
2873
2874         if (size & 0x3 || *pos & 0x3)
2875                 return -EINVAL;
2876
2877         while (size) {
2878                 uint32_t value;
2879
2880                 value = RREG32_PCIE(*pos >> 2);
2881                 r = put_user(value, (uint32_t *)buf);
2882                 if (r)
2883                         return r;
2884
2885                 result += 4;
2886                 buf += 4;
2887                 *pos += 4;
2888                 size -= 4;
2889         }
2890
2891         return result;
2892 }
2893
2894 static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
2895                                          size_t size, loff_t *pos)
2896 {
2897         struct amdgpu_device *adev = file_inode(f)->i_private;
2898         ssize_t result = 0;
2899         int r;
2900
2901         if (size & 0x3 || *pos & 0x3)
2902                 return -EINVAL;
2903
2904         while (size) {
2905                 uint32_t value;
2906
2907                 r = get_user(value, (uint32_t *)buf);
2908                 if (r)
2909                         return r;
2910
2911                 WREG32_PCIE(*pos >> 2, value);
2912
2913                 result += 4;
2914                 buf += 4;
2915                 *pos += 4;
2916                 size -= 4;
2917         }
2918
2919         return result;
2920 }
2921
2922 static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
2923                                         size_t size, loff_t *pos)
2924 {
2925         struct amdgpu_device *adev = file_inode(f)->i_private;
2926         ssize_t result = 0;
2927         int r;
2928
2929         if (size & 0x3 || *pos & 0x3)
2930                 return -EINVAL;
2931
2932         while (size) {
2933                 uint32_t value;
2934
2935                 value = RREG32_DIDT(*pos >> 2);
2936                 r = put_user(value, (uint32_t *)buf);
2937                 if (r)
2938                         return r;
2939
2940                 result += 4;
2941                 buf += 4;
2942                 *pos += 4;
2943                 size -= 4;
2944         }
2945
2946         return result;
2947 }
2948
2949 static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
2950                                          size_t size, loff_t *pos)
2951 {
2952         struct amdgpu_device *adev = file_inode(f)->i_private;
2953         ssize_t result = 0;
2954         int r;
2955
2956         if (size & 0x3 || *pos & 0x3)
2957                 return -EINVAL;
2958
2959         while (size) {
2960                 uint32_t value;
2961
2962                 r = get_user(value, (uint32_t *)buf);
2963                 if (r)
2964                         return r;
2965
2966                 WREG32_DIDT(*pos >> 2, value);
2967
2968                 result += 4;
2969                 buf += 4;
2970                 *pos += 4;
2971                 size -= 4;
2972         }
2973
2974         return result;
2975 }
2976
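/*
 * Note the addressing difference: the PCIE and DIDT helpers above index
 * registers by dword (*pos >> 2), while the SMC accessors below pass
 * the byte offset through unchanged.
 */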
2977 static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
2978                                         size_t size, loff_t *pos)
2979 {
2980         struct amdgpu_device *adev = file_inode(f)->i_private;
2981         ssize_t result = 0;
2982         int r;
2983
2984         if (size & 0x3 || *pos & 0x3)
2985                 return -EINVAL;
2986
2987         while (size) {
2988                 uint32_t value;
2989
2990                 value = RREG32_SMC(*pos);
2991                 r = put_user(value, (uint32_t *)buf);
2992                 if (r)
2993                         return r;
2994
2995                 result += 4;
2996                 buf += 4;
2997                 *pos += 4;
2998                 size -= 4;
2999         }
3000
3001         return result;
3002 }
3003
3004 static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
3005                                          size_t size, loff_t *pos)
3006 {
3007         struct amdgpu_device *adev = file_inode(f)->i_private;
3008         ssize_t result = 0;
3009         int r;
3010
3011         if (size & 0x3 || *pos & 0x3)
3012                 return -EINVAL;
3013
3014         while (size) {
3015                 uint32_t value;
3016
3017                 r = get_user(value, (uint32_t *)buf);
3018                 if (r)
3019                         return r;
3020
3021                 WREG32_SMC(*pos, value);
3022
3023                 result += 4;
3024                 buf += 4;
3025                 *pos += 4;
3026                 size -= 4;
3027         }
3028
3029         return result;
3030 }
3031
3032 static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
3033                                         size_t size, loff_t *pos)
3034 {
3035         struct amdgpu_device *adev = file_inode(f)->i_private;
3036         ssize_t result = 0;
3037         int r;
3038         uint32_t *config, no_regs = 0;
3039
3040         if (size & 0x3 || *pos & 0x3)
3041                 return -EINVAL;
3042
3043         config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
3044         if (!config)
3045                 return -ENOMEM;
3046
3047         /* version, increment each time something is added */
3048         config[no_regs++] = 3;
3049         config[no_regs++] = adev->gfx.config.max_shader_engines;
3050         config[no_regs++] = adev->gfx.config.max_tile_pipes;
3051         config[no_regs++] = adev->gfx.config.max_cu_per_sh;
3052         config[no_regs++] = adev->gfx.config.max_sh_per_se;
3053         config[no_regs++] = adev->gfx.config.max_backends_per_se;
3054         config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
3055         config[no_regs++] = adev->gfx.config.max_gprs;
3056         config[no_regs++] = adev->gfx.config.max_gs_threads;
3057         config[no_regs++] = adev->gfx.config.max_hw_contexts;
3058         config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
3059         config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
3060         config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
3061         config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
3062         config[no_regs++] = adev->gfx.config.num_tile_pipes;
3063         config[no_regs++] = adev->gfx.config.backend_enable_mask;
3064         config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
3065         config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
3066         config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
3067         config[no_regs++] = adev->gfx.config.num_gpus;
3068         config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
3069         config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
3070         config[no_regs++] = adev->gfx.config.gb_addr_config;
3071         config[no_regs++] = adev->gfx.config.num_rbs;
3072
3073         /* rev==1 */
3074         config[no_regs++] = adev->rev_id;
3075         config[no_regs++] = adev->pg_flags;
3076         config[no_regs++] = adev->cg_flags;
3077
3078         /* rev==2 */
3079         config[no_regs++] = adev->family;
3080         config[no_regs++] = adev->external_rev_id;
3081
3082         /* rev==3 */
3083         config[no_regs++] = adev->pdev->device;
3084         config[no_regs++] = adev->pdev->revision;
3085         config[no_regs++] = adev->pdev->subsystem_device;
3086         config[no_regs++] = adev->pdev->subsystem_vendor;
3087
3088         while (size && (*pos < no_regs * 4)) {
3089                 uint32_t value;
3090
3091                 value = config[*pos >> 2];
3092                 r = put_user(value, (uint32_t *)buf);
3093                 if (r) {
3094                         kfree(config);
3095                         return r;
3096                 }
3097
3098                 result += 4;
3099                 buf += 4;
3100                 *pos += 4;
3101                 size -= 4;
3102         }
3103
3104         kfree(config);
3105         return result;
3106 }
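
/*
 * A consumer should check config[0] (the layout version) before
 * trusting later fields; e.g. the PCI IDs at the tail exist only from
 * version 3 on. A hypothetical parser sketch:
 *
 *	uint32_t cfg[64];
 *	if (pread(fd, cfg, sizeof(cfg), 0) >= 4 && cfg[0] >= 3)
 *		printf("pci device id 0x%x\n", cfg[29]);  // pdev->device slot
 */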
3107
3108 static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
3109                                         size_t size, loff_t *pos)
3110 {
3111         struct amdgpu_device *adev = file_inode(f)->i_private;
3112         int idx, r;
3113         int32_t value;
3114
3115         if (size != 4 || *pos & 0x3)
3116                 return -EINVAL;
3117
3118         /* convert offset to sensor number */
3119         idx = *pos >> 2;
3120
3121         if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
3122                 r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &value);
3123         else
3124                 return -EINVAL;
3125
3126         if (!r)
3127                 r = put_user(value, (int32_t *)buf);
3128
3129         return !r ? 4 : r;
3130 }
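
/*
 * Sensors are addressed by their powerplay sensor id: userspace seeks
 * to 4 * <id> and reads one int32. A sketch (the id value is
 * illustrative):
 *
 *	int32_t value;
 *	pread(fd, &value, 4, 4 * sensor_id);
 */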
3131
3132 static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
3133                                         size_t size, loff_t *pos)
3134 {
        struct amdgpu_device *adev = file_inode(f)->i_private;
        int r, x;
        ssize_t result = 0;
3138         uint32_t offset, se, sh, cu, wave, simd, data[32];
3139
3140         if (size & 3 || *pos & 3)
3141                 return -EINVAL;
3142
3143         /* decode offset */
3144         offset = (*pos & 0x7F);
3145         se = ((*pos >> 7) & 0xFF);
3146         sh = ((*pos >> 15) & 0xFF);
3147         cu = ((*pos >> 23) & 0xFF);
3148         wave = ((*pos >> 31) & 0xFF);
3149         simd = ((*pos >> 37) & 0xFF);
3150
3151         /* switch to the specific se/sh/cu */
3152         mutex_lock(&adev->grbm_idx_mutex);
3153         amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3154
3155         x = 0;
3156         if (adev->gfx.funcs->read_wave_data)
3157                 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
3158
3159         amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3160         mutex_unlock(&adev->grbm_idx_mutex);
3161
3162         if (!x)
3163                 return -EINVAL;
3164
3165         while (size && (offset < x * 4)) {
3166                 uint32_t value;
3167
3168                 value = data[offset >> 2];
3169                 r = put_user(value, (uint32_t *)buf);
3170                 if (r)
3171                         return r;
3172
3173                 result += 4;
3174                 buf += 4;
3175                 offset += 4;
3176                 size -= 4;
3177         }
3178
3179         return result;
3180 }
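
/*
 * Composing the wave-status offset decoded above from userspace (all
 * field values hypothetical):
 *
 *	loff_t pos = (offset & 0x7F) | ((uint64_t)se << 7) |
 *		     ((uint64_t)sh << 15) | ((uint64_t)cu << 23) |
 *		     ((uint64_t)wave << 31) | ((uint64_t)simd << 37);
 */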
3181
3182 static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
3183                                         size_t size, loff_t *pos)
3184 {
        struct amdgpu_device *adev = file_inode(f)->i_private;
3186         int r;
3187         ssize_t result = 0;
3188         uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
3189
        /* the staging buffer holds 1024 dwords (4 KiB) */
        if (size > 4096 || size & 3 || *pos & 3)
                return -EINVAL;
3192
3193         /* decode offset */
3194         offset = (*pos & 0xFFF);       /* in dwords */
3195         se = ((*pos >> 12) & 0xFF);
3196         sh = ((*pos >> 20) & 0xFF);
3197         cu = ((*pos >> 28) & 0xFF);
3198         wave = ((*pos >> 36) & 0xFF);
3199         simd = ((*pos >> 44) & 0xFF);
3200         thread = ((*pos >> 52) & 0xFF);
3201         bank = ((*pos >> 60) & 1);
3202
3203         data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
3204         if (!data)
3205                 return -ENOMEM;
3206
3207         /* switch to the specific se/sh/cu */
3208         mutex_lock(&adev->grbm_idx_mutex);
3209         amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3210
        if (bank == 0) {
                if (adev->gfx.funcs->read_wave_vgprs)
                        adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size >> 2, data);
        } else {
                if (adev->gfx.funcs->read_wave_sgprs)
                        adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size >> 2, data);
        }
3218
3219         amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3220         mutex_unlock(&adev->grbm_idx_mutex);
3221
        while (size) {
                uint32_t value;

                /* data[] is filled from index 0 by the read_wave_*gprs
                 * hooks; indexing it by the GPR offset would run past
                 * the end of the buffer
                 */
                value = data[result >> 2];
                r = put_user(value, (uint32_t *)buf);
                if (r) {
                        result = r;
                        goto err;
                }

                result += 4;
                buf += 4;
                size -= 4;
        }
3236
3237 err:
3238         kfree(data);
3239         return result;
3240 }
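
/*
 * The GPR file uses the same scheme with wider fields: the starting
 * register (in dwords) sits in the low 12 bits and bit 60 selects the
 * register bank (0 = per-thread VGPRs, 1 = SGPRs).
 */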
3241
3242 static const struct file_operations amdgpu_debugfs_regs_fops = {
3243         .owner = THIS_MODULE,
3244         .read = amdgpu_debugfs_regs_read,
3245         .write = amdgpu_debugfs_regs_write,
3246         .llseek = default_llseek
3247 };
3248 static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3249         .owner = THIS_MODULE,
3250         .read = amdgpu_debugfs_regs_didt_read,
3251         .write = amdgpu_debugfs_regs_didt_write,
3252         .llseek = default_llseek
3253 };
3254 static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3255         .owner = THIS_MODULE,
3256         .read = amdgpu_debugfs_regs_pcie_read,
3257         .write = amdgpu_debugfs_regs_pcie_write,
3258         .llseek = default_llseek
3259 };
3260 static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3261         .owner = THIS_MODULE,
3262         .read = amdgpu_debugfs_regs_smc_read,
3263         .write = amdgpu_debugfs_regs_smc_write,
3264         .llseek = default_llseek
3265 };
3266
3267 static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3268         .owner = THIS_MODULE,
3269         .read = amdgpu_debugfs_gca_config_read,
3270         .llseek = default_llseek
3271 };
3272
3273 static const struct file_operations amdgpu_debugfs_sensors_fops = {
3274         .owner = THIS_MODULE,
3275         .read = amdgpu_debugfs_sensor_read,
3276         .llseek = default_llseek
3277 };
3278
3279 static const struct file_operations amdgpu_debugfs_wave_fops = {
3280         .owner = THIS_MODULE,
3281         .read = amdgpu_debugfs_wave_read,
3282         .llseek = default_llseek
3283 };
3284 static const struct file_operations amdgpu_debugfs_gpr_fops = {
3285         .owner = THIS_MODULE,
3286         .read = amdgpu_debugfs_gpr_read,
3287         .llseek = default_llseek
3288 };
3289
3290 static const struct file_operations *debugfs_regs[] = {
3291         &amdgpu_debugfs_regs_fops,
3292         &amdgpu_debugfs_regs_didt_fops,
3293         &amdgpu_debugfs_regs_pcie_fops,
3294         &amdgpu_debugfs_regs_smc_fops,
3295         &amdgpu_debugfs_gca_config_fops,
3296         &amdgpu_debugfs_sensors_fops,
3297         &amdgpu_debugfs_wave_fops,
3298         &amdgpu_debugfs_gpr_fops,
3299 };
3300
3301 static const char *debugfs_regs_names[] = {
3302         "amdgpu_regs",
3303         "amdgpu_regs_didt",
3304         "amdgpu_regs_pcie",
3305         "amdgpu_regs_smc",
3306         "amdgpu_gca_config",
3307         "amdgpu_sensors",
3308         "amdgpu_wave",
3309         "amdgpu_gpr",
3310 };
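
/*
 * debugfs_regs[] and debugfs_regs_names[] are walked in lockstep by
 * amdgpu_debugfs_regs_init(); keep their entries in the same order when
 * adding a new file.
 */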
3311
3312 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3313 {
3314         struct drm_minor *minor = adev->ddev->primary;
3315         struct dentry *ent, *root = minor->debugfs_root;
3316         unsigned i, j;
3317
3318         for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3319                 ent = debugfs_create_file(debugfs_regs_names[i],
3320                                           S_IFREG | S_IRUGO, root,
3321                                           adev, debugfs_regs[i]);
                if (IS_ERR_OR_NULL(ent)) {
                        for (j = 0; j < i; j++) {
                                debugfs_remove(adev->debugfs_regs[j]);
                                adev->debugfs_regs[j] = NULL;
                        }
                        return ent ? PTR_ERR(ent) : -ENOMEM;
                }
3329
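                /* size the first entry (amdgpu_regs) to the MMIO
                 * aperture so tools see a meaningful file size
                 */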
3330                 if (!i)
3331                         i_size_write(ent->d_inode, adev->rmmio_size);
3332                 adev->debugfs_regs[i] = ent;
3333         }
3334
3335         return 0;
3336 }
3337
3338 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
3339 {
3340         unsigned i;
3341
3342         for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3343                 if (adev->debugfs_regs[i]) {
3344                         debugfs_remove(adev->debugfs_regs[i]);
3345                         adev->debugfs_regs[i] = NULL;
3346                 }
3347         }
3348 }
3349
3350 int amdgpu_debugfs_init(struct drm_minor *minor)
3351 {
3352         return 0;
3353 }
3354 #else
3355 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3356 {
3357         return 0;
3358 }
3359 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
3360 #endif