drm/amd: Fix detection of _PR3 on the PCIe root port
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>
#include <linux/pci-p2pdma.h>
#include <linux/apple-gmux.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>

#if IS_ENABLED(CONFIG_X86)
#include <asm/intel-family.h>
#endif

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");

#define AMDGPU_RESUME_MS                2000
#define AMDGPU_MAX_RETRY_LIMIT          2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)

static const struct drm_driver amdgpu_kms_driver;

const char *amdgpu_asic_name[] = {
        "TAHITI",
        "PITCAIRN",
        "VERDE",
        "OLAND",
        "HAINAN",
        "BONAIRE",
        "KAVERI",
        "KABINI",
        "HAWAII",
        "MULLINS",
        "TOPAZ",
        "TONGA",
        "FIJI",
        "CARRIZO",
        "STONEY",
        "POLARIS10",
        "POLARIS11",
        "POLARIS12",
        "VEGAM",
        "VEGA10",
        "VEGA12",
        "VEGA20",
        "RAVEN",
        "ARCTURUS",
        "RENOIR",
        "ALDEBARAN",
        "NAVI10",
        "CYAN_SKILLFISH",
        "NAVI14",
        "NAVI12",
        "SIENNA_CICHLID",
        "NAVY_FLOUNDER",
        "VANGOGH",
        "DIMGREY_CAVEFISH",
        "BEIGE_GOBY",
        "YELLOW_CARP",
        "IP DISCOVERY",
        "LAST",
};

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and NAKs received.
 */

static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

        return sysfs_emit(buf, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, 0444,
                amdgpu_device_get_pcie_replay_count, NULL);
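
/*
 * Illustrative userspace read of the attribute above (editor's sketch, not
 * part of the driver; the card index in the sysfs path depends on the
 * system):
 *
 *     FILE *f = fopen("/sys/class/drm/card0/device/pcie_replay_count", "r");
 *     unsigned long long replays;
 *
 *     if (f && fscanf(f, "%llu", &replays) == 1)
 *             printf("PCIe replays: %llu\n", replays);
 */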

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);


/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_px(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
                return true;
        return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        if (adev->has_pr3 ||
            ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
                return true;
        return false;
}
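
/*
 * Editor's note tied to this patch's subject ("Fix detection of _PR3 on
 * the PCIe root port"): adev->has_pr3 above is expected to reflect whether
 * _PR3 (indicating D3cold support) exists on the PCIe root port rather than
 * on some intermediate bridge. A minimal sketch of such detection at init
 * time, using existing PCI core helpers (where exactly this runs in the
 * driver is an assumption):
 *
 *     struct pci_dev *root = pcie_find_root_port(adev->pdev);
 *
 *     adev->has_pr3 = root && pci_pr3_present(root);
 */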

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise returns false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        return amdgpu_asic_supports_baco(adev);
}

/**
 * amdgpu_device_supports_smart_shift - Is the device dGPU with
 * smart shift support
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
{
        return (amdgpu_device_supports_boco(dev) &&
                amdgpu_acpi_is_power_shift_control_supported());
}

/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
                             void *buf, size_t size, bool write)
{
        unsigned long flags;
        uint32_t hi = ~0, tmp = 0;
        uint32_t *data = buf;
        uint64_t last;
        int idx;

        if (!drm_dev_enter(adev_to_drm(adev), &idx))
                return;

        BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

        spin_lock_irqsave(&adev->mmio_idx_lock, flags);
        for (last = pos + size; pos < last; pos += 4) {
                tmp = pos >> 31;

                WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
                if (tmp != hi) {
                        WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
                        hi = tmp;
                }
                if (write)
                        WREG32_NO_KIQ(mmMM_DATA, *data++);
                else
                        *data++ = RREG32_NO_KIQ(mmMM_DATA);
        }

        spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
        drm_dev_exit(idx);
}
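
/*
 * Illustrative call of the helper above (editor's sketch): read 16 bytes
 * from VRAM offset 0x1000 into a stack buffer through the MM_INDEX/MM_DATA
 * window; both @pos and @size must be dword aligned:
 *
 *     u32 data[4];
 *
 *     amdgpu_device_mm_access(adev, 0x1000, data, sizeof(data), false);
 */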

/**
 * amdgpu_device_aper_access - access vram by vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 *
 * Returns the number of bytes that have been transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
                                 void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
        void __iomem *addr;
        size_t count = 0;
        uint64_t last;

        if (!adev->mman.aper_base_kaddr)
                return 0;

        last = min(pos + size, adev->gmc.visible_vram_size);
        if (last > pos) {
                addr = adev->mman.aper_base_kaddr + pos;
                count = last - pos;

                if (write) {
                        memcpy_toio(addr, buf, count);
                        /* Make sure HDP write cache flush happens without any reordering
                         * after the system memory contents are sent over PCIe device
                         */
                        mb();
                        amdgpu_device_flush_hdp(adev, NULL);
                } else {
                        amdgpu_device_invalidate_hdp(adev, NULL);
                        /* Make sure HDP read cache is invalidated before issuing a read
                         * to the PCIe device
                         */
                        mb();
                        memcpy_fromio(buf, addr, count);
                }

        }

        return count;
#else
        return 0;
#endif
}

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
                               void *buf, size_t size, bool write)
{
        size_t count;

        /* try using the VRAM aperture to access VRAM first */
        count = amdgpu_device_aper_access(adev, pos, buf, size, write);
        size -= count;
        if (size) {
                /* use MM to access the rest of the VRAM */
                pos += count;
                buf += count;
                amdgpu_device_mm_access(adev, pos, buf, size, write);
        }
}
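
/*
 * Illustrative call (editor's sketch): the helper above transparently uses
 * the CPU-visible VRAM aperture for the first part of the range and falls
 * back to the MM_INDEX/MM_DATA window for whatever lies beyond it:
 *
 *     amdgpu_device_vram_access(adev, pos, buf, size, true); // write
 */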

/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
        if (adev->no_hw_access)
                return true;

#ifdef CONFIG_LOCKDEP
        /*
         * This is a bit complicated to understand, so worth a comment. What we assert
         * here is that the GPU reset is not running on another thread in parallel.
         *
         * For this we trylock the read side of the reset semaphore, if that succeeds
         * we know that the reset is not running in parallel.
         *
         * If the trylock fails we assert that we are either already holding the read
         * side of the lock or are the reset thread itself and hold the write side of
         * the lock.
         */
        if (in_task()) {
                if (down_read_trylock(&adev->reset_domain->sem))
                        up_read(&adev->reset_domain->sem);
                else
                        lockdep_assert_held(&adev->reset_domain->sem);
        }
#endif
        return false;
}

/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
                            uint32_t reg, uint32_t acc_flags)
{
        uint32_t ret;

        if (amdgpu_device_skip_hw_access(adev))
                return 0;

        if ((reg * 4) < adev->rmmio_size) {
                if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
                    amdgpu_sriov_runtime(adev) &&
                    down_read_trylock(&adev->reset_domain->sem)) {
                        ret = amdgpu_kiq_rreg(adev, reg);
                        up_read(&adev->reset_domain->sem);
                } else {
                        ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
                }
        } else {
                ret = adev->pcie_rreg(adev, reg * 4);
        }

        trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

        return ret;
}

/*
 * MMIO register read with bytes helper functions
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
        if (amdgpu_device_skip_hw_access(adev))
                return 0;

        if (offset < adev->rmmio_size)
                return (readb(adev->rmmio + offset));
        BUG();
}

/*
 * MMIO register write with bytes helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 */

/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if (offset < adev->rmmio_size)
                writeb(value, adev->rmmio + offset);
        else
                BUG();
}

/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
                        uint32_t reg, uint32_t v,
                        uint32_t acc_flags)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if ((reg * 4) < adev->rmmio_size) {
                if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
                    amdgpu_sriov_runtime(adev) &&
                    down_read_trylock(&adev->reset_domain->sem)) {
                        amdgpu_kiq_wreg(adev, reg, v);
                        up_read(&adev->reset_domain->sem);
                } else {
                        writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
                }
        } else {
                adev->pcie_wreg(adev, reg * 4, v);
        }

        trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}
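
/*
 * Illustrative read-modify-write built from the two accessors above
 * (editor's sketch; REG, FIELD_MASK and new_value are placeholders, not
 * real definitions):
 *
 *     u32 tmp = amdgpu_device_rreg(adev, REG, 0);
 *
 *     tmp &= ~FIELD_MASK;
 *     tmp |= new_value & FIELD_MASK;
 *     amdgpu_device_wreg(adev, REG, tmp, 0);
 */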

/**
 * amdgpu_mm_wreg_mmio_rlc - write a register either through direct/indirect MMIO or through the RLC path if in range
 *
 * @adev: amdgpu_device pointer
 * @reg: mmio/rlc register
 * @v: value to write
 * @xcc_id: xcc accelerated compute core id
 *
 * This function is invoked only for debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
                             uint32_t reg, uint32_t v,
                             uint32_t xcc_id)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if (amdgpu_sriov_fullaccess(adev) &&
            adev->gfx.rlc.funcs &&
            adev->gfx.rlc.funcs->is_rlcg_access_range) {
                if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
                        return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
        } else if ((reg * 4) >= adev->rmmio_size) {
                adev->pcie_wreg(adev, reg * 4, v);
        } else {
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        }
}

/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
                                u32 reg_addr)
{
        unsigned long flags, pcie_index, pcie_data;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;
        u32 r;

        pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
        pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        r = readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return r;
}
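
/*
 * Editor's annotation on the pattern above: each writel() of the index
 * register is followed by a readl() of the same register, which flushes the
 * posted MMIO write and guarantees the index is latched before the data
 * port is touched. The same write-then-read-back sequence recurs in all of
 * the indirect accessors below.
 */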

u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
                                    u64 reg_addr)
{
        unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
        u32 r;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_index_hi_offset;
        void __iomem *pcie_data_offset;

        pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
        pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
        if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
                pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
        else
                pcie_index_hi = 0;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
        if (pcie_index_hi != 0)
                pcie_index_hi_offset = (void __iomem *)adev->rmmio +
                                pcie_index_hi * 4;

        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        if (pcie_index_hi != 0) {
                writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
                readl(pcie_index_hi_offset);
        }
        r = readl(pcie_data_offset);

        /* clear the high bits */
        if (pcie_index_hi != 0) {
                writel(0, pcie_index_hi_offset);
                readl(pcie_index_hi_offset);
        }

        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return r;
}

/**
 * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
                                  u32 reg_addr)
{
        unsigned long flags, pcie_index, pcie_data;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;
        u64 r;

        pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
        pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        /* read low 32 bits */
        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        r = readl(pcie_data_offset);
        /* read high 32 bits */
        writel(reg_addr + 4, pcie_index_offset);
        readl(pcie_index_offset);
        r |= ((u64)readl(pcie_data_offset) << 32);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return r;
}

u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
                                  u64 reg_addr)
{
        unsigned long flags, pcie_index, pcie_data;
        unsigned long pcie_index_hi = 0;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_index_hi_offset;
        void __iomem *pcie_data_offset;
        u64 r;

        pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
        pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
        if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
                pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
        if (pcie_index_hi != 0)
                pcie_index_hi_offset = (void __iomem *)adev->rmmio +
                        pcie_index_hi * 4;

        /* read low 32 bits */
        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        if (pcie_index_hi != 0) {
                writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
                readl(pcie_index_hi_offset);
        }
        r = readl(pcie_data_offset);
        /* read high 32 bits */
        writel(reg_addr + 4, pcie_index_offset);
        readl(pcie_index_offset);
        if (pcie_index_hi != 0) {
                writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
                readl(pcie_index_hi_offset);
        }
        r |= ((u64)readl(pcie_data_offset) << 32);

        /* clear the high bits */
        if (pcie_index_hi != 0) {
                writel(0, pcie_index_hi_offset);
                readl(pcie_index_hi_offset);
        }

        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return r;
}

/**
 * amdgpu_device_indirect_wreg - write an indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
                                 u32 reg_addr, u32 reg_data)
{
        unsigned long flags, pcie_index, pcie_data;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
        pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        writel(reg_data, pcie_data_offset);
        readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
                                     u64 reg_addr, u32 reg_data)
{
        unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_index_hi_offset;
        void __iomem *pcie_data_offset;

        pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
        pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
        if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
                pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
        else
                pcie_index_hi = 0;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
        if (pcie_index_hi != 0)
                pcie_index_hi_offset = (void __iomem *)adev->rmmio +
                                pcie_index_hi * 4;

        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        if (pcie_index_hi != 0) {
                writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
                readl(pcie_index_hi_offset);
        }
        writel(reg_data, pcie_data_offset);
        readl(pcie_data_offset);

        /* clear the high bits */
        if (pcie_index_hi != 0) {
                writel(0, pcie_index_hi_offset);
                readl(pcie_index_hi_offset);
        }

        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
                                   u32 reg_addr, u64 reg_data)
{
        unsigned long flags, pcie_index, pcie_data;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
        pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        /* write low 32 bits */
        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
        readl(pcie_data_offset);
        /* write high 32 bits */
        writel(reg_addr + 4, pcie_index_offset);
        readl(pcie_index_offset);
        writel((u32)(reg_data >> 32), pcie_data_offset);
        readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
                                   u64 reg_addr, u64 reg_data)
{
        unsigned long flags, pcie_index, pcie_data;
        unsigned long pcie_index_hi = 0;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_index_hi_offset;
        void __iomem *pcie_data_offset;

        pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
        pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
        if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
                pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
        if (pcie_index_hi != 0)
                pcie_index_hi_offset = (void __iomem *)adev->rmmio +
                                pcie_index_hi * 4;

        /* write low 32 bits */
        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        if (pcie_index_hi != 0) {
                writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
                readl(pcie_index_hi_offset);
        }
        writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
        readl(pcie_data_offset);
        /* write high 32 bits */
        writel(reg_addr + 4, pcie_index_offset);
        readl(pcie_index_offset);
        if (pcie_index_hi != 0) {
                writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
                readl(pcie_index_hi_offset);
        }
        writel((u32)(reg_data >> 32), pcie_data_offset);
        readl(pcie_data_offset);

        /* clear the high bits */
        if (pcie_index_hi != 0) {
                writel(0, pcie_index_hi_offset);
                readl(pcie_index_hi_offset);
        }

        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_get_rev_id - query device rev_id
 *
 * @adev: amdgpu_device pointer
 *
 * Return device rev_id
 */
u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
{
        return adev->nbio.funcs->get_rev_id(adev);
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
        BUG();
        return 0;
}

static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
        BUG();
        return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
                  reg, v);
        BUG();
}

static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
{
        DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
                  reg, v);
        BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
        BUG();
        return 0;
}

static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
        BUG();
        return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
        DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
                  reg, v);
        BUG();
}

static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
{
        DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
                  reg, v);
        BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
                                          uint32_t block, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
                  reg, block);
        BUG();
        return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
                                      uint32_t block,
                                      uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
                  reg, block, v);
        BUG();
}

/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
        int ret;

        amdgpu_asic_pre_asic_init(adev);

        if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
            amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
                amdgpu_psp_wait_for_bootloader(adev);
                ret = amdgpu_atomfirmware_asic_init(adev, true);
                return ret;
        } else {
                return amdgpu_atom_asic_init(adev->mode_info.atom_context);
        }
}

/**
 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
{
        return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
                                       AMDGPU_GEM_DOMAIN_VRAM |
                                       AMDGPU_GEM_DOMAIN_GTT,
                                       &adev->mem_scratch.robj,
                                       &adev->mem_scratch.gpu_addr,
                                       (void **)&adev->mem_scratch.ptr);
}

/**
 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
                                             const u32 *registers,
                                             const u32 array_size)
{
        u32 tmp, reg, and_mask, or_mask;
        int i;

        if (array_size % 3)
                return;

        for (i = 0; i < array_size; i += 3) {
                reg = registers[i + 0];
                and_mask = registers[i + 1];
                or_mask = registers[i + 2];

                if (and_mask == 0xffffffff) {
                        tmp = or_mask;
                } else {
                        tmp = RREG32(reg);
                        tmp &= ~and_mask;
                        if (adev->family >= AMDGPU_FAMILY_AI)
                                tmp |= (or_mask & and_mask);
                        else
                                tmp |= or_mask;
                }
                WREG32(reg, tmp);
        }
}
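
/*
 * Illustrative golden-register table for the helper above (editor's sketch;
 * the names and values are placeholders, real tables live in the per-ASIC
 * files). Each triple is { offset, and_mask, or_mask }:
 *
 *     static const u32 example_golden_settings[] = {
 *             mmREG_A, 0xffffffff, 0x00000001, // and_mask 0xffffffff: write or_mask outright
 *             mmREG_B, 0x0000ff00, 0x00003400, // otherwise: read-modify-write bits 8-15
 *     };
 *
 *     amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *                                             ARRAY_SIZE(example_golden_settings));
 */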

/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
        pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
        return pci_reset_function(adev->pdev);
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
        if (adev->wb.wb_obj) {
                amdgpu_bo_free_kernel(&adev->wb.wb_obj,
                                      &adev->wb.gpu_addr,
                                      (void **)&adev->wb.wb);
                adev->wb.wb_obj = NULL;
        }
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->wb.wb_obj == NULL) {
                /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
                r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
                                            PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                            &adev->wb.wb_obj, &adev->wb.gpu_addr,
                                            (void **)&adev->wb.wb);
                if (r) {
                        dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
                        return r;
                }

                adev->wb.num_wb = AMDGPU_MAX_WB;
                memset(&adev->wb.used, 0, sizeof(adev->wb.used));

                /* clear wb memory */
                memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
        }

        return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
        unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

        if (offset < adev->wb.num_wb) {
                __set_bit(offset, adev->wb.used);
                *wb = offset << 3; /* convert to dw offset */
                return 0;
        } else {
                return -EINVAL;
        }
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
        wb >>= 3;
        if (wb < adev->wb.num_wb)
                __clear_bit(wb, adev->wb.used);
}
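
/*
 * Illustrative writeback slot lifecycle using the two helpers above
 * (editor's sketch): allocate a slot, hand its GPU address to the hardware,
 * read the CPU copy, then release it:
 *
 *     u32 wb;
 *
 *     if (!amdgpu_device_wb_get(adev, &wb)) {
 *             u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *
 *             // ... point a ring or fence at gpu_addr, let the GPU write ...
 *             u32 val = adev->wb.wb[wb];
 *             amdgpu_device_wb_free(adev, wb);
 *     }
 */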

/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
        int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
        struct pci_bus *root;
        struct resource *res;
        unsigned int i;
        u16 cmd;
        int r;

        if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
                return 0;

        /* Bypass for VF */
        if (amdgpu_sriov_vf(adev))
                return 0;

        /* skip if the bios has already enabled large BAR */
        if (adev->gmc.real_vram_size &&
            (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
                return 0;

        /* Check if the root BUS has 64bit memory resources */
        root = adev->pdev->bus;
        while (root->parent)
                root = root->parent;

        pci_bus_for_each_resource(root, res, i) {
                if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
                    res->start > 0x100000000ull)
                        break;
        }

        /* Trying to resize is pointless without a root hub window above 4GB */
        if (!res)
                return 0;

        /* Limit the BAR size to what is available */
        rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
                        rbar_size);

        /* Disable memory decoding while we change the BAR addresses and size */
        pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
        pci_write_config_word(adev->pdev, PCI_COMMAND,
                              cmd & ~PCI_COMMAND_MEMORY);

        /* Free the VRAM and doorbell BAR, we most likely need to move both. */
        amdgpu_doorbell_fini(adev);
        if (adev->asic_type >= CHIP_BONAIRE)
                pci_release_resource(adev->pdev, 2);

        pci_release_resource(adev->pdev, 0);

        r = pci_resize_resource(adev->pdev, 0, rbar_size);
        if (r == -ENOSPC)
                DRM_INFO("Not enough PCI address space for a large BAR.");
        else if (r && r != -ENOTSUPP)
                DRM_ERROR("Problem resizing BAR0 (%d).", r);

        pci_assign_unassigned_bus_resources(adev->pdev->bus);

        /* When the doorbell or fb BAR isn't available we have no chance of
         * using the device.
         */
        r = amdgpu_doorbell_init(adev);
        if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
                return -ENODEV;

        pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

        return 0;
}

static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
{
        if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
                return false;

        return true;
}

/*
 * GPU helpers function.
 */
/**
 * amdgpu_device_need_post - check if the hw needs to be posted or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if POST is needed because a hw reset was performed.
 * Returns true if POST is needed or false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
        uint32_t reg;

        if (amdgpu_sriov_vf(adev))
                return false;

        if (!amdgpu_device_read_bios(adev))
                return false;

        if (amdgpu_passthrough(adev)) {
                /* for FIJI: in the whole-GPU pass-through virtualization case, after a
                 * VM reboot some old SMC firmware still needs the driver to do a vPost,
                 * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
                 * this flaw, so force vPost for SMC versions below 22.15.
                 */
                if (adev->asic_type == CHIP_FIJI) {
                        int err;
                        uint32_t fw_ver;

                        err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
                        /* force vPost if an error occurred */
                        if (err)
                                return true;

                        fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
                        if (fw_ver < 0x00160e00)
                                return true;
                }
        }

        /* Don't post if we need to reset whole hive on init */
        if (adev->gmc.xgmi.pending_reset)
                return false;

        if (adev->has_hw_reset) {
                adev->has_hw_reset = false;
                return true;
        }

        /* bios scratch used on CIK+ */
        if (adev->asic_type >= CHIP_BONAIRE)
                return amdgpu_atombios_scratch_need_asic_init(adev);

        /* check MEM_SIZE for older asics */
        reg = amdgpu_asic_get_config_memsize(adev);

        if ((reg != 0) && (reg != 0xffffffff))
                return false;

        return true;
}

/*
 * Check whether seamless boot is supported.
 *
 * So far we only support seamless boot on DCE 3.0 or later.
 * If users report that it works on older ASICs as well, we may
 * loosen this.
 */
bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
{
        switch (amdgpu_seamless) {
        case -1:
                break;
        case 1:
                return true;
        case 0:
                return false;
        default:
                DRM_ERROR("Invalid value for amdgpu.seamless: %d\n",
                          amdgpu_seamless);
                return false;
        }

        if (!(adev->flags & AMD_IS_APU))
                return false;

        if (adev->mman.keep_stolen_vga_memory)
                return false;

        return adev->ip_versions[DCE_HWIP][0] >= IP_VERSION(3, 0, 0);
}

/*
 * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
 * speed switching. Until we have confirmation from Intel that a specific host
 * supports it, it's safer that we keep it disabled for all.
 *
 * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
 */
bool amdgpu_device_pcie_dynamic_switching_supported(void)
{
#if IS_ENABLED(CONFIG_X86)
        struct cpuinfo_x86 *c = &cpu_data(0);

        if (c->x86_vendor == X86_VENDOR_INTEL)
                return false;
#endif
        return true;
}

/**
 * amdgpu_device_should_use_aspm - check if the device should program ASPM
 *
 * @adev: amdgpu_device pointer
 *
 * Confirm whether the module parameter and pcie bridge agree that ASPM should
 * be set for this device.
 *
 * Returns true if it should be used or false if not.
 */
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
{
        switch (amdgpu_aspm) {
        case -1:
                break;
        case 0:
                return false;
        case 1:
                return true;
        default:
                return false;
        }
        return pcie_aspm_enabled(adev->pdev);
}

bool amdgpu_device_aspm_support_quirk(void)
{
#if IS_ENABLED(CONFIG_X86)
        struct cpuinfo_x86 *c = &cpu_data(0);

        return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
#else
        return true;
#endif
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
                bool state)
{
        struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));

        amdgpu_asic_set_vga_state(adev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines the number of bits in the page table versus the
 * page directory; a page is 4KB, so we have 12 bits of offset, a minimum of
 * 9 bits in the page table, and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
        /* defines number of bits in page table versus page directory,
         * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
         * page table and the remaining bits are in the page directory
         */
        if (amdgpu_vm_block_size == -1)
                return;

        if (amdgpu_vm_block_size < 9) {
                dev_warn(adev->dev, "VM page table size (%d) too small\n",
                         amdgpu_vm_block_size);
                amdgpu_vm_block_size = -1;
        }
}
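
/*
 * Worked example for the check above (editor's annotation): with 4KB pages
 * there are 12 offset bits; amdgpu_vm_block_size = 9 then gives 2^9 = 512
 * PTEs per page table block, and the remaining virtual address bits select
 * page directory entries.
 */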

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
        /* no need to check the default value */
        if (amdgpu_vm_size == -1)
                return;

        if (amdgpu_vm_size < 1) {
                dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
                         amdgpu_vm_size);
                amdgpu_vm_size = -1;
        }
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
        struct sysinfo si;
        bool is_os_64 = (sizeof(void *) == 8);
        uint64_t total_memory;
        uint64_t dram_size_seven_GB = 0x1B8000000;
        uint64_t dram_size_three_GB = 0xB8000000;

        if (amdgpu_smu_memory_pool_size == 0)
                return;

        if (!is_os_64) {
                DRM_WARN("Not 64-bit OS, feature not supported\n");
                goto def_value;
        }
        si_meminfo(&si);
        total_memory = (uint64_t)si.totalram * si.mem_unit;

        if ((amdgpu_smu_memory_pool_size == 1) ||
                (amdgpu_smu_memory_pool_size == 2)) {
                if (total_memory < dram_size_three_GB)
                        goto def_value1;
        } else if ((amdgpu_smu_memory_pool_size == 4) ||
                (amdgpu_smu_memory_pool_size == 8)) {
                if (total_memory < dram_size_seven_GB)
                        goto def_value1;
        } else {
                DRM_WARN("Smu memory pool size not supported\n");
                goto def_value;
        }
        adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

        return;

def_value1:
        DRM_WARN("Not enough system memory\n");
def_value:
        adev->pm.smu_prv_buffer_size = 0;
}
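
/*
 * Editor's annotation: the shift by 28 above converts the module parameter
 * from 256MB units to bytes, e.g. amdgpu_smu_memory_pool_size = 2 yields
 * 2 << 28 = 512MB.
 */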
1556
1557 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1558 {
1559         if (!(adev->flags & AMD_IS_APU) ||
1560             adev->asic_type < CHIP_RAVEN)
1561                 return 0;
1562
1563         switch (adev->asic_type) {
1564         case CHIP_RAVEN:
1565                 if (adev->pdev->device == 0x15dd)
1566                         adev->apu_flags |= AMD_APU_IS_RAVEN;
1567                 if (adev->pdev->device == 0x15d8)
1568                         adev->apu_flags |= AMD_APU_IS_PICASSO;
1569                 break;
1570         case CHIP_RENOIR:
1571                 if ((adev->pdev->device == 0x1636) ||
1572                     (adev->pdev->device == 0x164c))
1573                         adev->apu_flags |= AMD_APU_IS_RENOIR;
1574                 else
1575                         adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1576                 break;
1577         case CHIP_VANGOGH:
1578                 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1579                 break;
1580         case CHIP_YELLOW_CARP:
1581                 break;
1582         case CHIP_CYAN_SKILLFISH:
1583                 if ((adev->pdev->device == 0x13FE) ||
1584                     (adev->pdev->device == 0x143F))
1585                         adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1586                 break;
1587         default:
1588                 break;
1589         }
1590
1591         return 0;
1592 }
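
/*
 * Illustrative sketch (not part of this function): later code keys firmware
 * and workaround selection off these flags, e.g. the gpu_info loader below
 * does roughly:
 *
 *   if (adev->apu_flags & AMD_APU_IS_PICASSO)
 *           chip_name = "picasso";
 *   else
 *           chip_name = "raven";
 */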
1593
1594 /**
1595  * amdgpu_device_check_arguments - validate module params
1596  *
1597  * @adev: amdgpu_device pointer
1598  *
1599  * Validates certain module parameters and updates
1600  * the associated values used by the driver (all asics).
1601  */
1602 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1603 {
1604         if (amdgpu_sched_jobs < 4) {
1605                 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1606                          amdgpu_sched_jobs);
1607                 amdgpu_sched_jobs = 4;
1608         } else if (!is_power_of_2(amdgpu_sched_jobs)) {
1609                 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1610                          amdgpu_sched_jobs);
1611                 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1612         }
1613
1614         if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1615                 /* gart size must be greater than or equal to 32M */
1616                 dev_warn(adev->dev, "gart size (%d) too small\n",
1617                          amdgpu_gart_size);
1618                 amdgpu_gart_size = -1;
1619         }
1620
1621         if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1622                 /* gtt size must be greater than or equal to 32M */
1623                 dev_warn(adev->dev, "gtt size (%d) too small\n",
1624                          amdgpu_gtt_size);
1625                 amdgpu_gtt_size = -1;
1626         }
1627
1628         /* valid range is between 4 and 9 inclusive */
1629         if (amdgpu_vm_fragment_size != -1 &&
1630             (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1631                 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1632                 amdgpu_vm_fragment_size = -1;
1633         }
1634
1635         if (amdgpu_sched_hw_submission < 2) {
1636                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1637                          amdgpu_sched_hw_submission);
1638                 amdgpu_sched_hw_submission = 2;
1639         } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1640                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1641                          amdgpu_sched_hw_submission);
1642                 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1643         }
1644
1645         if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1646                 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1647                 amdgpu_reset_method = -1;
1648         }
1649
1650         amdgpu_device_check_smu_prv_buffer_size(adev);
1651
1652         amdgpu_device_check_vm_size(adev);
1653
1654         amdgpu_device_check_block_size(adev);
1655
1656         adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1657
1658         return 0;
1659 }
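
/*
 * Illustrative note (a sketch, not part of the driver): invalid values are
 * clamped or rounded rather than rejected.  For example, booting with
 * amdgpu.sched_jobs=5 is rounded up to 8 by roundup_pow_of_two() above,
 * while amdgpu.gart_size=16 falls back to the default (-1).
 */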
1660
1661 /**
1662  * amdgpu_switcheroo_set_state - set switcheroo state
1663  *
1664  * @pdev: pci dev pointer
1665  * @state: vga_switcheroo state
1666  *
1667  * Callback for the switcheroo driver.  Resumes the asic after it is
1668  * powered up, or suspends it before it is powered down, using ACPI methods.
1669  */
1670 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1671                                         enum vga_switcheroo_state state)
1672 {
1673         struct drm_device *dev = pci_get_drvdata(pdev);
1674         int r;
1675
1676         if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1677                 return;
1678
1679         if (state == VGA_SWITCHEROO_ON) {
1680                 pr_info("switched on\n");
1681                 /* don't suspend or resume card normally */
1682                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1683
1684                 pci_set_power_state(pdev, PCI_D0);
1685                 amdgpu_device_load_pci_state(pdev);
1686                 r = pci_enable_device(pdev);
1687                 if (r)
1688                         DRM_WARN("pci_enable_device failed (%d)\n", r);
1689                 amdgpu_device_resume(dev, true);
1690
1691                 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1692         } else {
1693                 pr_info("switched off\n");
1694                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1695                 amdgpu_device_suspend(dev, true);
1696                 amdgpu_device_cache_pci_state(pdev);
1697                 /* Shut down the device */
1698                 pci_disable_device(pdev);
1699                 pci_set_power_state(pdev, PCI_D3cold);
1700                 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1701         }
1702 }
1703
1704 /**
1705  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1706  *
1707  * @pdev: pci dev pointer
1708  *
1709  * Callback for the switcheroo driver.  Checks if the switcheroo
1710  * state can be changed.
1711  * Returns true if the state can be changed, false if not.
1712  */
1713 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1714 {
1715         struct drm_device *dev = pci_get_drvdata(pdev);
1716
1717        /*
1718         * FIXME: open_count is protected by drm_global_mutex but that would lead to
1719         * locking inversion with the driver load path. And the access here is
1720         * completely racy anyway. So don't bother with locking for now.
1721         */
1722         return atomic_read(&dev->open_count) == 0;
1723 }
1724
1725 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1726         .set_gpu_state = amdgpu_switcheroo_set_state,
1727         .reprobe = NULL,
1728         .can_switch = amdgpu_switcheroo_can_switch,
1729 };
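
/*
 * Illustrative sketch (not part of this file): the ops table above is handed
 * to the vga_switcheroo framework during device init, roughly:
 *
 *   vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, px);
 *
 * where "px" says whether the platform supports PX/ATPX power control; see
 * the actual registration elsewhere in this file for details.
 */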
1730
1731 /**
1732  * amdgpu_device_ip_set_clockgating_state - set the CG state
1733  *
1734  * @dev: amdgpu_device pointer
1735  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1736  * @state: clockgating state (gate or ungate)
1737  *
1738  * Sets the requested clockgating state for all instances of
1739  * the hardware IP specified.
1740  * Returns the error code from the last instance.
1741  */
1742 int amdgpu_device_ip_set_clockgating_state(void *dev,
1743                                            enum amd_ip_block_type block_type,
1744                                            enum amd_clockgating_state state)
1745 {
1746         struct amdgpu_device *adev = dev;
1747         int i, r = 0;
1748
1749         for (i = 0; i < adev->num_ip_blocks; i++) {
1750                 if (!adev->ip_blocks[i].status.valid)
1751                         continue;
1752                 if (adev->ip_blocks[i].version->type != block_type)
1753                         continue;
1754                 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1755                         continue;
1756                 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1757                         (void *)adev, state);
1758                 if (r)
1759                         DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1760                                   adev->ip_blocks[i].version->funcs->name, r);
1761         }
1762         return r;
1763 }
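
/*
 * Illustrative sketch (not part of this function): a typical caller gates a
 * single block's clocks like so:
 *
 *   r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *                                              AMD_CG_STATE_GATE);
 *   if (r)
 *           dev_warn(adev->dev, "failed to gate GFX clocks (%d)\n", r);
 */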
1764
1765 /**
1766  * amdgpu_device_ip_set_powergating_state - set the PG state
1767  *
1768  * @dev: amdgpu_device pointer
1769  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1770  * @state: powergating state (gate or ungate)
1771  *
1772  * Sets the requested powergating state for all instances of
1773  * the hardware IP specified.
1774  * Returns the error code from the last instance.
1775  */
1776 int amdgpu_device_ip_set_powergating_state(void *dev,
1777                                            enum amd_ip_block_type block_type,
1778                                            enum amd_powergating_state state)
1779 {
1780         struct amdgpu_device *adev = dev;
1781         int i, r = 0;
1782
1783         for (i = 0; i < adev->num_ip_blocks; i++) {
1784                 if (!adev->ip_blocks[i].status.valid)
1785                         continue;
1786                 if (adev->ip_blocks[i].version->type != block_type)
1787                         continue;
1788                 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1789                         continue;
1790                 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1791                         (void *)adev, state);
1792                 if (r)
1793                         DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1794                                   adev->ip_blocks[i].version->funcs->name, r);
1795         }
1796         return r;
1797 }
1798
1799 /**
1800  * amdgpu_device_ip_get_clockgating_state - get the CG state
1801  *
1802  * @adev: amdgpu_device pointer
1803  * @flags: clockgating feature flags
1804  *
1805  * Walks the list of IPs on the device and updates the clockgating
1806  * flags for each IP.
1807  * Updates @flags with the feature flags for each hardware IP where
1808  * clockgating is enabled.
1809  */
1810 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1811                                             u64 *flags)
1812 {
1813         int i;
1814
1815         for (i = 0; i < adev->num_ip_blocks; i++) {
1816                 if (!adev->ip_blocks[i].status.valid)
1817                         continue;
1818                 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1819                         adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1820         }
1821 }
1822
1823 /**
1824  * amdgpu_device_ip_wait_for_idle - wait for idle
1825  *
1826  * @adev: amdgpu_device pointer
1827  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1828  *
1829  * Waits for the requested hardware IP to be idle.
1830  * Returns 0 for success or a negative error code on failure.
1831  */
1832 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1833                                    enum amd_ip_block_type block_type)
1834 {
1835         int i, r;
1836
1837         for (i = 0; i < adev->num_ip_blocks; i++) {
1838                 if (!adev->ip_blocks[i].status.valid)
1839                         continue;
1840                 if (adev->ip_blocks[i].version->type == block_type) {
1841                         r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1842                         if (r)
1843                                 return r;
1844                         break;
1845                 }
1846         }
1847         return 0;
1848
1849 }
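
/*
 * Illustrative sketch (not part of this function): callers typically quiesce
 * a block before reprogramming it, e.g.:
 *
 *   r = amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
 *   if (r)
 *           return r;
 */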
1850
1851 /**
1852  * amdgpu_device_ip_is_idle - is the hardware IP idle
1853  *
1854  * @adev: amdgpu_device pointer
1855  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1856  *
1857  * Check if the hardware IP is idle or not.
1858  * Returns true if the IP is idle, false if not.
1859  */
1860 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1861                               enum amd_ip_block_type block_type)
1862 {
1863         int i;
1864
1865         for (i = 0; i < adev->num_ip_blocks; i++) {
1866                 if (!adev->ip_blocks[i].status.valid)
1867                         continue;
1868                 if (adev->ip_blocks[i].version->type == block_type)
1869                         return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1870         }
1871         return true;
1872
1873 }
1874
1875 /**
1876  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1877  *
1878  * @adev: amdgpu_device pointer
1879  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1880  *
1881  * Returns a pointer to the hardware IP block structure
1882  * if it exists for the asic, otherwise NULL.
1883  */
1884 struct amdgpu_ip_block *
1885 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1886                               enum amd_ip_block_type type)
1887 {
1888         int i;
1889
1890         for (i = 0; i < adev->num_ip_blocks; i++)
1891                 if (adev->ip_blocks[i].version->type == type)
1892                         return &adev->ip_blocks[i];
1893
1894         return NULL;
1895 }
1896
1897 /**
1898  * amdgpu_device_ip_block_version_cmp - check an IP block's version
1899  *
1900  * @adev: amdgpu_device pointer
1901  * @type: enum amd_ip_block_type
1902  * @major: major version
1903  * @minor: minor version
1904  *
1905  * return 0 if equal or greater
1906  * return 1 if smaller or the ip_block doesn't exist
1907  * Returns 0 if the IP block's version is equal to or greater than the
1908  * given (major, minor) version; returns 1 if it is smaller or the ip_block doesn't exist.
1909                                        enum amd_ip_block_type type,
1910                                        u32 major, u32 minor)
1911 {
1912         struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1913
1914         if (ip_block && ((ip_block->version->major > major) ||
1915                         ((ip_block->version->major == major) &&
1916                         (ip_block->version->minor >= minor))))
1917                 return 0;
1918
1919         return 1;
1920 }
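
/*
 * Illustrative sketch (not part of this function): note the inverted
 * convention where 0 means "at least this version".  Checking for SMU 7.1
 * or newer therefore reads:
 *
 *   if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC,
 *                                          7, 1) == 0)
 *           take_smu_7_1_path(adev);     (hypothetical helper)
 */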
1921
1922 /**
1923  * amdgpu_device_ip_block_add - add an IP block to the asic
1924  *
1925  * @adev: amdgpu_device pointer
1926  * @ip_block_version: pointer to the IP to add
1927  *
1928  * Adds the IP block driver information to the collection of IPs
1929  * on the asic.
1930  */
1931 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1932                                const struct amdgpu_ip_block_version *ip_block_version)
1933 {
1934         if (!ip_block_version)
1935                 return -EINVAL;
1936
1937         switch (ip_block_version->type) {
1938         case AMD_IP_BLOCK_TYPE_VCN:
1939                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1940                         return 0;
1941                 break;
1942         case AMD_IP_BLOCK_TYPE_JPEG:
1943                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1944                         return 0;
1945                 break;
1946         default:
1947                 break;
1948         }
1949
1950         DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1951                   ip_block_version->funcs->name);
1952
1953         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1954
1955         return 0;
1956 }
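
/*
 * Illustrative sketch (not part of this function): SoC-specific code builds
 * the IP list with repeated calls such as (see, e.g., vi_set_ip_blocks()):
 *
 *   amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 *   amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
 *
 * Harvested VCN/JPEG blocks are silently skipped by the checks above.
 */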
1957
1958 /**
1959  * amdgpu_device_enable_virtual_display - enable virtual display feature
1960  *
1961  * @adev: amdgpu_device pointer
1962  *
1963  * Enables the virtual display feature if the user has enabled it via
1964  * the module parameter virtual_display.  This feature provides virtual
1965  * display hardware on headless boards or in virtualized environments.
1966  * This function parses and validates the configuration string specified by
1967  * the user and configures the virtual display settings (number of
1968  * virtual connectors, crtcs, etc.) specified.
1969  */
1970 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1971 {
1972         adev->enable_virtual_display = false;
1973
1974         if (amdgpu_virtual_display) {
1975                 const char *pci_address_name = pci_name(adev->pdev);
1976                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1977
1978                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1979                 pciaddstr_tmp = pciaddstr;
1980                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1981                         pciaddname = strsep(&pciaddname_tmp, ",");
1982                         if (!strcmp("all", pciaddname)
1983                             || !strcmp(pci_address_name, pciaddname)) {
1984                                 long num_crtc;
1985                                 int res = -1;
1986
1987                                 adev->enable_virtual_display = true;
1988
1989                                 if (pciaddname_tmp)
1990                                         res = kstrtol(pciaddname_tmp, 10,
1991                                                       &num_crtc);
1992
1993                                 if (!res) {
1994                                         if (num_crtc < 1)
1995                                                 num_crtc = 1;
1996                                         if (num_crtc > 6)
1997                                                 num_crtc = 6;
1998                                         adev->mode_info.num_crtc = num_crtc;
1999                                 } else {
2000                                         adev->mode_info.num_crtc = 1;
2001                                 }
2002                                 break;
2003                         }
2004                 }
2005
2006                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
2007                          amdgpu_virtual_display, pci_address_name,
2008                          adev->enable_virtual_display, adev->mode_info.num_crtc);
2009
2010                 kfree(pciaddstr);
2011         }
2012 }
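
/*
 * Illustrative note (a sketch, not part of the driver): the string parsed
 * above is a semicolon-separated list of "<pci address|all>[,<num crtcs>]"
 * entries, so for example:
 *
 *   amdgpu.virtual_display=0000:01:00.0,2
 *
 * enables two virtual crtcs on that device, while
 * amdgpu.virtual_display=all enables one virtual crtc on every device.
 */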
2013
2014 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
2015 {
2016         if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
2017                 adev->mode_info.num_crtc = 1;
2018                 adev->enable_virtual_display = true;
2019                 DRM_INFO("virtual_display:%d, num_crtc:%d\n",
2020                          adev->enable_virtual_display, adev->mode_info.num_crtc);
2021         }
2022 }
2023
2024 /**
2025  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
2026  *
2027  * @adev: amdgpu_device pointer
2028  *
2029  * Parses the asic configuration parameters specified in the gpu info
2030  * firmware and makes them available to the driver for use in configuring
2031  * the asic.
2032  * Returns 0 on success, -EINVAL on failure.
2033  */
2034 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
2035 {
2036         const char *chip_name;
2037         char fw_name[40];
2038         int err;
2039         const struct gpu_info_firmware_header_v1_0 *hdr;
2040
2041         adev->firmware.gpu_info_fw = NULL;
2042
2043         if (adev->mman.discovery_bin) {
2044                 /*
2045                  * FIXME: The bounding box is still needed by Navi12, so
2046                  * temporarily read it from gpu_info firmware. Should be dropped
2047                  * when DAL no longer needs it.
2048                  */
2049                 if (adev->asic_type != CHIP_NAVI12)
2050                         return 0;
2051         }
2052
2053         switch (adev->asic_type) {
2054         default:
2055                 return 0;
2056         case CHIP_VEGA10:
2057                 chip_name = "vega10";
2058                 break;
2059         case CHIP_VEGA12:
2060                 chip_name = "vega12";
2061                 break;
2062         case CHIP_RAVEN:
2063                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2064                         chip_name = "raven2";
2065                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
2066                         chip_name = "picasso";
2067                 else
2068                         chip_name = "raven";
2069                 break;
2070         case CHIP_ARCTURUS:
2071                 chip_name = "arcturus";
2072                 break;
2073         case CHIP_NAVI12:
2074                 chip_name = "navi12";
2075                 break;
2076         }
2077
2078         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
2079         err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
2080         if (err) {
2081                 dev_err(adev->dev,
2082                         "Failed to get gpu_info firmware \"%s\"\n",
2083                         fw_name);
2084                 goto out;
2085         }
2086
2087         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
2088         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2089
2090         switch (hdr->version_major) {
2091         case 1:
2092         {
2093                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2094                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2095                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2096
2097                 /*
2098                  * Should be dropped when DAL no longer needs it.
2099                  */
2100                 if (adev->asic_type == CHIP_NAVI12)
2101                         goto parse_soc_bounding_box;
2102
2103                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2104                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2105                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2106                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2107                 adev->gfx.config.max_texture_channel_caches =
2108                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
2109                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2110                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2111                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2112                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2113                 adev->gfx.config.double_offchip_lds_buf =
2114                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2115                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2116                 adev->gfx.cu_info.max_waves_per_simd =
2117                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2118                 adev->gfx.cu_info.max_scratch_slots_per_cu =
2119                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2120                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2121                 if (hdr->version_minor >= 1) {
2122                         const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2123                                 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2124                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2125                         adev->gfx.config.num_sc_per_sh =
2126                                 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2127                         adev->gfx.config.num_packer_per_sc =
2128                                 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2129                 }
2130
2131 parse_soc_bounding_box:
2132                 /*
2133                  * SoC bounding box info is not integrated into the discovery table,
2134                  * so we always need to parse it from the gpu info firmware when needed.
2135                  */
2136                 if (hdr->version_minor == 2) {
2137                         const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2138                                 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2139                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2140                         adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2141                 }
2142                 break;
2143         }
2144         default:
2145                 dev_err(adev->dev,
2146                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2147                 err = -EINVAL;
2148                 goto out;
2149         }
2150 out:
2151         return err;
2152 }
2153
2154 /**
2155  * amdgpu_device_ip_early_init - run early init for hardware IPs
2156  *
2157  * @adev: amdgpu_device pointer
2158  *
2159  * Early initialization pass for hardware IPs.  The hardware IPs that make
2160  * up each asic are discovered and each IP's early_init callback is run.  This
2161  * is the first stage in initializing the asic.
2162  * Returns 0 on success, negative error code on failure.
2163  */
2164 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2165 {
2166         struct drm_device *dev = adev_to_drm(adev);
2167         struct pci_dev *parent;
2168         int i, r;
2169         bool total;
2170
2171         amdgpu_device_enable_virtual_display(adev);
2172
2173         if (amdgpu_sriov_vf(adev)) {
2174                 r = amdgpu_virt_request_full_gpu(adev, true);
2175                 if (r)
2176                         return r;
2177         }
2178
2179         switch (adev->asic_type) {
2180 #ifdef CONFIG_DRM_AMDGPU_SI
2181         case CHIP_VERDE:
2182         case CHIP_TAHITI:
2183         case CHIP_PITCAIRN:
2184         case CHIP_OLAND:
2185         case CHIP_HAINAN:
2186                 adev->family = AMDGPU_FAMILY_SI;
2187                 r = si_set_ip_blocks(adev);
2188                 if (r)
2189                         return r;
2190                 break;
2191 #endif
2192 #ifdef CONFIG_DRM_AMDGPU_CIK
2193         case CHIP_BONAIRE:
2194         case CHIP_HAWAII:
2195         case CHIP_KAVERI:
2196         case CHIP_KABINI:
2197         case CHIP_MULLINS:
2198                 if (adev->flags & AMD_IS_APU)
2199                         adev->family = AMDGPU_FAMILY_KV;
2200                 else
2201                         adev->family = AMDGPU_FAMILY_CI;
2202
2203                 r = cik_set_ip_blocks(adev);
2204                 if (r)
2205                         return r;
2206                 break;
2207 #endif
2208         case CHIP_TOPAZ:
2209         case CHIP_TONGA:
2210         case CHIP_FIJI:
2211         case CHIP_POLARIS10:
2212         case CHIP_POLARIS11:
2213         case CHIP_POLARIS12:
2214         case CHIP_VEGAM:
2215         case CHIP_CARRIZO:
2216         case CHIP_STONEY:
2217                 if (adev->flags & AMD_IS_APU)
2218                         adev->family = AMDGPU_FAMILY_CZ;
2219                 else
2220                         adev->family = AMDGPU_FAMILY_VI;
2221
2222                 r = vi_set_ip_blocks(adev);
2223                 if (r)
2224                         return r;
2225                 break;
2226         default:
2227                 r = amdgpu_discovery_set_ip_blocks(adev);
2228                 if (r)
2229                         return r;
2230                 break;
2231         }
2232
2233         if (amdgpu_has_atpx() &&
2234             (amdgpu_is_atpx_hybrid() ||
2235              amdgpu_has_atpx_dgpu_power_cntl()) &&
2236             ((adev->flags & AMD_IS_APU) == 0) &&
2237             !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2238                 adev->flags |= AMD_IS_PX;
2239
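        /*
         * _PR3 (the ACPI power resource used for d3cold) is declared on the
         * PCIe root port, not necessarily on the device's immediate upstream
         * bridge, so walk up to the root port before probing for it.
         */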
2240         if (!(adev->flags & AMD_IS_APU)) {
2241                 parent = pcie_find_root_port(adev->pdev);
2242                 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2243         }
2244
2245
2246         adev->pm.pp_feature = amdgpu_pp_feature_mask;
2247         if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2248                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2249         if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2250                 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2251
2252         total = true;
2253         for (i = 0; i < adev->num_ip_blocks; i++) {
2254                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2255                         DRM_WARN("disabled ip block: %d <%s>\n",
2256                                   i, adev->ip_blocks[i].version->funcs->name);
2257                         adev->ip_blocks[i].status.valid = false;
2258                 } else {
2259                         if (adev->ip_blocks[i].version->funcs->early_init) {
2260                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2261                                 if (r == -ENOENT) {
2262                                         adev->ip_blocks[i].status.valid = false;
2263                                 } else if (r) {
2264                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
2265                                                   adev->ip_blocks[i].version->funcs->name, r);
2266                                         total = false;
2267                                 } else {
2268                                         adev->ip_blocks[i].status.valid = true;
2269                                 }
2270                         } else {
2271                                 adev->ip_blocks[i].status.valid = true;
2272                         }
2273                 }
2274                 /* get the vbios after the asic_funcs are set up */
2275                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2276                         r = amdgpu_device_parse_gpu_info_fw(adev);
2277                         if (r)
2278                                 return r;
2279
2280                         /* Read BIOS */
2281                         if (amdgpu_device_read_bios(adev)) {
2282                                 if (!amdgpu_get_bios(adev))
2283                                         return -EINVAL;
2284
2285                                 r = amdgpu_atombios_init(adev);
2286                                 if (r) {
2287                                         dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2288                                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2289                                         return r;
2290                                 }
2291                         }
2292
2293                         /* get pf2vf msg info at its earliest time */
2294                         if (amdgpu_sriov_vf(adev))
2295                                 amdgpu_virt_init_data_exchange(adev);
2296
2297                 }
2298         }
2299         if (!total)
2300                 return -ENODEV;
2301
2302         amdgpu_amdkfd_device_probe(adev);
2303         adev->cg_flags &= amdgpu_cg_mask;
2304         adev->pg_flags &= amdgpu_pg_mask;
2305
2306         return 0;
2307 }
2308
2309 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2310 {
2311         int i, r;
2312
2313         for (i = 0; i < adev->num_ip_blocks; i++) {
2314                 if (!adev->ip_blocks[i].status.sw)
2315                         continue;
2316                 if (adev->ip_blocks[i].status.hw)
2317                         continue;
2318                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2319                     (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2320                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2321                         r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2322                         if (r) {
2323                                 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2324                                           adev->ip_blocks[i].version->funcs->name, r);
2325                                 return r;
2326                         }
2327                         adev->ip_blocks[i].status.hw = true;
2328                 }
2329         }
2330
2331         return 0;
2332 }
2333
2334 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2335 {
2336         int i, r;
2337
2338         for (i = 0; i < adev->num_ip_blocks; i++) {
2339                 if (!adev->ip_blocks[i].status.sw)
2340                         continue;
2341                 if (adev->ip_blocks[i].status.hw)
2342                         continue;
2343                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2344                 if (r) {
2345                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2346                                   adev->ip_blocks[i].version->funcs->name, r);
2347                         return r;
2348                 }
2349                 adev->ip_blocks[i].status.hw = true;
2350         }
2351
2352         return 0;
2353 }
2354
2355 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2356 {
2357         int r = 0;
2358         int i;
2359         uint32_t smu_version;
2360
2361         if (adev->asic_type >= CHIP_VEGA10) {
2362                 for (i = 0; i < adev->num_ip_blocks; i++) {
2363                         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2364                                 continue;
2365
2366                         if (!adev->ip_blocks[i].status.sw)
2367                                 continue;
2368
2369                         /* no need to do the fw loading again if already done */
2370                         if (adev->ip_blocks[i].status.hw)
2371                                 break;
2372
2373                         if (amdgpu_in_reset(adev) || adev->in_suspend) {
2374                                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2375                                 if (r) {
2376                                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2377                                                           adev->ip_blocks[i].version->funcs->name, r);
2378                                         return r;
2379                                 }
2380                         } else {
2381                                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2382                                 if (r) {
2383                                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2384                                                           adev->ip_blocks[i].version->funcs->name, r);
2385                                         return r;
2386                                 }
2387                         }
2388
2389                         adev->ip_blocks[i].status.hw = true;
2390                         break;
2391                 }
2392         }
2393
2394         if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2395                 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2396
2397         return r;
2398 }
2399
2400 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2401 {
2402         long timeout;
2403         int r, i;
2404
2405         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2406                 struct amdgpu_ring *ring = adev->rings[i];
2407
2408                 /* No need to setup the GPU scheduler for rings that don't need it */
2409                 if (!ring || ring->no_scheduler)
2410                         continue;
2411
2412                 switch (ring->funcs->type) {
2413                 case AMDGPU_RING_TYPE_GFX:
2414                         timeout = adev->gfx_timeout;
2415                         break;
2416                 case AMDGPU_RING_TYPE_COMPUTE:
2417                         timeout = adev->compute_timeout;
2418                         break;
2419                 case AMDGPU_RING_TYPE_SDMA:
2420                         timeout = adev->sdma_timeout;
2421                         break;
2422                 default:
2423                         timeout = adev->video_timeout;
2424                         break;
2425                 }
2426
2427                 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2428                                    ring->num_hw_submission, 0,
2429                                    timeout, adev->reset_domain->wq,
2430                                    ring->sched_score, ring->name,
2431                                    adev->dev);
2432                 if (r) {
2433                         DRM_ERROR("Failed to create scheduler on ring %s.\n",
2434                                   ring->name);
2435                         return r;
2436                 }
2437         }
2438
2439         amdgpu_xcp_update_partition_sched_list(adev);
2440
2441         return 0;
2442 }
2443
2444
2445 /**
2446  * amdgpu_device_ip_init - run init for hardware IPs
2447  *
2448  * @adev: amdgpu_device pointer
2449  *
2450  * Main initialization pass for hardware IPs.  The list of all the hardware
2451  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2452  * are run.  sw_init initializes the software state associated with each IP
2453  * and hw_init initializes the hardware associated with each IP.
2454  * Returns 0 on success, negative error code on failure.
2455  */
2456 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2457 {
2458         int i, r;
2459
2460         r = amdgpu_ras_init(adev);
2461         if (r)
2462                 return r;
2463
2464         for (i = 0; i < adev->num_ip_blocks; i++) {
2465                 if (!adev->ip_blocks[i].status.valid)
2466                         continue;
2467                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2468                 if (r) {
2469                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2470                                   adev->ip_blocks[i].version->funcs->name, r);
2471                         goto init_failed;
2472                 }
2473                 adev->ip_blocks[i].status.sw = true;
2474
2475                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2476                         /* need to do common hw init early so everything is set up for gmc */
2477                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2478                         if (r) {
2479                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2480                                 goto init_failed;
2481                         }
2482                         adev->ip_blocks[i].status.hw = true;
2483                 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2484                         /* need to do gmc hw init early so we can allocate gpu mem */
2485                         /* Try to reserve bad pages early */
2486                         if (amdgpu_sriov_vf(adev))
2487                                 amdgpu_virt_exchange_data(adev);
2488
2489                         r = amdgpu_device_mem_scratch_init(adev);
2490                         if (r) {
2491                                 DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
2492                                 goto init_failed;
2493                         }
2494                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2495                         if (r) {
2496                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2497                                 goto init_failed;
2498                         }
2499                         r = amdgpu_device_wb_init(adev);
2500                         if (r) {
2501                                 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2502                                 goto init_failed;
2503                         }
2504                         adev->ip_blocks[i].status.hw = true;
2505
2506                         /* right after GMC hw init, we create CSA */
2507                         if (adev->gfx.mcbp) {
2508                                 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2509                                                                AMDGPU_GEM_DOMAIN_VRAM |
2510                                                                AMDGPU_GEM_DOMAIN_GTT,
2511                                                                AMDGPU_CSA_SIZE);
2512                                 if (r) {
2513                                         DRM_ERROR("allocate CSA failed %d\n", r);
2514                                         goto init_failed;
2515                                 }
2516                         }
2517                 }
2518         }
2519
2520         if (amdgpu_sriov_vf(adev))
2521                 amdgpu_virt_init_data_exchange(adev);
2522
2523         r = amdgpu_ib_pool_init(adev);
2524         if (r) {
2525                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2526                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2527                 goto init_failed;
2528         }
2529
2530         r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init completes */
2531         if (r)
2532                 goto init_failed;
2533
2534         r = amdgpu_device_ip_hw_init_phase1(adev);
2535         if (r)
2536                 goto init_failed;
2537
2538         r = amdgpu_device_fw_loading(adev);
2539         if (r)
2540                 goto init_failed;
2541
2542         r = amdgpu_device_ip_hw_init_phase2(adev);
2543         if (r)
2544                 goto init_failed;
2545
2546         /*
2547          * Retired pages will be loaded from eeprom and reserved here;
2548          * this should be called after amdgpu_device_ip_hw_init_phase2 since
2549          * for some ASICs the RAS EEPROM code relies on the SMU being fully
2550          * functional for I2C communication, which is only true at this point.
2551          *
2552          * amdgpu_ras_recovery_init may fail, but the caller only cares about
2553          * failures caused by a bad GPU state and stops the amdgpu init process
2554          * accordingly. For other failures it still releases all the
2555          * resources and prints an error message, rather than returning a
2556          * negative value to the upper level.
2557          *
2558          * Note: theoretically, this should be called before all vram allocations
2559          * to protect retired pages from being abused.
2560          */
2561         r = amdgpu_ras_recovery_init(adev);
2562         if (r)
2563                 goto init_failed;
2564
2565         /*
2566          * In the case of XGMI, grab an extra reference on the reset domain for this device
2567          */
2568         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2569                 if (amdgpu_xgmi_add_device(adev) == 0) {
2570                         if (!amdgpu_sriov_vf(adev)) {
2571                                 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2572
2573                                 if (WARN_ON(!hive)) {
2574                                         r = -ENOENT;
2575                                         goto init_failed;
2576                                 }
2577
2578                                 if (!hive->reset_domain ||
2579                                     !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2580                                         r = -ENOENT;
2581                                         amdgpu_put_xgmi_hive(hive);
2582                                         goto init_failed;
2583                                 }
2584
2585                                 /* Drop the early temporary reset domain we created for device */
2586                                 amdgpu_reset_put_reset_domain(adev->reset_domain);
2587                                 adev->reset_domain = hive->reset_domain;
2588                                 amdgpu_put_xgmi_hive(hive);
2589                         }
2590                 }
2591         }
2592
2593         r = amdgpu_device_init_schedulers(adev);
2594         if (r)
2595                 goto init_failed;
2596
2597         /* Don't init kfd if the whole hive needs to be reset during init */
2598         if (!adev->gmc.xgmi.pending_reset) {
2599                 kgd2kfd_init_zone_device(adev);
2600                 amdgpu_amdkfd_device_init(adev);
2601         }
2602
2603         amdgpu_fru_get_product_info(adev);
2604
2605 init_failed:
2606
2607         return r;
2608 }
2609
2610 /**
2611  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2612  *
2613  * @adev: amdgpu_device pointer
2614  *
2615  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2616  * this function before a GPU reset.  If the value is retained after a
2617  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2618  */
2619 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2620 {
2621         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2622 }
2623
2624 /**
2625  * amdgpu_device_check_vram_lost - check if vram is valid
2626  *
2627  * @adev: amdgpu_device pointer
2628  *
2629  * Checks the reset magic value written to the gart pointer in VRAM.
2630  * The driver calls this after a GPU reset to see if the contents of
2631  * VRAM are lost or not.
2632  * Returns true if vram is lost, false if not.
2633  */
2634 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2635 {
2636         if (memcmp(adev->gart.ptr, adev->reset_magic,
2637                         AMDGPU_RESET_MAGIC_NUM))
2638                 return true;
2639
2640         if (!amdgpu_in_reset(adev))
2641                 return false;
2642
2643         /*
2644          * For all ASICs with baco/mode1 reset, the VRAM is
2645          * always assumed to be lost.
2646          */
2647         switch (amdgpu_asic_reset_method(adev)) {
2648         case AMD_RESET_METHOD_BACO:
2649         case AMD_RESET_METHOD_MODE1:
2650                 return true;
2651         default:
2652                 return false;
2653         }
2654 }
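
/*
 * Illustrative sketch (not part of this function): the reset path pairs the
 * two magic helpers roughly as follows:
 *
 *   vram_lost = amdgpu_device_check_vram_lost(adev);
 *   if (vram_lost)
 *           amdgpu_inc_vram_lost(adev);        (bookkeeping; assumed helper)
 *   ...
 *   amdgpu_device_fill_reset_magic(adev);      (re-arm for the next reset)
 */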
2655
2656 /**
2657  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2658  *
2659  * @adev: amdgpu_device pointer
2660  * @state: clockgating state (gate or ungate)
2661  *
2662  * The list of all the hardware IPs that make up the asic is walked and the
2663  * set_clockgating_state callbacks are run.
2664  * During the late init pass this enables clockgating for hardware IPs;
2665  * during the fini or suspend pass it disables clockgating for them.
2666  * Returns 0 on success, negative error code on failure.
2667  */
2668
2669 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2670                                enum amd_clockgating_state state)
2671 {
2672         int i, j, r;
2673
2674         if (amdgpu_emu_mode == 1)
2675                 return 0;
2676
2677         for (j = 0; j < adev->num_ip_blocks; j++) {
2678                 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2679                 if (!adev->ip_blocks[i].status.late_initialized)
2680                         continue;
2681                 /* skip CG for GFX, SDMA on S0ix */
2682                 if (adev->in_s0ix &&
2683                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2684                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2685                         continue;
2686                 /* skip CG for VCE/UVD, it's handled specially */
2687                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2688                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2689                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2690                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2691                     adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2692                         /* enable clockgating to save power */
2693                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2694                                                                                      state);
2695                         if (r) {
2696                                 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2697                                           adev->ip_blocks[i].version->funcs->name, r);
2698                                 return r;
2699                         }
2700                 }
2701         }
2702
2703         return 0;
2704 }
2705
2706 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2707                                enum amd_powergating_state state)
2708 {
2709         int i, j, r;
2710
2711         if (amdgpu_emu_mode == 1)
2712                 return 0;
2713
2714         for (j = 0; j < adev->num_ip_blocks; j++) {
2715                 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2716                 if (!adev->ip_blocks[i].status.late_initialized)
2717                         continue;
2718                 /* skip PG for GFX, SDMA on S0ix */
2719                 if (adev->in_s0ix &&
2720                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2721                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2722                         continue;
2723                 /* skip PG for VCE/UVD, it's handled specially */
2724                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2725                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2726                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2727                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2728                     adev->ip_blocks[i].version->funcs->set_powergating_state) {
2729                         /* enable powergating to save power */
2730                         r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2731                                                                                         state);
2732                         if (r) {
2733                                 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2734                                           adev->ip_blocks[i].version->funcs->name, r);
2735                                 return r;
2736                         }
2737                 }
2738         }
2739         return 0;
2740 }
2741
2742 static int amdgpu_device_enable_mgpu_fan_boost(void)
2743 {
2744         struct amdgpu_gpu_instance *gpu_ins;
2745         struct amdgpu_device *adev;
2746         int i, ret = 0;
2747
2748         mutex_lock(&mgpu_info.mutex);
2749
2750         /*
2751          * MGPU fan boost feature should be enabled
2752          * only when there are two or more dGPUs in
2753          * the system
2754          */
2755         if (mgpu_info.num_dgpu < 2)
2756                 goto out;
2757
2758         for (i = 0; i < mgpu_info.num_dgpu; i++) {
2759                 gpu_ins = &(mgpu_info.gpu_ins[i]);
2760                 adev = gpu_ins->adev;
2761                 if (!(adev->flags & AMD_IS_APU) &&
2762                     !gpu_ins->mgpu_fan_enabled) {
2763                         ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2764                         if (ret)
2765                                 break;
2766
2767                         gpu_ins->mgpu_fan_enabled = 1;
2768                 }
2769         }
2770
2771 out:
2772         mutex_unlock(&mgpu_info.mutex);
2773
2774         return ret;
2775 }
2776
2777 /**
2778  * amdgpu_device_ip_late_init - run late init for hardware IPs
2779  *
2780  * @adev: amdgpu_device pointer
2781  *
2782  * Late initialization pass for hardware IPs.  The list of all the hardware
2783  * IPs that make up the asic is walked and the late_init callbacks are run.
2784  * late_init covers any special initialization that an IP requires
2785  * after all of the IPs have been initialized or something that needs to happen
2786  * late in the init process.
2787  * Returns 0 on success, negative error code on failure.
2788  */
2789 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2790 {
2791         struct amdgpu_gpu_instance *gpu_instance;
2792         int i = 0, r;
2793
2794         for (i = 0; i < adev->num_ip_blocks; i++) {
2795                 if (!adev->ip_blocks[i].status.hw)
2796                         continue;
2797                 if (adev->ip_blocks[i].version->funcs->late_init) {
2798                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2799                         if (r) {
2800                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2801                                           adev->ip_blocks[i].version->funcs->name, r);
2802                                 return r;
2803                         }
2804                 }
2805                 adev->ip_blocks[i].status.late_initialized = true;
2806         }
2807
2808         r = amdgpu_ras_late_init(adev);
2809         if (r) {
2810                 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2811                 return r;
2812         }
2813
2814         amdgpu_ras_set_error_query_ready(adev, true);
2815
2816         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2817         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2818
2819         amdgpu_device_fill_reset_magic(adev);
2820
2821         r = amdgpu_device_enable_mgpu_fan_boost();
2822         if (r)
2823                 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2824
2825         /* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
2826         if (amdgpu_passthrough(adev) &&
2827             ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2828              adev->asic_type == CHIP_ALDEBARAN))
2829                 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2830
2831         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2832                 mutex_lock(&mgpu_info.mutex);
2833
2834                 /*
2835                  * Reset the device p-state to low, as the device boots with it set high.
2836                  *
2837                  * This should be performed only after all devices from the same
2838                  * hive get initialized.
2839                  *
2840                  * However, the number of devices in the hive is not known in
2841                  * advance, as it is counted one by one as each device initializes.
2842                  *
2843                  * So we wait for all XGMI interlinked devices to be initialized.
2844                  * This may introduce some delay, as those devices may come from
2845                  * different hives. But that should be OK.
2846                  */
2847                 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2848                         for (i = 0; i < mgpu_info.num_gpu; i++) {
2849                                 gpu_instance = &(mgpu_info.gpu_ins[i]);
2850                                 if (gpu_instance->adev->flags & AMD_IS_APU)
2851                                         continue;
2852
2853                                 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2854                                                 AMDGPU_XGMI_PSTATE_MIN);
2855                                 if (r) {
2856                                         DRM_ERROR("pstate setting failed (%d).\n", r);
2857                                         break;
2858                                 }
2859                         }
2860                 }
2861
2862                 mutex_unlock(&mgpu_info.mutex);
2863         }
2864
2865         return 0;
2866 }
2867
2868 /**
2869  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2870  *
2871  * @adev: amdgpu_device pointer
2872  *
2873  * For ASICs that need to disable the SMC first
2874  */
2875 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2876 {
2877         int i, r;
2878
2879         if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
2880                 return;
2881
2882         for (i = 0; i < adev->num_ip_blocks; i++) {
2883                 if (!adev->ip_blocks[i].status.hw)
2884                         continue;
2885                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2886                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2887                         /* XXX handle errors */
2888                         if (r) {
2889                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2890                                           adev->ip_blocks[i].version->funcs->name, r);
2891                         }
2892                         adev->ip_blocks[i].status.hw = false;
2893                         break;
2894                 }
2895         }
2896 }
2897
2898 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2899 {
2900         int i, r;
2901
2902         for (i = 0; i < adev->num_ip_blocks; i++) {
2903                 if (!adev->ip_blocks[i].version->funcs->early_fini)
2904                         continue;
2905
2906                 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2907                 if (r) {
2908                         DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2909                                   adev->ip_blocks[i].version->funcs->name, r);
2910                 }
2911         }
2912
2913         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2914         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2915
2916         amdgpu_amdkfd_suspend(adev, false);
2917
2918         /* Workaround for ASICs that need to disable the SMC first */
2919         amdgpu_device_smu_fini_early(adev);
2920
2921         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2922                 if (!adev->ip_blocks[i].status.hw)
2923                         continue;
2924
2925                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2926                 /* XXX handle errors */
2927                 if (r) {
2928                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2929                                   adev->ip_blocks[i].version->funcs->name, r);
2930                 }
2931
2932                 adev->ip_blocks[i].status.hw = false;
2933         }
2934
2935         if (amdgpu_sriov_vf(adev)) {
2936                 if (amdgpu_virt_release_full_gpu(adev, false))
2937                         DRM_ERROR("failed to release exclusive mode on fini\n");
2938         }
2939
2940         return 0;
2941 }
2942
2943 /**
2944  * amdgpu_device_ip_fini - run fini for hardware IPs
2945  *
2946  * @adev: amdgpu_device pointer
2947  *
2948  * Main teardown pass for hardware IPs.  The list of all the hardware
2949  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2950  * are run.  hw_fini tears down the hardware associated with each IP
2951  * and sw_fini tears down any software state associated with each IP.
2952  * Returns 0 on success, negative error code on failure.
2953  */
2954 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2955 {
2956         int i, r;
2957
2958         if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2959                 amdgpu_virt_release_ras_err_handler_data(adev);
2960
2961         if (adev->gmc.xgmi.num_physical_nodes > 1)
2962                 amdgpu_xgmi_remove_device(adev);
2963
2964         amdgpu_amdkfd_device_fini_sw(adev);
2965
2966         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2967                 if (!adev->ip_blocks[i].status.sw)
2968                         continue;
2969
2970                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2971                         amdgpu_ucode_free_bo(adev);
2972                         amdgpu_free_static_csa(&adev->virt.csa_obj);
2973                         amdgpu_device_wb_fini(adev);
2974                         amdgpu_device_mem_scratch_fini(adev);
2975                         amdgpu_ib_pool_fini(adev);
2976                 }
2977
2978                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2979                 /* XXX handle errors */
2980                 if (r) {
2981                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2982                                   adev->ip_blocks[i].version->funcs->name, r);
2983                 }
2984                 adev->ip_blocks[i].status.sw = false;
2985                 adev->ip_blocks[i].status.valid = false;
2986         }
2987
2988         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2989                 if (!adev->ip_blocks[i].status.late_initialized)
2990                         continue;
2991                 if (adev->ip_blocks[i].version->funcs->late_fini)
2992                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2993                 adev->ip_blocks[i].status.late_initialized = false;
2994         }
2995
2996         amdgpu_ras_fini(adev);
2997
2998         return 0;
2999 }
3000
3001 /**
3002  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
3003  *
3004  * @work: work_struct.
3005  */
3006 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
3007 {
3008         struct amdgpu_device *adev =
3009                 container_of(work, struct amdgpu_device, delayed_init_work.work);
3010         int r;
3011
3012         r = amdgpu_ib_ring_tests(adev);
3013         if (r)
3014                 DRM_ERROR("ib ring test failed (%d).\n", r);
3015 }
3016
3017 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
3018 {
3019         struct amdgpu_device *adev =
3020                 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
3021
3022         WARN_ON_ONCE(adev->gfx.gfx_off_state);
3023         WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
3024
3025         if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
3026                 adev->gfx.gfx_off_state = true;
3027 }
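
/*
 * For context: this delayed work is the "arming" half of GFXOFF control.
 * Its counterpart, amdgpu_gfx_off_ctrl() in amdgpu_gfx.c, keeps a request
 * refcount and only schedules this handler once the last blocker drops;
 * disabling bumps the refcount, cancels any pending work and, if GFXOFF
 * was already entered, asks the SMU to leave it again. A simplified
 * sketch of the enable path (illustrative, not the verbatim
 * implementation; the 100 ms delay is an assumed example value):
 *
 *	mutex_lock(&adev->gfx.gfx_off_mutex);
 *	if (enable && !--adev->gfx.gfx_off_req_count)
 *		schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
 *				      msecs_to_jiffies(100));
 *	mutex_unlock(&adev->gfx.gfx_off_mutex);
 */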
3028
3029 /**
3030  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
3031  *
3032  * @adev: amdgpu_device pointer
3033  *
3034  * Main suspend function for hardware IPs.  The list of all the hardware
3035  * IPs that make up the asic is walked, clockgating is disabled and the
3036  * suspend callbacks are run.  suspend puts the hardware and software state
3037  * in each IP into a state suitable for suspend.
3038  * Returns 0 on success, negative error code on failure.
3039  */
3040 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
3041 {
3042         int i, r;
3043
3044         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3045         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3046
3047         /*
3048          * Per the PMFW team's suggestion, the driver needs to handle disabling
3049          * the gfxoff and df cstate features for gpu reset (e.g. Mode1Reset)
3050          * scenarios. Add the missing df cstate disablement here.
3051          */
3052         if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
3053                 dev_warn(adev->dev, "Failed to disallow df cstate");
3054
3055         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3056                 if (!adev->ip_blocks[i].status.valid)
3057                         continue;
3058
3059                 /* displays are handled separately */
3060                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
3061                         continue;
3062
3064                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3065                 /* XXX handle errors */
3066                 if (r) {
3067                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
3068                                   adev->ip_blocks[i].version->funcs->name, r);
3069                         return r;
3070                 }
3071
3072                 adev->ip_blocks[i].status.hw = false;
3073         }
3074
3075         return 0;
3076 }
3077
3078 /**
3079  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3080  *
3081  * @adev: amdgpu_device pointer
3082  *
3083  * Main suspend function for hardware IPs.  The list of all the hardware
3084  * IPs that make up the asic is walked, clockgating is disabled and the
3085  * suspend callbacks are run.  suspend puts the hardware and software state
3086  * in each IP into a state suitable for suspend.
3087  * Returns 0 on success, negative error code on failure.
3088  */
3089 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
3090 {
3091         int i, r;
3092
3093         if (adev->in_s0ix)
3094                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
3095
3096         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3097                 if (!adev->ip_blocks[i].status.valid)
3098                         continue;
3099                 /* displays are handled in phase1 */
3100                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3101                         continue;
3102                 /* PSP loses its connection when err_event_athub occurs */
3103                 if (amdgpu_ras_intr_triggered() &&
3104                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3105                         adev->ip_blocks[i].status.hw = false;
3106                         continue;
3107                 }
3108
3109                 /* skip unnecessary suspend if we have not initialized them yet */
3110                 if (adev->gmc.xgmi.pending_reset &&
3111                     !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3112                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3113                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3114                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3115                         adev->ip_blocks[i].status.hw = false;
3116                         continue;
3117                 }
3118
3119                 /* skip suspend of gfx/mes and psp for S0ix:
3120                  * gfx is in the gfxoff state, so on resume it will exit gfxoff just
3121                  * like at runtime. PSP is also part of the always-on hardware,
3122                  * so there is no need to suspend it.
3123                  */
3124                 if (adev->in_s0ix &&
3125                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3126                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3127                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3128                         continue;
3129
3130                 /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
3131                 if (adev->in_s0ix &&
3132                     (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
3133                      IP_VERSION(5, 0, 0)) &&
3134                     (adev->ip_blocks[i].version->type ==
3135                      AMD_IP_BLOCK_TYPE_SDMA))
3136                         continue;
3137
3138                 /* During cold boot, swPSP provides the IMU and RLC FW binaries to TOS.
3139                  * These live in the TMR and are expected to be reused by PSP-TOS, which
3140                  * reloads them from that location; RLC autoload is likewise triggered
3141                  * from there based on the PMFW -> PSP message during the re-init sequence.
3142                  * Therefore, PSP suspend & resume should be skipped on IMU-enabled APU
3143                  * ASICs to avoid destroying the TMR and reloading the FWs again.
3144                  */
3145                 if (amdgpu_in_reset(adev) &&
3146                     (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
3147                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3148                         continue;
3149
3151                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3152                 /* XXX handle errors */
3153                 if (r) {
3154                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
3155                                   adev->ip_blocks[i].version->funcs->name, r);
3156                 }
3157                 adev->ip_blocks[i].status.hw = false;
3158                 /* handle putting the SMC in the appropriate state */
3159                 if (!amdgpu_sriov_vf(adev)) {
3160                         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3161                                 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3162                                 if (r) {
3163                                         DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3164                                                         adev->mp1_state, r);
3165                                         return r;
3166                                 }
3167                         }
3168                 }
3169         }
3170
3171         return 0;
3172 }
3173
3174 /**
3175  * amdgpu_device_ip_suspend - run suspend for hardware IPs
3176  *
3177  * @adev: amdgpu_device pointer
3178  *
3179  * Main suspend function for hardware IPs.  The list of all the hardware
3180  * IPs that make up the asic is walked, clockgating is disabled and the
3181  * suspend callbacks are run.  suspend puts the hardware and software state
3182  * in each IP into a state suitable for suspend.
3183  * Returns 0 on success, negative error code on failure.
3184  */
3185 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3186 {
3187         int r;
3188
3189         if (amdgpu_sriov_vf(adev)) {
3190                 amdgpu_virt_fini_data_exchange(adev);
3191                 amdgpu_virt_request_full_gpu(adev, false);
3192         }
3193
3194         r = amdgpu_device_ip_suspend_phase1(adev);
3195         if (r)
3196                 return r;
3197         r = amdgpu_device_ip_suspend_phase2(adev);
3198
3199         if (amdgpu_sriov_vf(adev))
3200                 amdgpu_virt_release_full_gpu(adev, false);
3201
3202         return r;
3203 }
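
/*
 * Note that full system suspend does not use this wrapper: as
 * amdgpu_device_suspend() later in this file shows, the two phases are
 * called directly with extra work in between, roughly:
 *
 *	amdgpu_device_ip_suspend_phase1(adev);	// displays first
 *	amdgpu_device_evict_resources(adev);	// move BOs out of VRAM
 *	amdgpu_fence_driver_hw_fini(adev);
 *	amdgpu_device_ip_suspend_phase2(adev);	// all remaining IPs
 */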
3204
3205 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3206 {
3207         int i, r;
3208
3209         static enum amd_ip_block_type ip_order[] = {
3210                 AMD_IP_BLOCK_TYPE_COMMON,
3211                 AMD_IP_BLOCK_TYPE_GMC,
3212                 AMD_IP_BLOCK_TYPE_PSP,
3213                 AMD_IP_BLOCK_TYPE_IH,
3214         };
3215
3216         for (i = 0; i < adev->num_ip_blocks; i++) {
3217                 int j;
3218                 struct amdgpu_ip_block *block;
3219
3220                 block = &adev->ip_blocks[i];
3221                 block->status.hw = false;
3222
3223                 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3224
3225                         if (block->version->type != ip_order[j] ||
3226                                 !block->status.valid)
3227                                 continue;
3228
3229                         r = block->version->funcs->hw_init(adev);
3230                         DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3231                         if (r)
3232                                 return r;
3233                         block->status.hw = true;
3234                 }
3235         }
3236
3237         return 0;
3238 }
3239
3240 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3241 {
3242         int i, r;
3243
3244         static enum amd_ip_block_type ip_order[] = {
3245                 AMD_IP_BLOCK_TYPE_SMC,
3246                 AMD_IP_BLOCK_TYPE_DCE,
3247                 AMD_IP_BLOCK_TYPE_GFX,
3248                 AMD_IP_BLOCK_TYPE_SDMA,
3249                 AMD_IP_BLOCK_TYPE_MES,
3250                 AMD_IP_BLOCK_TYPE_UVD,
3251                 AMD_IP_BLOCK_TYPE_VCE,
3252                 AMD_IP_BLOCK_TYPE_VCN,
3253                 AMD_IP_BLOCK_TYPE_JPEG
3254         };
3255
3256         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3257                 int j;
3258                 struct amdgpu_ip_block *block;
3259
3260                 for (j = 0; j < adev->num_ip_blocks; j++) {
3261                         block = &adev->ip_blocks[j];
3262
3263                         if (block->version->type != ip_order[i] ||
3264                                 !block->status.valid ||
3265                                 block->status.hw)
3266                                 continue;
3267
3268                         if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3269                                 r = block->version->funcs->resume(adev);
3270                         else
3271                                 r = block->version->funcs->hw_init(adev);
3272
3273                         DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3274                         if (r)
3275                                 return r;
3276                         block->status.hw = true;
3277                 }
3278         }
3279
3280         return 0;
3281 }
3282
3283 /**
3284  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3285  *
3286  * @adev: amdgpu_device pointer
3287  *
3288  * First resume function for hardware IPs.  The list of all the hardware
3289  * IPs that make up the asic is walked and the resume callbacks are run for
3290  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3291  * after a suspend and updates the software state as necessary.  This
3292  * function is also used for restoring the GPU after a GPU reset.
3293  * Returns 0 on success, negative error code on failure.
3294  */
3295 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3296 {
3297         int i, r;
3298
3299         for (i = 0; i < adev->num_ip_blocks; i++) {
3300                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3301                         continue;
3302                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3303                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3304                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3305                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3306
3307                         r = adev->ip_blocks[i].version->funcs->resume(adev);
3308                         if (r) {
3309                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
3310                                           adev->ip_blocks[i].version->funcs->name, r);
3311                                 return r;
3312                         }
3313                         adev->ip_blocks[i].status.hw = true;
3314                 }
3315         }
3316
3317         return 0;
3318 }
3319
3320 /**
3321  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3322  *
3323  * @adev: amdgpu_device pointer
3324  *
3325  * Second resume function for hardware IPs.  The list of all the hardware
3326  * IPs that make up the asic is walked and the resume callbacks are run for
3327  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3328  * functional state after a suspend and updates the software state as
3329  * necessary.  This function is also used for restoring the GPU after a GPU
3330  * reset.
3331  * Returns 0 on success, negative error code on failure.
3332  */
3333 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3334 {
3335         int i, r;
3336
3337         for (i = 0; i < adev->num_ip_blocks; i++) {
3338                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3339                         continue;
3340                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3341                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3342                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3343                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3344                         continue;
3345                 r = adev->ip_blocks[i].version->funcs->resume(adev);
3346                 if (r) {
3347                         DRM_ERROR("resume of IP block <%s> failed %d\n",
3348                                   adev->ip_blocks[i].version->funcs->name, r);
3349                         return r;
3350                 }
3351                 adev->ip_blocks[i].status.hw = true;
3352         }
3353
3354         return 0;
3355 }
3356
3357 /**
3358  * amdgpu_device_ip_resume - run resume for hardware IPs
3359  *
3360  * @adev: amdgpu_device pointer
3361  *
3362  * Main resume function for hardware IPs.  The hardware IPs
3363  * are split into two resume functions because they are
3364  * also used in recovering from a GPU reset and some additional
3365  * steps need to be taken between them.  In this case (S3/S4) they are
3366  * run sequentially.
3367  * Returns 0 on success, negative error code on failure.
3368  */
3369 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3370 {
3371         int r;
3372
3373         r = amdgpu_device_ip_resume_phase1(adev);
3374         if (r)
3375                 return r;
3376
3377         r = amdgpu_device_fw_loading(adev);
3378         if (r)
3379                 return r;
3380
3381         r = amdgpu_device_ip_resume_phase2(adev);
3382
3383         return r;
3384 }
3385
3386 /**
3387  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3388  *
3389  * @adev: amdgpu_device pointer
3390  *
3391  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3392  */
3393 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3394 {
3395         if (amdgpu_sriov_vf(adev)) {
3396                 if (adev->is_atom_fw) {
3397                         if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3398                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3399                 } else {
3400                         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3401                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3402                 }
3403
3404                 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3405                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3406         }
3407 }
3408
3409 /**
3410  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3411  *
3412  * @asic_type: AMD asic type
3413  *
3414  * Check if there is DC (new modesetting infrastructure) support for an asic.
3415  * returns true if DC has support, false if not.
3416  */
3417 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3418 {
3419         switch (asic_type) {
3420 #ifdef CONFIG_DRM_AMDGPU_SI
3421         case CHIP_HAINAN:
3422 #endif
3423         case CHIP_TOPAZ:
3424                 /* chips with no display hardware */
3425                 return false;
3426 #if defined(CONFIG_DRM_AMD_DC)
3427         case CHIP_TAHITI:
3428         case CHIP_PITCAIRN:
3429         case CHIP_VERDE:
3430         case CHIP_OLAND:
3431                 /*
3432                  * We have systems in the wild with these ASICs that require
3433                  * LVDS and VGA support which is not supported with DC.
3434                  *
3435                  * Fallback to the non-DC driver here by default so as not to
3436                  * cause regressions.
3437                  */
3438 #if defined(CONFIG_DRM_AMD_DC_SI)
3439                 return amdgpu_dc > 0;
3440 #else
3441                 return false;
3442 #endif
3443         case CHIP_BONAIRE:
3444         case CHIP_KAVERI:
3445         case CHIP_KABINI:
3446         case CHIP_MULLINS:
3447                 /*
3448                  * We have systems in the wild with these ASICs that require
3449                  * VGA support which is not supported with DC.
3450                  *
3451                  * Fallback to the non-DC driver here by default so as not to
3452                  * cause regressions.
3453                  */
3454                 return amdgpu_dc > 0;
3455         default:
3456                 return amdgpu_dc != 0;
3457 #else
3458         default:
3459                 if (amdgpu_dc > 0)
3460                         DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
3461                 return false;
3462 #endif
3463         }
3464 }
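
/*
 * Example (illustrative): with the default amdgpu_dc value of -1, the
 * SI/CIK parts listed above keep the legacy display path, so DC is only
 * used there when explicitly requested, e.g. by booting with amdgpu.dc=1
 * on the kernel command line; amdgpu.dc=0 forces the legacy path everywhere.
 */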
3465
3466 /**
3467  * amdgpu_device_has_dc_support - check if dc is supported
3468  *
3469  * @adev: amdgpu_device pointer
3470  *
3471  * Returns true for supported, false for not supported
3472  */
3473 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3474 {
3475         if (adev->enable_virtual_display ||
3476             (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3477                 return false;
3478
3479         return amdgpu_device_asic_has_dc_support(adev->asic_type);
3480 }
3481
3482 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3483 {
3484         struct amdgpu_device *adev =
3485                 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3486         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3487
3488         /* It's a bug to not have a hive within this function */
3489         if (WARN_ON(!hive))
3490                 return;
3491
3492         /*
3493          * Use task barrier to synchronize all xgmi reset works across the
3494          * hive. task_barrier_enter and task_barrier_exit will block
3495          * until all the threads running the xgmi reset works reach
3496          * those points. task_barrier_full will do both blocks.
3497          */
3498         if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3499
3500                 task_barrier_enter(&hive->tb);
3501                 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3502
3503                 if (adev->asic_reset_res)
3504                         goto fail;
3505
3506                 task_barrier_exit(&hive->tb);
3507                 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3508
3509                 if (adev->asic_reset_res)
3510                         goto fail;
3511
3512                 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3513                     adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3514                         adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3515         } else {
3516
3517                 task_barrier_full(&hive->tb);
3518                 adev->asic_reset_res =  amdgpu_asic_reset(adev);
3519         }
3520
3521 fail:
3522         if (adev->asic_reset_res)
3523                 DRM_WARN("ASIC reset failed with error %d for drm dev %s",
3524                          adev->asic_reset_res, adev_to_drm(adev)->unique);
3525         amdgpu_put_xgmi_hive(hive);
3526 }
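
/*
 * Illustrative timeline for a two-GPU hive taking the BACO path above:
 *
 *	GPU0: enter(tb) --+-- baco_enter() -- exit(tb) --+-- baco_exit()
 *	GPU1: enter(tb) --+-- baco_enter() -- exit(tb) --+-- baco_exit()
 *
 * Neither device starts BACO entry (or exit) until every hive member has
 * reached the same barrier, which keeps the XGMI link state consistent
 * across the hive during the reset.
 */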
3527
3528 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3529 {
3530         char *input = amdgpu_lockup_timeout;
3531         char *timeout_setting = NULL;
3532         int index = 0;
3533         long timeout;
3534         int ret = 0;
3535
3536         /*
3537          * By default, the timeout for non-compute jobs is 10000 ms
3538          * and 60000 ms for compute jobs.
3539          * In SR-IOV or passthrough mode, the timeout for compute
3540          * jobs is 60000 ms by default.
3541          */
3542         adev->gfx_timeout = msecs_to_jiffies(10000);
3543         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3544         if (amdgpu_sriov_vf(adev))
3545                 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3546                                         msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3547         else
3548                 adev->compute_timeout =  msecs_to_jiffies(60000);
3549
3550         if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3551                 while ((timeout_setting = strsep(&input, ",")) &&
3552                                 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3553                         ret = kstrtol(timeout_setting, 0, &timeout);
3554                         if (ret)
3555                                 return ret;
3556
3557                         if (timeout == 0) {
3558                                 index++;
3559                                 continue;
3560                         } else if (timeout < 0) {
3561                                 timeout = MAX_SCHEDULE_TIMEOUT;
3562                                 dev_warn(adev->dev, "lockup timeout disabled");
3563                                 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3564                         } else {
3565                                 timeout = msecs_to_jiffies(timeout);
3566                         }
3567
3568                         switch (index++) {
3569                         case 0:
3570                                 adev->gfx_timeout = timeout;
3571                                 break;
3572                         case 1:
3573                                 adev->compute_timeout = timeout;
3574                                 break;
3575                         case 2:
3576                                 adev->sdma_timeout = timeout;
3577                                 break;
3578                         case 3:
3579                                 adev->video_timeout = timeout;
3580                                 break;
3581                         default:
3582                                 break;
3583                         }
3584                 }
3585                 /*
3586                  * If only one value is specified, it applies
3587                  * to all non-compute jobs.
3588                  */
3589                 if (index == 1) {
3590                         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3591                         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3592                                 adev->compute_timeout = adev->gfx_timeout;
3593                 }
3594         }
3595
3596         return ret;
3597 }
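
/*
 * Example (illustrative): the comma-separated values map to the switch()
 * indices above, i.e. gfx, compute, sdma and video in that order, so
 *
 *	amdgpu.lockup_timeout=10000,60000,10000,10000
 *
 * matches the bare-metal defaults, while a single value such as
 * amdgpu.lockup_timeout=5000 applies to all non-compute jobs. A value of
 * 0 keeps the default for that slot and a negative value disables the
 * timeout entirely.
 */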
3598
3599 /**
3600  * amdgpu_device_check_iommu_direct_map - check if RAM is direct mapped to the GPU
3601  *
3602  * @adev: amdgpu_device pointer
3603  *
3604  * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3605  */
3606 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3607 {
3608         struct iommu_domain *domain;
3609
3610         domain = iommu_get_domain_for_dev(adev->dev);
3611         if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3612                 adev->ram_is_direct_mapped = true;
3613 }
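
/*
 * In identity (passthrough) mode the device sees system RAM at its
 * physical addresses, so no IOMMU translation is applied; booting with
 * iommu=pt, for instance, typically yields IOMMU_DOMAIN_IDENTITY here.
 */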
3614
3615 static const struct attribute *amdgpu_dev_attributes[] = {
3616         &dev_attr_pcie_replay_count.attr,
3617         NULL
3618 };
3619
3620 static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
3621 {
3622         if (amdgpu_mcbp == 1)
3623                 adev->gfx.mcbp = true;
3624         else if (amdgpu_mcbp == 0)
3625                 adev->gfx.mcbp = false;
3626         else if ((amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 0, 0)) &&
3627                  (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0)) &&
3628                  adev->gfx.num_gfx_rings)
3629                 adev->gfx.mcbp = true;
3630
3631         if (amdgpu_sriov_vf(adev))
3632                 adev->gfx.mcbp = true;
3633
3634         if (adev->gfx.mcbp)
3635                 DRM_INFO("MCBP is enabled\n");
3636 }
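
/*
 * Example (illustrative): amdgpu.mcbp=1 force-enables mid-command-buffer
 * preemption and amdgpu.mcbp=0 force-disables it; the default (-1) defers
 * to the GC IP-version check above, while SR-IOV always enables it.
 */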
3637
3638 /**
3639  * amdgpu_device_init - initialize the driver
3640  *
3641  * @adev: amdgpu_device pointer
3642  * @flags: driver flags
3643  *
3644  * Initializes the driver info and hw (all asics).
3645  * Returns 0 for success or an error on failure.
3646  * Called at driver startup.
3647  */
3648 int amdgpu_device_init(struct amdgpu_device *adev,
3649                        uint32_t flags)
3650 {
3651         struct drm_device *ddev = adev_to_drm(adev);
3652         struct pci_dev *pdev = adev->pdev;
3653         int r, i;
3654         bool px = false;
3655         u32 max_MBps;
3656         int tmp;
3657
3658         adev->shutdown = false;
3659         adev->flags = flags;
3660
3661         if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3662                 adev->asic_type = amdgpu_force_asic_type;
3663         else
3664                 adev->asic_type = flags & AMD_ASIC_MASK;
3665
3666         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3667         if (amdgpu_emu_mode == 1)
3668                 adev->usec_timeout *= 10;
3669         adev->gmc.gart_size = 512 * 1024 * 1024;
3670         adev->accel_working = false;
3671         adev->num_rings = 0;
3672         RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3673         adev->mman.buffer_funcs = NULL;
3674         adev->mman.buffer_funcs_ring = NULL;
3675         adev->vm_manager.vm_pte_funcs = NULL;
3676         adev->vm_manager.vm_pte_num_scheds = 0;
3677         adev->gmc.gmc_funcs = NULL;
3678         adev->harvest_ip_mask = 0x0;
3679         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3680         bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3681
3682         adev->smc_rreg = &amdgpu_invalid_rreg;
3683         adev->smc_wreg = &amdgpu_invalid_wreg;
3684         adev->pcie_rreg = &amdgpu_invalid_rreg;
3685         adev->pcie_wreg = &amdgpu_invalid_wreg;
3686         adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext;
3687         adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext;
3688         adev->pciep_rreg = &amdgpu_invalid_rreg;
3689         adev->pciep_wreg = &amdgpu_invalid_wreg;
3690         adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3691         adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3692         adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext;
3693         adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext;
3694         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3695         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3696         adev->didt_rreg = &amdgpu_invalid_rreg;
3697         adev->didt_wreg = &amdgpu_invalid_wreg;
3698         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3699         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3700         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3701         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3702
3703         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3704                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3705                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3706
3707         /* mutex initializations are all done here so we
3708          * can call these functions again later without locking issues
3709          */
3710         mutex_init(&adev->firmware.mutex);
3711         mutex_init(&adev->pm.mutex);
3712         mutex_init(&adev->gfx.gpu_clock_mutex);
3713         mutex_init(&adev->srbm_mutex);
3714         mutex_init(&adev->gfx.pipe_reserve_mutex);
3715         mutex_init(&adev->gfx.gfx_off_mutex);
3716         mutex_init(&adev->gfx.partition_mutex);
3717         mutex_init(&adev->grbm_idx_mutex);
3718         mutex_init(&adev->mn_lock);
3719         mutex_init(&adev->virt.vf_errors.lock);
3720         hash_init(adev->mn_hash);
3721         mutex_init(&adev->psp.mutex);
3722         mutex_init(&adev->notifier_lock);
3723         mutex_init(&adev->pm.stable_pstate_ctx_lock);
3724         mutex_init(&adev->benchmark_mutex);
3725
3726         amdgpu_device_init_apu_flags(adev);
3727
3728         r = amdgpu_device_check_arguments(adev);
3729         if (r)
3730                 return r;
3731
3732         spin_lock_init(&adev->mmio_idx_lock);
3733         spin_lock_init(&adev->smc_idx_lock);
3734         spin_lock_init(&adev->pcie_idx_lock);
3735         spin_lock_init(&adev->uvd_ctx_idx_lock);
3736         spin_lock_init(&adev->didt_idx_lock);
3737         spin_lock_init(&adev->gc_cac_idx_lock);
3738         spin_lock_init(&adev->se_cac_idx_lock);
3739         spin_lock_init(&adev->audio_endpt_idx_lock);
3740         spin_lock_init(&adev->mm_stats.lock);
3741
3742         INIT_LIST_HEAD(&adev->shadow_list);
3743         mutex_init(&adev->shadow_list_lock);
3744
3745         INIT_LIST_HEAD(&adev->reset_list);
3746
3747         INIT_LIST_HEAD(&adev->ras_list);
3748
3749         INIT_LIST_HEAD(&adev->pm.od_kobj_list);
3750
3751         INIT_DELAYED_WORK(&adev->delayed_init_work,
3752                           amdgpu_device_delayed_init_work_handler);
3753         INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3754                           amdgpu_device_delay_enable_gfx_off);
3755
3756         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3757
3758         adev->gfx.gfx_off_req_count = 1;
3759         adev->gfx.gfx_off_residency = 0;
3760         adev->gfx.gfx_off_entrycount = 0;
3761         adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3762
3763         atomic_set(&adev->throttling_logging_enabled, 1);
3764         /*
3765          * If throttling continues, logging will be performed every minute
3766          * to avoid log flooding. "-1" is subtracted since the thermal
3767          * throttling interrupt comes every second. Thus, the total logging
3768          * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3769          * for the throttling interrupt) = 60 seconds.
3770          */
3771         ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3772         ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3773
3774         /* Registers mapping */
3775         /* TODO: block userspace mapping of io register */
3776         if (adev->asic_type >= CHIP_BONAIRE) {
3777                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3778                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3779         } else {
3780                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3781                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3782         }
3783
3784         for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3785                 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3786
3787         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3788         if (!adev->rmmio)
3789                 return -ENOMEM;
3790
3791         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3792         DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
3793
3794         /*
3795          * The reset domain needs to be present early, before the XGMI hive is
3796          * discovered (if any) and initialized, so that the reset semaphore and
3797          * in_gpu_reset flag can be used early in init and before calling RREG32.
3798          */
3799         adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3800         if (!adev->reset_domain)
3801                 return -ENOMEM;
3802
3803         /* detect hw virtualization here */
3804         amdgpu_detect_virtualization(adev);
3805
3806         amdgpu_device_get_pcie_info(adev);
3807
3808         r = amdgpu_device_get_job_timeout_settings(adev);
3809         if (r) {
3810                 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3811                 return r;
3812         }
3813
3814         /* early init functions */
3815         r = amdgpu_device_ip_early_init(adev);
3816         if (r)
3817                 return r;
3818
3819         amdgpu_device_set_mcbp(adev);
3820
3821         /* Get rid of things like offb */
3822         r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
3823         if (r)
3824                 return r;
3825
3826         /* Enable TMZ based on IP_VERSION */
3827         amdgpu_gmc_tmz_set(adev);
3828
3829         amdgpu_gmc_noretry_set(adev);
3830         /* Need to get xgmi info early to decide the reset behavior */
3831         if (adev->gmc.xgmi.supported) {
3832                 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3833                 if (r)
3834                         return r;
3835         }
3836
3837         /* enable PCIE atomic ops */
3838         if (amdgpu_sriov_vf(adev)) {
3839                 if (adev->virt.fw_reserve.p_pf2vf)
3840                         adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3841                                                       adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3842                                 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3843         /* APUs with gfx9 onwards don't rely on PCIe atomics; rather, their
3844          * internal path natively supports atomics, so set have_atomics_support to true.
3845          */
3846         } else if ((adev->flags & AMD_IS_APU) &&
3847                    (amdgpu_ip_version(adev, GC_HWIP, 0) >
3848                     IP_VERSION(9, 0, 0))) {
3849                 adev->have_atomics_support = true;
3850         } else {
3851                 adev->have_atomics_support =
3852                         !pci_enable_atomic_ops_to_root(adev->pdev,
3853                                           PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3854                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3855         }
3856
3857         if (!adev->have_atomics_support)
3858                 dev_info(adev->dev, "PCIe atomic ops are not supported\n");
3859
3860         /* doorbell bar mapping and doorbell index init */
3861         amdgpu_doorbell_init(adev);
3862
3863         if (amdgpu_emu_mode == 1) {
3864                 /* post the asic on emulation mode */
3865                 emu_soc_asic_init(adev);
3866                 goto fence_driver_init;
3867         }
3868
3869         amdgpu_reset_init(adev);
3870
3871         /* detect if we are running with an SR-IOV vbios */
3872         if (adev->bios)
3873                 amdgpu_device_detect_sriov_bios(adev);
3874
3875         /* check if we need to reset the asic
3876          *  E.g., driver was not cleanly unloaded previously, etc.
3877          */
3878         if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3879                 if (adev->gmc.xgmi.num_physical_nodes) {
3880                         dev_info(adev->dev, "Pending hive reset.\n");
3881                         adev->gmc.xgmi.pending_reset = true;
3882                         /* Only need to init the blocks necessary for the SMU to handle the reset */
3883                         for (i = 0; i < adev->num_ip_blocks; i++) {
3884                                 if (!adev->ip_blocks[i].status.valid)
3885                                         continue;
3886                                 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3887                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3888                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3889                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3890                                         DRM_DEBUG("IP %s disabled for hw_init.\n",
3891                                                 adev->ip_blocks[i].version->funcs->name);
3892                                         adev->ip_blocks[i].status.hw = true;
3893                                 }
3894                         }
3895                 } else {
3896                         tmp = amdgpu_reset_method;
3897                         /* It should do a default reset when loading or reloading the driver,
3898                          * regardless of the module parameter reset_method.
3899                          */
3900                         amdgpu_reset_method = AMD_RESET_METHOD_NONE;
3901                         r = amdgpu_asic_reset(adev);
3902                         amdgpu_reset_method = tmp;
3903                         if (r) {
3904                                 dev_err(adev->dev, "asic reset on init failed\n");
3905                                 goto failed;
3906                         }
3907                 }
3908         }
3909
3910         /* Post card if necessary */
3911         if (amdgpu_device_need_post(adev)) {
3912                 if (!adev->bios) {
3913                         dev_err(adev->dev, "no vBIOS found\n");
3914                         r = -EINVAL;
3915                         goto failed;
3916                 }
3917                 DRM_INFO("GPU posting now...\n");
3918                 r = amdgpu_device_asic_init(adev);
3919                 if (r) {
3920                         dev_err(adev->dev, "gpu post error!\n");
3921                         goto failed;
3922                 }
3923         }
3924
3925         if (adev->bios) {
3926                 if (adev->is_atom_fw) {
3927                         /* Initialize clocks */
3928                         r = amdgpu_atomfirmware_get_clock_info(adev);
3929                         if (r) {
3930                                 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3931                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3932                                 goto failed;
3933                         }
3934                 } else {
3935                         /* Initialize clocks */
3936                         r = amdgpu_atombios_get_clock_info(adev);
3937                         if (r) {
3938                                 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3939                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3940                                 goto failed;
3941                         }
3942                         /* init i2c buses */
3943                         if (!amdgpu_device_has_dc_support(adev))
3944                                 amdgpu_atombios_i2c_init(adev);
3945                 }
3946         }
3947
3948 fence_driver_init:
3949         /* Fence driver */
3950         r = amdgpu_fence_driver_sw_init(adev);
3951         if (r) {
3952                 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3953                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3954                 goto failed;
3955         }
3956
3957         /* init the mode config */
3958         drm_mode_config_init(adev_to_drm(adev));
3959
3960         r = amdgpu_device_ip_init(adev);
3961         if (r) {
3962                 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3963                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3964                 goto release_ras_con;
3965         }
3966
3967         amdgpu_fence_driver_hw_init(adev);
3968
3969         dev_info(adev->dev,
3970                 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3971                         adev->gfx.config.max_shader_engines,
3972                         adev->gfx.config.max_sh_per_se,
3973                         adev->gfx.config.max_cu_per_sh,
3974                         adev->gfx.cu_info.number);
3975
3976         adev->accel_working = true;
3977
3978         amdgpu_vm_check_compute_bug(adev);
3979
3980         /* Initialize the buffer migration limit. */
3981         if (amdgpu_moverate >= 0)
3982                 max_MBps = amdgpu_moverate;
3983         else
3984                 max_MBps = 8; /* Allow 8 MB/s. */
3985         /* Get a log2 for easy divisions. */
3986         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
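	/*
	 * e.g. the default 8 MB/s gives log2_max_MBps = 3, letting consumers
	 * derive a byte budget with a cheap shift instead of a division.
	 */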
3987
3988         /*
3989          * Register the gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3990          * Otherwise the mgpu fan boost feature will be skipped because the
3991          * gpu instance count would be too low.
3992          */
3993         amdgpu_register_gpu_instance(adev);
3994
3995         /* enable clockgating, etc. after ib tests, etc. since some blocks require
3996          * explicit gating rather than handling it automatically.
3997          */
3998         if (!adev->gmc.xgmi.pending_reset) {
3999                 r = amdgpu_device_ip_late_init(adev);
4000                 if (r) {
4001                         dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
4002                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
4003                         goto release_ras_con;
4004                 }
4005                 /* must succeed. */
4006                 amdgpu_ras_resume(adev);
4007                 queue_delayed_work(system_wq, &adev->delayed_init_work,
4008                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
4009         }
4010
4011         if (amdgpu_sriov_vf(adev)) {
4012                 amdgpu_virt_release_full_gpu(adev, true);
4013                 flush_delayed_work(&adev->delayed_init_work);
4014         }
4015
4016         /*
4017          * Register the sysfs interfaces after `late_init`, since some of the
4018          * operations performed in `late_init` can affect their creation.
4019          */
4021         r = amdgpu_atombios_sysfs_init(adev);
4022         if (r)
4023                 drm_err(&adev->ddev,
4024                         "registering atombios sysfs failed (%d).\n", r);
4025
4026         r = amdgpu_pm_sysfs_init(adev);
4027         if (r)
4028                 DRM_ERROR("registering pm sysfs failed (%d).\n", r);
4029
4030         r = amdgpu_ucode_sysfs_init(adev);
4031         if (r) {
4032                 adev->ucode_sysfs_en = false;
4033                 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
4034         } else
4035                 adev->ucode_sysfs_en = true;
4036
4037         r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
4038         if (r)
4039                 dev_err(adev->dev, "Could not create amdgpu device attr\n");
4040
4041         amdgpu_fru_sysfs_init(adev);
4042
4043         if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
4044                 r = amdgpu_pmu_init(adev);
4045                 if (r)
4046                         dev_err(adev->dev, "amdgpu_pmu_init failed\n");
4047         }
4047
4048         /* Keep the stored PCI config space at hand for restore after a sudden PCI error */
4049         if (amdgpu_device_cache_pci_state(adev->pdev))
4050                 pci_restore_state(pdev);
4051
4052         /* if we have more than one VGA card, then disable the amdgpu VGA resources */
4053         /* this will fail for cards that aren't VGA class devices, just
4054          * ignore it
4055          */
4056         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4057                 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
4058
4059         px = amdgpu_device_supports_px(ddev);
4060
4061         if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
4062                                 apple_gmux_detect(NULL, NULL)))
4063                 vga_switcheroo_register_client(adev->pdev,
4064                                                &amdgpu_switcheroo_ops, px);
4065
4066         if (px)
4067                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
4068
4069         if (adev->gmc.xgmi.pending_reset)
4070                 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
4071                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
4072
4073         amdgpu_device_check_iommu_direct_map(adev);
4074
4075         return 0;
4076
4077 release_ras_con:
4078         if (amdgpu_sriov_vf(adev))
4079                 amdgpu_virt_release_full_gpu(adev, true);
4080
4081         /* failed in exclusive mode due to timeout */
4082         if (amdgpu_sriov_vf(adev) &&
4083                 !amdgpu_sriov_runtime(adev) &&
4084                 amdgpu_virt_mmio_blocked(adev) &&
4085                 !amdgpu_virt_wait_reset(adev)) {
4086                 dev_err(adev->dev, "VF exclusive mode timeout\n");
4087                 /* Don't send request since VF is inactive. */
4088                 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
4089                 adev->virt.ops = NULL;
4090                 r = -EAGAIN;
4091         }
4092         amdgpu_release_ras_context(adev);
4093
4094 failed:
4095         amdgpu_vf_error_trans_all(adev);
4096
4097         return r;
4098 }
4099
4100 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
4101 {
4102
4103         /* Clear all CPU mappings pointing to this device */
4104         unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
4105
4106         /* Unmap all mapped bars - Doorbell, registers and VRAM */
4107         amdgpu_doorbell_fini(adev);
4108
4109         iounmap(adev->rmmio);
4110         adev->rmmio = NULL;
4111         if (adev->mman.aper_base_kaddr)
4112                 iounmap(adev->mman.aper_base_kaddr);
4113         adev->mman.aper_base_kaddr = NULL;
4114
4115         /* Memory manager related */
4116         if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
4117                 arch_phys_wc_del(adev->gmc.vram_mtrr);
4118                 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
4119         }
4120 }
4121
4122 /**
4123  * amdgpu_device_fini_hw - tear down the driver
4124  *
4125  * @adev: amdgpu_device pointer
4126  *
4127  * Tear down the driver info (all asics).
4128  * Called at driver shutdown.
4129  */
4130 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
4131 {
4132         dev_info(adev->dev, "amdgpu: finishing device.\n");
4133         flush_delayed_work(&adev->delayed_init_work);
4134         adev->shutdown = true;
4135
4136         /* make sure the IB test has finished before entering exclusive mode
4137          * to avoid preemption during the IB test
4138          */
4139         if (amdgpu_sriov_vf(adev)) {
4140                 amdgpu_virt_request_full_gpu(adev, false);
4141                 amdgpu_virt_fini_data_exchange(adev);
4142         }
4143
4144         /* disable all interrupts */
4145         amdgpu_irq_disable_all(adev);
4146         if (adev->mode_info.mode_config_initialized) {
4147                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4148                         drm_helper_force_disable_all(adev_to_drm(adev));
4149                 else
4150                         drm_atomic_helper_shutdown(adev_to_drm(adev));
4151         }
4152         amdgpu_fence_driver_hw_fini(adev);
4153
4154         if (adev->mman.initialized)
4155                 drain_workqueue(adev->mman.bdev.wq);
4156
4157         if (adev->pm.sysfs_initialized)
4158                 amdgpu_pm_sysfs_fini(adev);
4159         if (adev->ucode_sysfs_en)
4160                 amdgpu_ucode_sysfs_fini(adev);
4161         sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4162         amdgpu_fru_sysfs_fini(adev);
4163
4164         /* the ras feature must be disabled before hw fini */
4165         amdgpu_ras_pre_fini(adev);
4166
4167         amdgpu_device_ip_fini_early(adev);
4168
4169         amdgpu_irq_fini_hw(adev);
4170
4171         if (adev->mman.initialized)
4172                 ttm_device_clear_dma_mappings(&adev->mman.bdev);
4173
4174         amdgpu_gart_dummy_page_fini(adev);
4175
4176         if (drm_dev_is_unplugged(adev_to_drm(adev)))
4177                 amdgpu_device_unmap_mmio(adev);
4178
4179 }
4180
4181 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4182 {
4183         int idx;
4184         bool px;
4185
4186         amdgpu_fence_driver_sw_fini(adev);
4187         amdgpu_device_ip_fini(adev);
4188         amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
4189         adev->accel_working = false;
4190         dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4191
4192         amdgpu_reset_fini(adev);
4193
4194         /* free i2c buses */
4195         if (!amdgpu_device_has_dc_support(adev))
4196                 amdgpu_i2c_fini(adev);
4197
4198         if (amdgpu_emu_mode != 1)
4199                 amdgpu_atombios_fini(adev);
4200
4201         kfree(adev->bios);
4202         adev->bios = NULL;
4203
4204         px = amdgpu_device_supports_px(adev_to_drm(adev));
4205
4206         if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
4207                                 apple_gmux_detect(NULL, NULL)))
4208                 vga_switcheroo_unregister_client(adev->pdev);
4209
4210         if (px)
4211                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4212
4213         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4214                 vga_client_unregister(adev->pdev);
4215
4216         if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4217
4218                 iounmap(adev->rmmio);
4219                 adev->rmmio = NULL;
4220                 amdgpu_doorbell_fini(adev);
4221                 drm_dev_exit(idx);
4222         }
4223
4224         if (IS_ENABLED(CONFIG_PERF_EVENTS))
4225                 amdgpu_pmu_fini(adev);
4226         if (adev->mman.discovery_bin)
4227                 amdgpu_discovery_fini(adev);
4228
4229         amdgpu_reset_put_reset_domain(adev->reset_domain);
4230         adev->reset_domain = NULL;
4231
4232         kfree(adev->pci_state);
4233
4234 }
4235
4236 /**
4237  * amdgpu_device_evict_resources - evict device resources
4238  * @adev: amdgpu device object
4239  *
4240  * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4241  * of the vram memory type. Mainly used for evicting device resources
4242  * at suspend time.
4243  *
4244  */
4245 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4246 {
4247         int ret;
4248
4249         /* No need to evict vram on APUs for suspend to ram or s2idle */
4250         if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4251                 return 0;
4252
4253         ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4254         if (ret)
4255                 DRM_WARN("evicting device resources failed\n");
4256         return ret;
4257 }
4258
4259 /*
4260  * Suspend & resume.
4261  */
4262 /**
4263  * amdgpu_device_suspend - initiate device suspend
4264  *
4265  * @dev: drm dev pointer
4266  * @fbcon: notify the fbdev of suspend
4267  *
4268  * Puts the hw in the suspend state (all asics).
4269  * Returns 0 for success or an error on failure.
4270  * Called at driver suspend.
4271  */
4272 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4273 {
4274         struct amdgpu_device *adev = drm_to_adev(dev);
4275         int r = 0;
4276
4277         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4278                 return 0;
4279
4280         adev->in_suspend = true;
4281
4282         /* Evict the majority of BOs before grabbing the full access */
4283         r = amdgpu_device_evict_resources(adev);
4284         if (r)
4285                 return r;
4286
4287         if (amdgpu_sriov_vf(adev)) {
4288                 amdgpu_virt_fini_data_exchange(adev);
4289                 r = amdgpu_virt_request_full_gpu(adev, false);
4290                 if (r)
4291                         return r;
4292         }
4293
4294         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4295                 DRM_WARN("smart shift update failed\n");
4296
4297         if (fbcon)
4298                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4299
4300         cancel_delayed_work_sync(&adev->delayed_init_work);
4301         flush_delayed_work(&adev->gfx.gfx_off_delay_work);
4302
4303         amdgpu_ras_suspend(adev);
4304
4305         amdgpu_device_ip_suspend_phase1(adev);
4306
4307         if (!adev->in_s0ix)
4308                 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4309
4310         r = amdgpu_device_evict_resources(adev);
4311         if (r)
4312                 return r;
4313
4314         amdgpu_fence_driver_hw_fini(adev);
4315
4316         amdgpu_device_ip_suspend_phase2(adev);
4317
4318         if (amdgpu_sriov_vf(adev))
4319                 amdgpu_virt_release_full_gpu(adev, false);
4320
4321         return 0;
4322 }
4323
4324 /**
4325  * amdgpu_device_resume - initiate device resume
4326  *
4327  * @dev: drm dev pointer
4328  * @fbcon: notify the fbdev of resume
4329  *
4330  * Bring the hw back to operating state (all asics).
4331  * Returns 0 for success or an error on failure.
4332  * Called at driver resume.
4333  */
4334 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4335 {
4336         struct amdgpu_device *adev = drm_to_adev(dev);
4337         int r = 0;
4338
4339         if (amdgpu_sriov_vf(adev)) {
4340                 r = amdgpu_virt_request_full_gpu(adev, true);
4341                 if (r)
4342                         return r;
4343         }
4344
4345         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4346                 return 0;
4347
4348         if (adev->in_s0ix)
4349                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4350
4351         /* post card */
4352         if (amdgpu_device_need_post(adev)) {
4353                 r = amdgpu_device_asic_init(adev);
4354                 if (r)
4355                         dev_err(adev->dev, "amdgpu asic init failed\n");
4356         }
4357
4358         r = amdgpu_device_ip_resume(adev);
4359
4360         if (r) {
4361                 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4362                 goto exit;
4363         }
4364         amdgpu_fence_driver_hw_init(adev);
4365
4366         r = amdgpu_device_ip_late_init(adev);
4367         if (r)
4368                 goto exit;
4369
4370         queue_delayed_work(system_wq, &adev->delayed_init_work,
4371                            msecs_to_jiffies(AMDGPU_RESUME_MS));
4372
4373         if (!adev->in_s0ix) {
4374                 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4375                 if (r)
4376                         goto exit;
4377         }
4378
4379 exit:
4380         if (amdgpu_sriov_vf(adev)) {
4381                 amdgpu_virt_init_data_exchange(adev);
4382                 amdgpu_virt_release_full_gpu(adev, true);
4383         }
4384
4385         if (r)
4386                 return r;
4387
4388         /* Make sure IB tests flushed */
4389         flush_delayed_work(&adev->delayed_init_work);
4390
4391         if (fbcon)
4392                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4393
4394         amdgpu_ras_resume(adev);
4395
4396         if (adev->mode_info.num_crtc) {
4397                 /*
4398                  * Most of the connector probing functions try to acquire runtime pm
4399                  * refs to ensure that the GPU is powered on when connector polling is
4400                  * performed. Since we're calling this from a runtime PM callback,
4401                  * trying to acquire rpm refs will cause us to deadlock.
4402                  *
4403                  * Since we're guaranteed to be holding the rpm lock, it's safe to
4404                  * temporarily disable the rpm helpers so this doesn't deadlock us.
4405                  */
4406 #ifdef CONFIG_PM
4407                 dev->dev->power.disable_depth++;
4408 #endif
4409                 if (!adev->dc_enabled)
4410                         drm_helper_hpd_irq_event(dev);
4411                 else
4412                         drm_kms_helper_hotplug_event(dev);
4413 #ifdef CONFIG_PM
4414                 dev->dev->power.disable_depth--;
4415 #endif
4416         }
4417         adev->in_suspend = false;
4418
4419         if (adev->enable_mes)
4420                 amdgpu_mes_self_test(adev);
4421
4422         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4423                 DRM_WARN("smart shift update failed\n");
4424
4425         return 0;
4426 }
4427
4428 /**
4429  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4430  *
4431  * @adev: amdgpu_device pointer
4432  *
4433  * The list of all the hardware IPs that make up the asic is walked and
4434  * the check_soft_reset callbacks are run.  check_soft_reset determines
4435  * if the asic is still hung or not.
4436  * Returns true if any of the IPs are still in a hung state, false if not.
4437  */
4438 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4439 {
4440         int i;
4441         bool asic_hang = false;
4442
4443         if (amdgpu_sriov_vf(adev))
4444                 return true;
4445
4446         if (amdgpu_asic_need_full_reset(adev))
4447                 return true;
4448
4449         for (i = 0; i < adev->num_ip_blocks; i++) {
4450                 if (!adev->ip_blocks[i].status.valid)
4451                         continue;
4452                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4453                         adev->ip_blocks[i].status.hang =
4454                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4455                 if (adev->ip_blocks[i].status.hang) {
4456                         dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4457                         asic_hang = true;
4458                 }
4459         }
4460         return asic_hang;
4461 }
4462
4463 /**
4464  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4465  *
4466  * @adev: amdgpu_device pointer
4467  *
4468  * The list of all the hardware IPs that make up the asic is walked and the
4469  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4470  * handles any IP specific hardware or software state changes that are
4471  * necessary for a soft reset to succeed.
4472  * Returns 0 on success, negative error code on failure.
4473  */
4474 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4475 {
4476         int i, r = 0;
4477
4478         for (i = 0; i < adev->num_ip_blocks; i++) {
4479                 if (!adev->ip_blocks[i].status.valid)
4480                         continue;
4481                 if (adev->ip_blocks[i].status.hang &&
4482                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4483                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4484                         if (r)
4485                                 return r;
4486                 }
4487         }
4488
4489         return 0;
4490 }
4491
4492 /**
4493  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4494  *
4495  * @adev: amdgpu_device pointer
4496  *
4497  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4498  * reset is necessary to recover.
4499  * Returns true if a full asic reset is required, false if not.
4500  */
4501 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4502 {
4503         int i;
4504
4505         if (amdgpu_asic_need_full_reset(adev))
4506                 return true;
4507
4508         for (i = 0; i < adev->num_ip_blocks; i++) {
4509                 if (!adev->ip_blocks[i].status.valid)
4510                         continue;
4511                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4512                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4513                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4514                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4515                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4516                         if (adev->ip_blocks[i].status.hang) {
4517                                 dev_info(adev->dev, "Some blocks need a full reset!\n");
4518                                 return true;
4519                         }
4520                 }
4521         }
4522         return false;
4523 }
4524
4525 /**
4526  * amdgpu_device_ip_soft_reset - do a soft reset
4527  *
4528  * @adev: amdgpu_device pointer
4529  *
4530  * The list of all the hardware IPs that make up the asic is walked and the
4531  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4532  * IP specific hardware or software state changes that are necessary to soft
4533  * reset the IP.
4534  * Returns 0 on success, negative error code on failure.
4535  */
4536 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4537 {
4538         int i, r = 0;
4539
4540         for (i = 0; i < adev->num_ip_blocks; i++) {
4541                 if (!adev->ip_blocks[i].status.valid)
4542                         continue;
4543                 if (adev->ip_blocks[i].status.hang &&
4544                     adev->ip_blocks[i].version->funcs->soft_reset) {
4545                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4546                         if (r)
4547                                 return r;
4548                 }
4549         }
4550
4551         return 0;
4552 }
4553
4554 /**
4555  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4556  *
4557  * @adev: amdgpu_device pointer
4558  *
4559  * The list of all the hardware IPs that make up the asic is walked and the
4560  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4561  * handles any IP specific hardware or software state changes that are
4562  * necessary after the IP has been soft reset.
4563  * Returns 0 on success, negative error code on failure.
4564  */
4565 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4566 {
4567         int i, r = 0;
4568
4569         for (i = 0; i < adev->num_ip_blocks; i++) {
4570                 if (!adev->ip_blocks[i].status.valid)
4571                         continue;
4572                 if (adev->ip_blocks[i].status.hang &&
4573                     adev->ip_blocks[i].version->funcs->post_soft_reset)
4574                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4575                 if (r)
4576                         return r;
4577         }
4578
4579         return 0;
4580 }
4581
4582 /**
4583  * amdgpu_device_recover_vram - Recover some VRAM contents
4584  *
4585  * @adev: amdgpu_device pointer
4586  *
4587  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4588  * restore things like GPUVM page tables after a GPU reset where
4589  * the contents of VRAM might be lost.
4590  *
4591  * Returns:
4592  * 0 on success, negative error code on failure.
4593  */
4594 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4595 {
4596         struct dma_fence *fence = NULL, *next = NULL;
4597         struct amdgpu_bo *shadow;
4598         struct amdgpu_bo_vm *vmbo;
4599         long r = 1, tmo;
4600
4601         if (amdgpu_sriov_runtime(adev))
4602                 tmo = msecs_to_jiffies(8000);
4603         else
4604                 tmo = msecs_to_jiffies(100);
4605
4606         dev_info(adev->dev, "recover vram bo from shadow start\n");
4607         mutex_lock(&adev->shadow_list_lock);
4608         list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4609                 /* If the vm is a compute context or the adev is an APU, the shadow will be NULL */
4610                 if (!vmbo->shadow)
4611                         continue;
4612                 shadow = vmbo->shadow;
4613
4614                 /* No need to recover an evicted BO */
4615                 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4616                     shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4617                     shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4618                         continue;
4619
4620                 r = amdgpu_bo_restore_shadow(shadow, &next);
4621                 if (r)
4622                         break;
4623
4624                 if (fence) {
4625                         tmo = dma_fence_wait_timeout(fence, false, tmo);
4626                         dma_fence_put(fence);
4627                         fence = next;
4628                         if (tmo == 0) {
4629                                 r = -ETIMEDOUT;
4630                                 break;
4631                         } else if (tmo < 0) {
4632                                 r = tmo;
4633                                 break;
4634                         }
4635                 } else {
4636                         fence = next;
4637                 }
4638         }
4639         mutex_unlock(&adev->shadow_list_lock);
4640
4641         if (fence)
4642                 tmo = dma_fence_wait_timeout(fence, false, tmo);
4643         dma_fence_put(fence);
4644
4645         if (r < 0 || tmo <= 0) {
4646                 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4647                 return -EIO;
4648         }
4649
4650         dev_info(adev->dev, "recover vram bo from shadow done\n");
4651         return 0;
4652 }
4653
4654
4655 /**
4656  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4657  *
4658  * @adev: amdgpu_device pointer
4659  * @from_hypervisor: request from hypervisor
4660  *
4661  * Do a VF FLR and reinitialize the ASIC.
4662  * Returns 0 on success, negative error code on failure.
4663  */
4664 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4665                                      bool from_hypervisor)
4666 {
4667         int r;
4668         struct amdgpu_hive_info *hive = NULL;
4669         int retry_limit = 0;
4670
4671 retry:
4672         amdgpu_amdkfd_pre_reset(adev);
4673
4674         if (from_hypervisor)
4675                 r = amdgpu_virt_request_full_gpu(adev, true);
4676         else
4677                 r = amdgpu_virt_reset_gpu(adev);
4678         if (r)
4679                 return r;
4680         amdgpu_irq_gpu_reset_resume_helper(adev);
4681
4682         /* some SW cleanup the VF needs to do before recovery */
4683         amdgpu_virt_post_reset(adev);
4684
4685         /* Resume IP prior to SMC */
4686         r = amdgpu_device_ip_reinit_early_sriov(adev);
4687         if (r)
4688                 goto error;
4689
4690         amdgpu_virt_init_data_exchange(adev);
4691
4692         r = amdgpu_device_fw_loading(adev);
4693         if (r)
4694                 return r;
4695
4696         /* now we are okay to resume SMC/CP/SDMA */
4697         r = amdgpu_device_ip_reinit_late_sriov(adev);
4698         if (r)
4699                 goto error;
4700
4701         hive = amdgpu_get_xgmi_hive(adev);
4702         /* Update PSP FW topology after reset */
4703         if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4704                 r = amdgpu_xgmi_update_topology(hive, adev);
4705
4706         if (hive)
4707                 amdgpu_put_xgmi_hive(hive);
4708
4709         if (!r) {
4710                 r = amdgpu_ib_ring_tests(adev);
4711
4712                 amdgpu_amdkfd_post_reset(adev);
4713         }
4714
4715 error:
4716         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4717                 amdgpu_inc_vram_lost(adev);
4718                 r = amdgpu_device_recover_vram(adev);
4719         }
4720         amdgpu_virt_release_full_gpu(adev, true);
4721
4722         if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4723                 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4724                         retry_limit++;
4725                         goto retry;
4726                 } else
4727                         DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4728         }
4729
4730         return r;
4731 }
4732
4733 /**
4734  * amdgpu_device_has_job_running - check if there is any job running
4735  *
4736  * @adev: amdgpu_device pointer
4737  *
4738  * Check each ring's scheduler pending list for an unfinished job.
4739  */
4740 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4741 {
4742         int i;
4743         struct drm_sched_job *job;
4744
4745         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4746                 struct amdgpu_ring *ring = adev->rings[i];
4747
4748                 if (!ring || !ring->sched.thread)
4749                         continue;
4750
4751                 spin_lock(&ring->sched.job_list_lock);
4752                 job = list_first_entry_or_null(&ring->sched.pending_list,
4753                                                struct drm_sched_job, list);
4754                 spin_unlock(&ring->sched.job_list_lock);
4755                 if (job)
4756                         return true;
4757         }
4758         return false;
4759 }
4760
4761 /**
4762  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4763  *
4764  * @adev: amdgpu_device pointer
4765  *
4766  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4767  * a hung GPU.
4768  */
4769 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4770 {
4771
4772         if (amdgpu_gpu_recovery == 0)
4773                 goto disabled;
4774
4775         /* Skip soft reset check in fatal error mode */
4776         if (!amdgpu_ras_is_poison_mode_supported(adev))
4777                 return true;
4778
4779         if (amdgpu_sriov_vf(adev))
4780                 return true;
4781
4782         if (amdgpu_gpu_recovery == -1) {
4783                 switch (adev->asic_type) {
4784 #ifdef CONFIG_DRM_AMDGPU_SI
4785                 case CHIP_VERDE:
4786                 case CHIP_TAHITI:
4787                 case CHIP_PITCAIRN:
4788                 case CHIP_OLAND:
4789                 case CHIP_HAINAN:
4790 #endif
4791 #ifdef CONFIG_DRM_AMDGPU_CIK
4792                 case CHIP_KAVERI:
4793                 case CHIP_KABINI:
4794                 case CHIP_MULLINS:
4795 #endif
4796                 case CHIP_CARRIZO:
4797                 case CHIP_STONEY:
4798                 case CHIP_CYAN_SKILLFISH:
4799                         goto disabled;
4800                 default:
4801                         break;
4802                 }
4803         }
4804
4805         return true;
4806
4807 disabled:
4808         dev_info(adev->dev, "GPU recovery disabled.\n");
4809         return false;
4810 }
4811
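     /**
      * amdgpu_device_mode1_reset - perform a full ASIC (mode1) reset
      *
      * @adev: amdgpu_device pointer
      *
      * Disables bus mastering, caches the PCI config space, triggers the
      * reset through the SMU when supported (through the PSP otherwise),
      * then restores the PCI state and polls the memory size register
      * until the ASIC comes back out of reset.
      * Returns 0 on success, negative error code on failure.
      */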
4812 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4813 {
4814         u32 i;
4815         int ret = 0;
4816
4817         amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4818
4819         dev_info(adev->dev, "GPU mode1 reset\n");
4820
4821         /* disable BM */
4822         pci_clear_master(adev->pdev);
4823
4824         amdgpu_device_cache_pci_state(adev->pdev);
4825
4826         if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4827                 dev_info(adev->dev, "GPU smu mode1 reset\n");
4828                 ret = amdgpu_dpm_mode1_reset(adev);
4829         } else {
4830                 dev_info(adev->dev, "GPU psp mode1 reset\n");
4831                 ret = psp_gpu_reset(adev);
4832         }
4833
4834         if (ret)
4835                 goto mode1_reset_failed;
4836
4837         amdgpu_device_load_pci_state(adev->pdev);
4838         ret = amdgpu_psp_wait_for_bootloader(adev);
4839         if (ret)
4840                 goto mode1_reset_failed;
4841
4842         /* wait for asic to come out of reset */
4843         for (i = 0; i < adev->usec_timeout; i++) {
4844                 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4845
4846                 if (memsize != 0xffffffff)
4847                         break;
4848                 udelay(1);
4849         }
4850
4851         if (i >= adev->usec_timeout) {
4852                 ret = -ETIMEDOUT;
4853                 goto mode1_reset_failed;
4854         }
4855
4856         amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4857
4858         return 0;
4859
4860 mode1_reset_failed:
4861         dev_err(adev->dev, "GPU mode1 reset failed\n");
4862         return ret;
4863 }
4864
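     /**
      * amdgpu_device_pre_asic_reset - prepare a device for ASIC reset
      *
      * @adev: amdgpu_device pointer
      * @reset_context: amdgpu reset context pointer
      *
      * Force-completes the hardware fences and, on bare metal, tries a
      * soft reset first, flagging a full reset in @reset_context when the
      * soft reset fails or is not sufficient.
      * Returns 0 on success, negative error code on failure.
      */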
4865 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4866                                  struct amdgpu_reset_context *reset_context)
4867 {
4868         int i, r = 0;
4869         struct amdgpu_job *job = NULL;
4870         bool need_full_reset =
4871                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4872
4873         if (reset_context->reset_req_dev == adev)
4874                 job = reset_context->job;
4875
4876         if (amdgpu_sriov_vf(adev)) {
4877                 /* stop the data exchange thread */
4878                 amdgpu_virt_fini_data_exchange(adev);
4879         }
4880
4881         amdgpu_fence_driver_isr_toggle(adev, true);
4882
4883         /* block all schedulers and reset given job's ring */
4884         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4885                 struct amdgpu_ring *ring = adev->rings[i];
4886
4887                 if (!ring || !ring->sched.thread)
4888                         continue;
4889
4890                 /* Clear the job fences from the fence driver to avoid
4891                  * force_completion leaving NULL and vm flush fences behind
4892                  */
4893                 amdgpu_fence_driver_clear_job_fences(ring);
4894
4895                 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4896                 amdgpu_fence_driver_force_completion(ring);
4897         }
4898
4899         amdgpu_fence_driver_isr_toggle(adev, false);
4900
4901         if (job && job->vm)
4902                 drm_sched_increase_karma(&job->base);
4903
4904         r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4905         /* If reset handler not implemented, continue; otherwise return */
4906         if (r == -EOPNOTSUPP)
4907                 r = 0;
4908         else
4909                 return r;
4910
4911         /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4912         if (!amdgpu_sriov_vf(adev)) {
4913
4914                 if (!need_full_reset)
4915                         need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4916
4917                 if (!need_full_reset && amdgpu_gpu_recovery &&
4918                     amdgpu_device_ip_check_soft_reset(adev)) {
4919                         amdgpu_device_ip_pre_soft_reset(adev);
4920                         r = amdgpu_device_ip_soft_reset(adev);
4921                         amdgpu_device_ip_post_soft_reset(adev);
4922                         if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4923                                 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4924                                 need_full_reset = true;
4925                         }
4926                 }
4927
4928                 if (need_full_reset) {
4929                         r = amdgpu_device_ip_suspend(adev);
4930                         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4931                 } else {
4932                         clear_bit(AMDGPU_NEED_FULL_RESET,
4933                                   &reset_context->flags);
4934                 }
4935         }
4936
4937         return r;
4938 }
4939
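     /* Snapshot the configured reset register list so the values can be
      * included in a devcoredump; must be called with the reset domain
      * semaphore held.
      */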
4940 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4941 {
4942         int i;
4943
4944         lockdep_assert_held(&adev->reset_domain->sem);
4945
4946         for (i = 0; i < adev->num_regs; i++) {
4947                 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4948                 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4949                                              adev->reset_dump_reg_value[i]);
4950         }
4951
4952         return 0;
4953 }
4954
4955 #ifndef CONFIG_DEV_COREDUMP
4956 static void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
4957                             struct amdgpu_reset_context *reset_context)
4958 {
4959 }
4960 #else
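     /* devcoredump read callback: pretty-prints the saved reset information
      * (kernel and module version, reset time, task info, VRAM-lost status
      * and register dump) into the caller's buffer via a drm coredump
      * printer.
      */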
4961 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4962                 size_t count, void *data, size_t datalen)
4963 {
4964         struct drm_printer p;
4965         struct amdgpu_coredump_info *coredump = data;
4966         struct drm_print_iterator iter;
4967         int i;
4968
4969         iter.data = buffer;
4970         iter.offset = 0;
4971         iter.start = offset;
4972         iter.remain = count;
4973
4974         p = drm_coredump_printer(&iter);
4975
4976         drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4977         drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4978         drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4979         drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec, coredump->reset_time.tv_nsec);
4980         if (coredump->reset_task_info.pid)
4981                 drm_printf(&p, "process_name: %s PID: %d\n",
4982                            coredump->reset_task_info.process_name,
4983                            coredump->reset_task_info.pid);
4984
4985         if (coredump->reset_vram_lost)
4986                 drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4987         if (coredump->adev->num_regs) {
4988                 drm_printf(&p, "AMDGPU register dumps:\nOffset:     Value:\n");
4989
4990                 for (i = 0; i < coredump->adev->num_regs; i++)
4991                         drm_printf(&p, "0x%08x: 0x%08x\n",
4992                                    coredump->adev->reset_dump_reg_list[i],
4993                                    coredump->adev->reset_dump_reg_value[i]);
4994         }
4995
4996         return count - iter.remain;
4997 }
4998
4999 static void amdgpu_devcoredump_free(void *data)
5000 {
5001         kfree(data);
5002 }
5003
5004 static void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
5005                             struct amdgpu_reset_context *reset_context)
5006 {
5007         struct amdgpu_coredump_info *coredump;
5008         struct drm_device *dev = adev_to_drm(adev);
5009
5010         coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT);
5011
5012         if (!coredump) {
5013                 DRM_ERROR("%s: failed to allocate memory for coredump\n", __func__);
5014                 return;
5015         }
5016
5017         coredump->reset_vram_lost = vram_lost;
5018
5019         if (reset_context->job && reset_context->job->vm)
5020                 coredump->reset_task_info = reset_context->job->vm->task_info;
5021
5022         coredump->adev = adev;
5023
5024         ktime_get_ts64(&coredump->reset_time);
5025
5026         dev_coredumpm(dev->dev, THIS_MODULE, coredump, 0, GFP_NOWAIT,
5027                       amdgpu_devcoredump_read, amdgpu_devcoredump_free);
5028 }
5029 #endif
5030
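     /**
      * amdgpu_do_asic_reset - reset and re-initialize the ASIC(s)
      *
      * @device_list_handle: list of devices to reset (a whole XGMI hive
      *                      or a single device)
      * @reset_context: amdgpu reset context pointer
      *
      * Tries a registered reset handler first; otherwise runs the default
      * flow: dump the reset registers, perform the HW reset (in parallel
      * across an XGMI hive), then re-initialize the IP blocks and recover
      * VRAM contents where needed.
      * Returns 0 on success, negative error code on failure.
      */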
5031 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
5032                          struct amdgpu_reset_context *reset_context)
5033 {
5034         struct amdgpu_device *tmp_adev = NULL;
5035         bool need_full_reset, skip_hw_reset, vram_lost = false;
5036         int r = 0;
5037         bool gpu_reset_for_dev_remove = false;
5038
5039         /* Try reset handler method first */
5040         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5041                                     reset_list);
5042         amdgpu_reset_reg_dumps(tmp_adev);
5043
5044         reset_context->reset_device_list = device_list_handle;
5045         r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
5046         /* If reset handler not implemented, continue; otherwise return */
5047         if (r == -EOPNOTSUPP)
5048                 r = 0;
5049         else
5050                 return r;
5051
5052         /* Reset handler not implemented, use the default method */
5053         need_full_reset =
5054                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5055         skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
5056
5057         gpu_reset_for_dev_remove =
5058                 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5059                         test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5060
5061         /*
5062          * ASIC reset has to be done on all XGMI hive nodes ASAP
5063          * to allow proper link negotiation in FW (within 1 sec)
5064          */
5065         if (!skip_hw_reset && need_full_reset) {
5066                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5067                         /* For XGMI run all resets in parallel to speed up the process */
5068                         if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5069                                 tmp_adev->gmc.xgmi.pending_reset = false;
5070                                 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
5071                                         r = -EALREADY;
5072                         } else
5073                                 r = amdgpu_asic_reset(tmp_adev);
5074
5075                         if (r) {
5076                                 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
5077                                          r, adev_to_drm(tmp_adev)->unique);
5078                                 goto out;
5079                         }
5080                 }
5081
5082                 /* For XGMI wait for all resets to complete before proceed */
5083                 if (!r) {
5084                         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5085                                 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5086                                         flush_work(&tmp_adev->xgmi_reset_work);
5087                                         r = tmp_adev->asic_reset_res;
5088                                         if (r)
5089                                                 break;
5090                                 }
5091                         }
5092                 }
5093         }
5094
5095         if (!r && amdgpu_ras_intr_triggered()) {
5096                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5097                         if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
5098                             tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
5099                                 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
5100                 }
5101
5102                 amdgpu_ras_intr_cleared();
5103         }
5104
5105         /* Since the mode1 reset affects base ip blocks, the
5106          * phase1 ip blocks need to be resumed. Otherwise there
5107          * will be a BIOS signature error and the psp bootloader
5108          * can't load kdb on the next amdgpu install.
5109          */
5110         if (gpu_reset_for_dev_remove) {
5111                 list_for_each_entry(tmp_adev, device_list_handle, reset_list)
5112                         amdgpu_device_ip_resume_phase1(tmp_adev);
5113
5114                 goto end;
5115         }
5116
5117         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5118                 if (need_full_reset) {
5119                         /* post card */
5120                         r = amdgpu_device_asic_init(tmp_adev);
5121                         if (r) {
5122                                 dev_warn(tmp_adev->dev, "asic atom init failed!");
5123                         } else {
5124                                 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
5125
5126                                 r = amdgpu_device_ip_resume_phase1(tmp_adev);
5127                                 if (r)
5128                                         goto out;
5129
5130                                 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
5131
5132                                 amdgpu_coredump(tmp_adev, vram_lost, reset_context);
5133
5134                                 if (vram_lost) {
5135                                         DRM_INFO("VRAM is lost due to GPU reset!\n");
5136                                         amdgpu_inc_vram_lost(tmp_adev);
5137                                 }
5138
5139                                 r = amdgpu_device_fw_loading(tmp_adev);
5140                                 if (r)
5141                                         return r;
5142
5143                                 r = amdgpu_xcp_restore_partition_mode(
5144                                         tmp_adev->xcp_mgr);
5145                                 if (r)
5146                                         goto out;
5147
5148                                 r = amdgpu_device_ip_resume_phase2(tmp_adev);
5149                                 if (r)
5150                                         goto out;
5151
5152                                 if (vram_lost)
5153                                         amdgpu_device_fill_reset_magic(tmp_adev);
5154
5155                                 /*
5156                                  * Add this ASIC back as tracked since the
5157                                  * reset completed successfully.
5158                                  */
5159                                 amdgpu_register_gpu_instance(tmp_adev);
5160
5161                                 if (!reset_context->hive &&
5162                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5163                                         amdgpu_xgmi_add_device(tmp_adev);
5164
5165                                 r = amdgpu_device_ip_late_init(tmp_adev);
5166                                 if (r)
5167                                         goto out;
5168
5169                                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
5170
5171                                 /*
5172                                  * The GPU enters a bad state once the number of
5173                                  * faulty pages detected by ECC reaches the
5174                                  * threshold, and ras recovery is scheduled next.
5175                                  * So add a check here to break out of recovery if
5176                                  * the bad page threshold has indeed been exceeded,
5177                                  * and remind the user to retire this GPU or set
5178                                  * a bigger bad_page_threshold value to work
5179                                  * around this when probing the driver again.
5180                                  */
5181                                 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
5182                                         /* must succeed. */
5183                                         amdgpu_ras_resume(tmp_adev);
5184                                 } else {
5185                                         r = -EINVAL;
5186                                         goto out;
5187                                 }
5188
5189                                 /* Update PSP FW topology after reset */
5190                                 if (reset_context->hive &&
5191                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5192                                         r = amdgpu_xgmi_update_topology(
5193                                                 reset_context->hive, tmp_adev);
5194                         }
5195                 }
5196
5197 out:
5198                 if (!r) {
5199                         amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5200                         r = amdgpu_ib_ring_tests(tmp_adev);
5201                         if (r) {
5202                                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5203                                 need_full_reset = true;
5204                                 r = -EAGAIN;
5205                                 goto end;
5206                         }
5207                 }
5208
5209                 if (!r)
5210                         r = amdgpu_device_recover_vram(tmp_adev);
5211                 else
5212                         tmp_adev->asic_reset_res = r;
5213         }
5214
5215 end:
5216         if (need_full_reset)
5217                 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5218         else
5219                 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5220         return r;
5221 }
5222
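     /* Record the MP1 (SMU firmware) state matching the chosen reset
      * method; cleared again in amdgpu_device_unset_mp1_state() once the
      * reset is done.
      */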
5223 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5224 {
5225
5226         switch (amdgpu_asic_reset_method(adev)) {
5227         case AMD_RESET_METHOD_MODE1:
5228                 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5229                 break;
5230         case AMD_RESET_METHOD_MODE2:
5231                 adev->mp1_state = PP_MP1_STATE_RESET;
5232                 break;
5233         default:
5234                 adev->mp1_state = PP_MP1_STATE_NONE;
5235                 break;
5236         }
5237 }
5238
5239 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5240 {
5241         amdgpu_vf_error_trans_all(adev);
5242         adev->mp1_state = PP_MP1_STATE_NONE;
5243 }
5244
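     /* Re-enable and resume runtime PM for the audio function (devfn 1 on
      * the GPU's bus) that was suspended before the reset.
      */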
5245 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5246 {
5247         struct pci_dev *p = NULL;
5248
5249         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5250                         adev->pdev->bus->number, 1);
5251         if (p) {
5252                 pm_runtime_enable(&(p->dev));
5253                 pm_runtime_resume(&(p->dev));
5254         }
5255
5256         pci_dev_put(p);
5257 }
5258
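     /* Put the GPU's audio function into runtime suspend before a BACO or
      * mode1 reset, so the reset does not change the audio hardware behind
      * the audio driver's back.
      */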
5259 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5260 {
5261         enum amd_reset_method reset_method;
5262         struct pci_dev *p = NULL;
5263         u64 expires;
5264
5265         /*
5266          * For now, only BACO and mode1 reset are confirmed
5267          * to suffer from the audio issue if not properly suspended.
5268          */
5269         reset_method = amdgpu_asic_reset_method(adev);
5270         if ((reset_method != AMD_RESET_METHOD_BACO) &&
5271              (reset_method != AMD_RESET_METHOD_MODE1))
5272                 return -EINVAL;
5273
5274         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5275                         adev->pdev->bus->number, 1);
5276         if (!p)
5277                 return -ENODEV;
5278
5279         expires = pm_runtime_autosuspend_expiration(&(p->dev));
5280         if (!expires)
5281                 /*
5282                  * If we cannot get the audio device autosuspend delay,
5283                  * a fixed 4s interval will be used. Since 3s is the audio
5284                  * controller's default autosuspend delay setting, the 4s
5285                  * used here is guaranteed to cover it.
5286                  */
5287                 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5288
5289         while (!pm_runtime_status_suspended(&(p->dev))) {
5290                 if (!pm_runtime_suspend(&(p->dev)))
5291                         break;
5292
5293                 if (expires < ktime_get_mono_fast_ns()) {
5294                         dev_warn(adev->dev, "failed to suspend display audio\n");
5295                         pci_dev_put(p);
5296                         /* TODO: abort the succeeding gpu reset? */
5297                         return -ETIMEDOUT;
5298                 }
5299         }
5300
5301         pm_runtime_disable(&(p->dev));
5302
5303         pci_dev_put(p);
5304         return 0;
5305 }
5306
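     /* Cancel reset work that may already be queued from other sources
      * (debugfs, KFD, SR-IOV FLR, RAS recovery) now that this recovery
      * sequence is taking over.
      */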
5307 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5308 {
5309         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5310
5311 #if defined(CONFIG_DEBUG_FS)
5312         if (!amdgpu_sriov_vf(adev))
5313                 cancel_work(&adev->reset_work);
5314 #endif
5315
5316         if (adev->kfd.dev)
5317                 cancel_work(&adev->kfd.reset_work);
5318
5319         if (amdgpu_sriov_vf(adev))
5320                 cancel_work(&adev->virt.flr_work);
5321
5322         if (con && adev->ras_enabled)
5323                 cancel_work(&con->recovery_work);
5324
5325 }
5326
5327 /**
5328  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5329  *
5330  * @adev: amdgpu_device pointer
5331  * @job: which job trigger hang
5332  * @reset_context: amdgpu reset context pointer
5333  *
5334  * Attempt to reset the GPU if it has hung (all asics):
5335  * do a soft reset or a full reset and reinitialize the ASIC.
5336  * Returns 0 for success or an error on failure.
5337  */
5338
5339 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5340                               struct amdgpu_job *job,
5341                               struct amdgpu_reset_context *reset_context)
5342 {
5343         struct list_head device_list, *device_list_handle =  NULL;
5344         bool job_signaled = false;
5345         struct amdgpu_hive_info *hive = NULL;
5346         struct amdgpu_device *tmp_adev = NULL;
5347         int i, r = 0;
5348         bool need_emergency_restart = false;
5349         bool audio_suspended = false;
5350         bool gpu_reset_for_dev_remove = false;
5351
5352         gpu_reset_for_dev_remove =
5353                         test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5354                                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5355
5356         /*
5357          * Special case: RAS triggered and full reset isn't supported
5358          */
5359         need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5360
5361         /*
5362          * Flush RAM to disk so that after reboot
5363          * the user can read the log and see why the system rebooted.
5364          */
5365         if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5366                 DRM_WARN("Emergency reboot.");
5367
5368                 ksys_sync_helper();
5369                 emergency_restart();
5370         }
5371
5372         dev_info(adev->dev, "GPU %s begin!\n",
5373                 need_emergency_restart ? "jobs stop" : "reset");
5374
5375         if (!amdgpu_sriov_vf(adev))
5376                 hive = amdgpu_get_xgmi_hive(adev);
5377         if (hive)
5378                 mutex_lock(&hive->hive_lock);
5379
5380         reset_context->job = job;
5381         reset_context->hive = hive;
5382         /*
5383          * Build list of devices to reset.
5384          * In case we are in XGMI hive mode, resort the device list
5385          * to put adev in the 1st position.
5386          */
5387         INIT_LIST_HEAD(&device_list);
5388         if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5389                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5390                         list_add_tail(&tmp_adev->reset_list, &device_list);
5391                         if (gpu_reset_for_dev_remove && adev->shutdown)
5392                                 tmp_adev->shutdown = true;
5393                 }
5394                 if (!list_is_first(&adev->reset_list, &device_list))
5395                         list_rotate_to_front(&adev->reset_list, &device_list);
5396                 device_list_handle = &device_list;
5397         } else {
5398                 list_add_tail(&adev->reset_list, &device_list);
5399                 device_list_handle = &device_list;
5400         }
5401
5402         /* We need to lock reset domain only once both for XGMI and single device */
5403         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5404                                     reset_list);
5405         amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5406
5407         /* block all schedulers and reset given job's ring */
5408         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5409
5410                 amdgpu_device_set_mp1_state(tmp_adev);
5411
5412                 /*
5413                  * Try to put the audio codec into suspend state
5414                  * before the gpu reset starts.
5415                  *
5416                  * The power domain of the graphics device is shared
5417                  * with the AZ power domain. Without this, we may
5418                  * change the audio hardware behind the audio
5419                  * driver's back, which will trigger audio codec
5420                  * errors.
5421                  */
5422                 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5423                         audio_suspended = true;
5424
5425                 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5426
5427                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5428
5429                 if (!amdgpu_sriov_vf(tmp_adev))
5430                         amdgpu_amdkfd_pre_reset(tmp_adev);
5431
5432                 /*
5433                  * Mark these ASICs to be reset as untracked first,
5434                  * and add them back after the reset has completed
5435                  */
5436                 amdgpu_unregister_gpu_instance(tmp_adev);
5437
5438                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5439
5440                 /* disable ras on ALL IPs */
5441                 if (!need_emergency_restart &&
5442                       amdgpu_device_ip_need_full_reset(tmp_adev))
5443                         amdgpu_ras_suspend(tmp_adev);
5444
5445                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5446                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5447
5448                         if (!ring || !ring->sched.thread)
5449                                 continue;
5450
5451                         drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5452
5453                         if (need_emergency_restart)
5454                                 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5455                 }
5456                 atomic_inc(&tmp_adev->gpu_reset_counter);
5457         }
5458
5459         if (need_emergency_restart)
5460                 goto skip_sched_resume;
5461
5462         /*
5463          * Must check guilty signal here since after this point all old
5464          * HW fences are force signaled.
5465          *
5466          * job->base holds a reference to parent fence
5467          */
5468         if (job && dma_fence_is_signaled(&job->hw_fence)) {
5469                 job_signaled = true;
5470                 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5471                 goto skip_hw_reset;
5472         }
5473
5474 retry:  /* Rest of adevs pre asic reset from XGMI hive. */
5475         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5476                 if (gpu_reset_for_dev_remove) {
5477                         /* Workaround for ASICs that need to disable the SMC first */
5478                         amdgpu_device_smu_fini_early(tmp_adev);
5479                 }
5480                 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5481                 /* TODO: should we stop? */
5482                 if (r) {
5483                         dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5484                                   r, adev_to_drm(tmp_adev)->unique);
5485                         tmp_adev->asic_reset_res = r;
5486                 }
5487
5488                 /*
5489                  * Drop all pending non-scheduler resets. Scheduler resets
5490                  * were already dropped during drm_sched_stop
5491                  */
5492                 amdgpu_device_stop_pending_resets(tmp_adev);
5493         }
5494
5495         /* Actual ASIC resets if needed. */
5496         /* Host driver will handle XGMI hive reset for SRIOV */
5497         if (amdgpu_sriov_vf(adev)) {
5498                 r = amdgpu_device_reset_sriov(adev, !job);
5499                 if (r)
5500                         adev->asic_reset_res = r;
5501
5502                 /* Aldebaran and gfx_11_0_3 support ras in SRIOV, so ras needs to be resumed during reset */
5503                 if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
5504                             IP_VERSION(9, 4, 2) ||
5505                     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
5506                         amdgpu_ras_resume(adev);
5507         } else {
5508                 r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5509                 if (r == -EAGAIN)
5510                         goto retry;
5511
5512                 if (!r && gpu_reset_for_dev_remove)
5513                         goto recover_end;
5514         }
5515
5516 skip_hw_reset:
5517
5518         /* Post ASIC reset for all devs. */
5519         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5520
5521                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5522                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5523
5524                         if (!ring || !ring->sched.thread)
5525                                 continue;
5526
5527                         drm_sched_start(&ring->sched, true);
5528                 }
5529
5530                 if (adev->enable_mes &&
5531                     amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3))
5532                         amdgpu_mes_self_test(tmp_adev);
5533
5534                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
5535                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5536
5537                 if (tmp_adev->asic_reset_res)
5538                         r = tmp_adev->asic_reset_res;
5539
5540                 tmp_adev->asic_reset_res = 0;
5541
5542                 if (r) {
5543                         /* bad news, how do we tell this to userspace? */
5544                         dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5545                         amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5546                 } else {
5547                         dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5548                         if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5549                                 DRM_WARN("smart shift update failed\n");
5550                 }
5551         }
5552
5553 skip_sched_resume:
5554         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5555                 /* unlock kfd: SRIOV would do it separately */
5556                 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5557                         amdgpu_amdkfd_post_reset(tmp_adev);
5558
5559                 /* kfd_post_reset will do nothing if the kfd device is not initialized;
5560                  * kfd needs to be brought up here if it was not initialized before
5561                  */
5562                 if (!adev->kfd.init_complete)
5563                         amdgpu_amdkfd_device_init(adev);
5564
5565                 if (audio_suspended)
5566                         amdgpu_device_resume_display_audio(tmp_adev);
5567
5568                 amdgpu_device_unset_mp1_state(tmp_adev);
5569
5570                 amdgpu_ras_set_error_query_ready(tmp_adev, true);
5571         }
5572
5573 recover_end:
5574         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5575                                             reset_list);
5576         amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5577
5578         if (hive) {
5579                 mutex_unlock(&hive->hive_lock);
5580                 amdgpu_put_xgmi_hive(hive);
5581         }
5582
5583         if (r)
5584                 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5585
5586         atomic_set(&adev->reset_domain->reset_res, r);
5587         return r;
5588 }
5589
5590 /**
5591  * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5592  *
5593  * @adev: amdgpu_device pointer
5594  *
5595  * Fetches and stores in the driver the PCIE capabilities (gen speed
5596  * and lanes) of the slot the device is in. Handles APUs and
5597  * virtualized environments where PCIE config space may not be available.
5598  */
5599 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5600 {
5601         struct pci_dev *pdev;
5602         enum pci_bus_speed speed_cap, platform_speed_cap;
5603         enum pcie_link_width platform_link_width;
5604
5605         if (amdgpu_pcie_gen_cap)
5606                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5607
5608         if (amdgpu_pcie_lane_cap)
5609                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5610
5611         /* covers APUs as well */
5612         if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
5613                 if (adev->pm.pcie_gen_mask == 0)
5614                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5615                 if (adev->pm.pcie_mlw_mask == 0)
5616                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5617                 return;
5618         }
5619
5620         if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5621                 return;
5622
5623         pcie_bandwidth_available(adev->pdev, NULL,
5624                                  &platform_speed_cap, &platform_link_width);
5625
5626         if (adev->pm.pcie_gen_mask == 0) {
5627                 /* asic caps */
5628                 pdev = adev->pdev;
5629                 speed_cap = pcie_get_speed_cap(pdev);
5630                 if (speed_cap == PCI_SPEED_UNKNOWN) {
5631                         adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5632                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5633                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5634                 } else {
5635                         if (speed_cap == PCIE_SPEED_32_0GT)
5636                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5637                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5638                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5639                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5640                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5641                         else if (speed_cap == PCIE_SPEED_16_0GT)
5642                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5643                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5644                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5645                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5646                         else if (speed_cap == PCIE_SPEED_8_0GT)
5647                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5648                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5649                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5650                         else if (speed_cap == PCIE_SPEED_5_0GT)
5651                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5652                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5653                         else
5654                                 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5655                 }
5656                 /* platform caps */
5657                 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5658                         adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5659                                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5660                 } else {
5661                         if (platform_speed_cap == PCIE_SPEED_32_0GT)
5662                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5663                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5664                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5665                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5666                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5667                         else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5668                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5669                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5670                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5671                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5672                         else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5673                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5674                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5675                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5676                         else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5677                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5678                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5679                         else
5680                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5681
5682                 }
5683         }
5684         if (adev->pm.pcie_mlw_mask == 0) {
5685                 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5686                         adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5687                 } else {
5688                         switch (platform_link_width) {
5689                         case PCIE_LNK_X32:
5690                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5691                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5692                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5693                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5694                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5695                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5696                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5697                                 break;
5698                         case PCIE_LNK_X16:
5699                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5700                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5701                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5702                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5703                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5704                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5705                                 break;
5706                         case PCIE_LNK_X12:
5707                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5708                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5709                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5710                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5711                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5712                                 break;
5713                         case PCIE_LNK_X8:
5714                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5715                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5716                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5717                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5718                                 break;
5719                         case PCIE_LNK_X4:
5720                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5721                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5722                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5723                                 break;
5724                         case PCIE_LNK_X2:
5725                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5726                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5727                                 break;
5728                         case PCIE_LNK_X1:
5729                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5730                                 break;
5731                         default:
5732                                 break;
5733                         }
5734                 }
5735         }
5736 }
5737
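/*
 * Editor's example (hypothetical helper, not part of this file): the masks
 * cached by amdgpu_device_get_pcie_info() are typically consumed by testing
 * individual CAIL_* bits. A minimal sketch, reusing this file's macros:
 */
static inline bool amdgpu_example_pcie_gen3_supported(struct amdgpu_device *adev)
{
        /* Both the platform and the ASIC cap bits live in the same mask. */
        return (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
               (adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
}
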
5738 /**
5739  * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5740  *
5741  * @adev: amdgpu_device pointer
5742  * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5743  *
5744  * Return true if @peer_adev can access (DMA) @adev through the PCIe
5745  * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5746  * @peer_adev.
5747  */
5748 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5749                                       struct amdgpu_device *peer_adev)
5750 {
5751 #ifdef CONFIG_HSA_AMD_P2P
5752         uint64_t address_mask = peer_adev->dev->dma_mask ?
5753                 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5754         resource_size_t aper_limit =
5755                 adev->gmc.aper_base + adev->gmc.aper_size - 1;
5756         bool p2p_access =
5757                 !adev->gmc.xgmi.connected_to_cpu &&
5758                 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5759
5760         return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5761                 adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5762                 !(adev->gmc.aper_base & address_mask ||
5763                   aper_limit & address_mask));
5764 #else
5765         return false;
5766 #endif
5767 }
5768
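/*
 * Editor's example (hypothetical caller, not part of this file): P2P DMA
 * setup would normally check visibility in both directions, since BAR size
 * and DMA masks can differ per device. A sketch only.
 */
static bool amdgpu_example_peers_mutually_accessible(struct amdgpu_device *a,
                                                     struct amdgpu_device *b)
{
        /* Each device must expose all of its VRAM within the other's DMA mask. */
        return amdgpu_device_is_peer_accessible(a, b) &&
               amdgpu_device_is_peer_accessible(b, a);
}
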
5769 int amdgpu_device_baco_enter(struct drm_device *dev)
5770 {
5771         struct amdgpu_device *adev = drm_to_adev(dev);
5772         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5773
5774         if (!amdgpu_device_supports_baco(dev))
5775                 return -ENOTSUPP;
5776
5777         if (ras && adev->ras_enabled &&
5778             adev->nbio.funcs->enable_doorbell_interrupt)
5779                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5780
5781         return amdgpu_dpm_baco_enter(adev);
5782 }
5783
5784 int amdgpu_device_baco_exit(struct drm_device *dev)
5785 {
5786         struct amdgpu_device *adev = drm_to_adev(dev);
5787         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5788         int ret = 0;
5789
5790         if (!amdgpu_device_supports_baco(dev))
5791                 return -ENOTSUPP;
5792
5793         ret = amdgpu_dpm_baco_exit(adev);
5794         if (ret)
5795                 return ret;
5796
5797         if (ras && adev->ras_enabled &&
5798             adev->nbio.funcs->enable_doorbell_interrupt)
5799                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5800
5801         if (amdgpu_passthrough(adev) &&
5802             adev->nbio.funcs->clear_doorbell_interrupt)
5803                 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5804
5805         return 0;
5806 }
5807
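/*
 * Editor's example (hypothetical, for illustration only): BACO entry and
 * exit are paired; a runtime-suspend style caller bails out on entry failure
 * and must exit before touching the ASIC again. Error handling simplified.
 */
static int amdgpu_example_baco_cycle(struct drm_device *dev)
{
        int r = amdgpu_device_baco_enter(dev);

        if (r)
                return r;       /* e.g. -ENOTSUPP on parts without BACO */

        /* ... device sits in BACO, drawing minimal power ... */

        return amdgpu_device_baco_exit(dev);
}
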
5808 /**
5809  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5810  * @pdev: PCI device struct
5811  * @state: PCI channel state
5812  *
5813  * Description: Called when a PCI error is detected.
5814  *
5815  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5816  */
5817 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5818 {
5819         struct drm_device *dev = pci_get_drvdata(pdev);
5820         struct amdgpu_device *adev = drm_to_adev(dev);
5821         int i;
5822
5823         DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5824
5825         if (adev->gmc.xgmi.num_physical_nodes > 1) {
5826                 DRM_WARN("No support for XGMI hive yet...");
5827                 return PCI_ERS_RESULT_DISCONNECT;
5828         }
5829
5830         adev->pci_channel_state = state;
5831
5832         switch (state) {
5833         case pci_channel_io_normal:
5834                 return PCI_ERS_RESULT_CAN_RECOVER;
5835         /* Fatal error, prepare for slot reset */
5836         case pci_channel_io_frozen:
5837                 /*
5838                  * Locking adev->reset_domain->sem will prevent any external access
5839                  * to GPU during PCI error recovery
5840                  */
5841                 amdgpu_device_lock_reset_domain(adev->reset_domain);
5842                 amdgpu_device_set_mp1_state(adev);
5843
5844                 /*
5845                  * Block any work scheduling as we do for regular GPU reset
5846                  * for the duration of the recovery
5847                  */
5848                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5849                         struct amdgpu_ring *ring = adev->rings[i];
5850
5851                         if (!ring || !ring->sched.thread)
5852                                 continue;
5853
5854                         drm_sched_stop(&ring->sched, NULL);
5855                 }
5856                 atomic_inc(&adev->gpu_reset_counter);
5857                 return PCI_ERS_RESULT_NEED_RESET;
5858         case pci_channel_io_perm_failure:
5859                 /* Permanent error, prepare for device removal */
5860                 return PCI_ERS_RESULT_DISCONNECT;
5861         }
5862
5863         return PCI_ERS_RESULT_NEED_RESET;
5864 }
5865
5866 /**
5867  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5868  * @pdev: pointer to PCI device
5869  */
5870 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5871 {
5872
5873         DRM_INFO("PCI error: mmio enabled callback!!\n");
5874
5875         /* TODO - dump whatever for debugging purposes */
5876
5877         /* This is called only if amdgpu_pci_error_detected returns
5878          * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5879          * works, so there is no need to reset the slot.
5880          */
5881
5882         return PCI_ERS_RESULT_RECOVERED;
5883 }
5884
5885 /**
5886  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5887  * @pdev: PCI device struct
5888  *
5889  * Description: This routine is called by the PCI error recovery
5890  * code after the PCI slot has been reset, just before we
5891  * should resume normal operations.
5892  */
5893 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5894 {
5895         struct drm_device *dev = pci_get_drvdata(pdev);
5896         struct amdgpu_device *adev = drm_to_adev(dev);
5897         int r, i;
5898         struct amdgpu_reset_context reset_context;
5899         u32 memsize;
5900         struct list_head device_list;
5901
5902         DRM_INFO("PCI error: slot reset callback!!\n");
5903
5904         memset(&reset_context, 0, sizeof(reset_context));
5905
5906         INIT_LIST_HEAD(&device_list);
5907         list_add_tail(&adev->reset_list, &device_list);
5908
5909         /* wait for asic to come out of reset */
5910         msleep(500);
5911
5912         /* Restore PCI config space */
5913         amdgpu_device_load_pci_state(pdev);
5914
5915         /* confirm ASIC came out of reset */
5916         for (i = 0; i < adev->usec_timeout; i++) {
5917                 memsize = amdgpu_asic_get_config_memsize(adev);
5918
5919                 if (memsize != 0xffffffff)
5920                         break;
5921                 udelay(1);
5922         }
5923         if (memsize == 0xffffffff) {
5924                 r = -ETIME;
5925                 goto out;
5926         }
5927
5928         reset_context.method = AMD_RESET_METHOD_NONE;
5929         reset_context.reset_req_dev = adev;
5930         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5931         set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5932
5933         adev->no_hw_access = true;
5934         r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5935         adev->no_hw_access = false;
5936         if (r)
5937                 goto out;
5938
5939         r = amdgpu_do_asic_reset(&device_list, &reset_context);
5940
5941 out:
5942         if (!r) {
5943                 if (amdgpu_device_cache_pci_state(adev->pdev))
5944                         pci_restore_state(adev->pdev);
5945
5946                 DRM_INFO("PCIe error recovery succeeded\n");
5947         } else {
5948                 DRM_ERROR("PCIe error recovery failed, err:%d\n", r);
5949                 amdgpu_device_unset_mp1_state(adev);
5950                 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5951         }
5952
5953         return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5954 }
5955
5956 /**
5957  * amdgpu_pci_resume() - resume normal ops after PCI reset
5958  * @pdev: pointer to PCI device
5959  *
5960  * Called when the error recovery driver tells us that it's
5961  * OK to resume normal operation.
5962  */
5963 void amdgpu_pci_resume(struct pci_dev *pdev)
5964 {
5965         struct drm_device *dev = pci_get_drvdata(pdev);
5966         struct amdgpu_device *adev = drm_to_adev(dev);
5967         int i;
5968
5969
5970         DRM_INFO("PCI error: resume callback!!\n");
5971
5972         /* Only continue execution for the case of pci_channel_io_frozen */
5973         if (adev->pci_channel_state != pci_channel_io_frozen)
5974                 return;
5975
5976         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5977                 struct amdgpu_ring *ring = adev->rings[i];
5978
5979                 if (!ring || !ring->sched.thread)
5980                         continue;
5981
5982                 drm_sched_start(&ring->sched, true);
5983         }
5984
5985         amdgpu_device_unset_mp1_state(adev);
5986         amdgpu_device_unlock_reset_domain(adev->reset_domain);
5987 }
5988
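/*
 * Editor's note: the four callbacks above are handed to the PCI core through
 * a struct pci_error_handlers table registered in the amdgpu PCI driver
 * definition (outside this file). A sketch of that wiring, with field names
 * as declared in <linux/pci.h>:
 */
static const struct pci_error_handlers amdgpu_example_pci_err_handler = {
        .error_detected = amdgpu_pci_error_detected,
        .mmio_enabled   = amdgpu_pci_mmio_enabled,
        .slot_reset     = amdgpu_pci_slot_reset,
        .resume         = amdgpu_pci_resume,
};
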
5989 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5990 {
5991         struct drm_device *dev = pci_get_drvdata(pdev);
5992         struct amdgpu_device *adev = drm_to_adev(dev);
5993         int r;
5994
5995         r = pci_save_state(pdev);
5996         if (!r) {
5997                 kfree(adev->pci_state);
5998
5999                 adev->pci_state = pci_store_saved_state(pdev);
6000
6001                 if (!adev->pci_state) {
6002                         DRM_ERROR("Failed to store PCI saved state\n");
6003                         return false;
6004                 }
6005         } else {
6006                 DRM_WARN("Failed to save PCI state, err:%d\n", r);
6007                 return false;
6008         }
6009
6010         return true;
6011 }
6012
6013 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
6014 {
6015         struct drm_device *dev = pci_get_drvdata(pdev);
6016         struct amdgpu_device *adev = drm_to_adev(dev);
6017         int r;
6018
6019         if (!adev->pci_state)
6020                 return false;
6021
6022         r = pci_load_saved_state(pdev, adev->pci_state);
6023
6024         if (!r) {
6025                 pci_restore_state(pdev);
6026         } else {
6027                 DRM_WARN("Failed to load PCI state, err:%d\n", r);
6028                 return false;
6029         }
6030
6031         return true;
6032 }
6033
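/*
 * Editor's example (hypothetical reset path, not part of this file): the two
 * helpers above are used as a pair around operations that may clobber PCI
 * config space. Sketch only; the actual reset step is elided.
 */
static void amdgpu_example_reset_with_pci_state(struct amdgpu_device *adev)
{
        if (!amdgpu_device_cache_pci_state(adev->pdev))
                return;         /* nothing cached; skip the risky path */

        /* ... perform an ASIC reset that may trash config space ... */

        amdgpu_device_load_pci_state(adev->pdev);
}
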
6034 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
6035                 struct amdgpu_ring *ring)
6036 {
6037 #ifdef CONFIG_X86_64
6038         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6039                 return;
6040 #endif
6041         if (adev->gmc.xgmi.connected_to_cpu)
6042                 return;
6043
6044         if (ring && ring->funcs->emit_hdp_flush)
6045                 amdgpu_ring_emit_hdp_flush(ring);
6046         else
6047                 amdgpu_asic_flush_hdp(adev, ring);
6048 }
6049
6050 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
6051                 struct amdgpu_ring *ring)
6052 {
6053 #ifdef CONFIG_X86_64
6054         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6055                 return;
6056 #endif
6057         if (adev->gmc.xgmi.connected_to_cpu)
6058                 return;
6059
6060         amdgpu_asic_invalidate_hdp(adev, ring);
6061 }
6062
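/*
 * Editor's example (hypothetical ordering sketch): flush pushes CPU writes
 * through the HDP path so the GPU sees them; invalidate discards stale HDP
 * cache contents before the CPU reads back GPU-written data. Passing a NULL
 * ring falls back to the MMIO-based ASIC path.
 */
static void amdgpu_example_hdp_roundtrip(struct amdgpu_device *adev)
{
        /* CPU wrote to a VRAM buffer through the BAR... */
        amdgpu_device_flush_hdp(adev, NULL);
        /* ... GPU consumes and overwrites the buffer ... */
        amdgpu_device_invalidate_hdp(adev, NULL);
        /* ... CPU can now read the GPU's data without hitting stale lines. */
}
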
6063 int amdgpu_in_reset(struct amdgpu_device *adev)
6064 {
6065         return atomic_read(&adev->reset_domain->in_gpu_reset);
6066 }
6067
6068 /**
6069  * amdgpu_device_halt() - bring hardware to some kind of halt state
6070  *
6071  * @adev: amdgpu_device pointer
6072  *
6073  * Bring hardware to some kind of halt state so that nothing can touch it
6074  * any more. This helps to preserve the error context when an error occurs.
6075  * Compared to a simple hang, the system stays stable at least for SSH
6076  * access. It should then be trivial to inspect the hardware state and
6077  * see what's going on. Implemented as follows:
6078  *
6079  * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs,
6080  *    etc.), clears all CPU mappings to the device, disallows remappings through page faults
6081  * 2. amdgpu_irq_disable_all() disables all interrupts
6082  * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
6083  * 4. set adev->no_hw_access to avoid potential crashes after step 5
6084  * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
6085  * 6. pci_disable_device() and pci_wait_for_pending_transaction()
6086  *    flush any in-flight DMA operations
6087  */
6088 void amdgpu_device_halt(struct amdgpu_device *adev)
6089 {
6090         struct pci_dev *pdev = adev->pdev;
6091         struct drm_device *ddev = adev_to_drm(adev);
6092
6093         amdgpu_xcp_dev_unplug(adev);
6094         drm_dev_unplug(ddev);
6095
6096         amdgpu_irq_disable_all(adev);
6097
6098         amdgpu_fence_driver_hw_fini(adev);
6099
6100         adev->no_hw_access = true;
6101
6102         amdgpu_device_unmap_mmio(adev);
6103
6104         pci_disable_device(pdev);
6105         pci_wait_for_pending_transaction(pdev);
6106 }
6107
6108 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
6109                                 u32 reg)
6110 {
6111         unsigned long flags, address, data;
6112         u32 r;
6113
6114         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6115         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6116
6117         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6118         WREG32(address, reg * 4);
6119         (void)RREG32(address);
6120         r = RREG32(data);
6121         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6122         return r;
6123 }
6124
6125 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
6126                                 u32 reg, u32 v)
6127 {
6128         unsigned long flags, address, data;
6129
6130         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6131         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6132
6133         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6134         WREG32(address, reg * 4);
6135         (void)RREG32(address);
6136         WREG32(data, v);
6137         (void)RREG32(data);
6138         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6139 }
6140
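/*
 * Editor's example (hypothetical helper): the two accessors above combine
 * into the usual read-modify-write pattern for PCIe port registers. The reg
 * offset and bits come from the caller; no real register names are assumed.
 */
static void amdgpu_example_pcie_port_rmw(struct amdgpu_device *adev,
                                         u32 reg, u32 set_bits)
{
        u32 tmp = amdgpu_device_pcie_port_rreg(adev, reg);

        tmp |= set_bits;
        amdgpu_device_pcie_port_wreg(adev, reg, tmp);
}
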
6141 /**
6142  * amdgpu_device_switch_gang - switch to a new gang
6143  * @adev: amdgpu_device pointer
6144  * @gang: the gang to switch to
6145  *
6146  * Try to switch to a new gang.
6147  * Returns: NULL if we switched to the new gang or a reference to the current
6148  * gang leader.
6149  */
6150 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6151                                             struct dma_fence *gang)
6152 {
6153         struct dma_fence *old = NULL;
6154
6155         do {
6156                 dma_fence_put(old);
6157                 rcu_read_lock();
6158                 old = dma_fence_get_rcu_safe(&adev->gang_submit);
6159                 rcu_read_unlock();
6160
6161                 if (old == gang)
6162                         break;
6163
6164                 if (!dma_fence_is_signaled(old))
6165                         return old;
6166
6167         } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
6168                          old, gang) != old);
6169
6170         dma_fence_put(old);
6171         return NULL;
6172 }
6173
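/*
 * Editor's example (hypothetical submission path): a caller that must not
 * overlap with the previous gang waits on the returned leader fence before
 * retrying. Sketch only; the retry policy here is invented for illustration.
 */
static int amdgpu_example_join_gang(struct amdgpu_device *adev,
                                    struct dma_fence *gang)
{
        struct dma_fence *old = amdgpu_device_switch_gang(adev, gang);

        if (!old)
                return 0;               /* switched; we are the new gang */

        dma_fence_wait(old, false);     /* previous leader still running */
        dma_fence_put(old);
        return -EAGAIN;                 /* hypothetical: caller retries */
}
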
6174 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
6175 {
6176         switch (adev->asic_type) {
6177 #ifdef CONFIG_DRM_AMDGPU_SI
6178         case CHIP_HAINAN:
6179 #endif
6180         case CHIP_TOPAZ:
6181                 /* chips with no display hardware */
6182                 return false;
6183 #ifdef CONFIG_DRM_AMDGPU_SI
6184         case CHIP_TAHITI:
6185         case CHIP_PITCAIRN:
6186         case CHIP_VERDE:
6187         case CHIP_OLAND:
6188 #endif
6189 #ifdef CONFIG_DRM_AMDGPU_CIK
6190         case CHIP_BONAIRE:
6191         case CHIP_HAWAII:
6192         case CHIP_KAVERI:
6193         case CHIP_KABINI:
6194         case CHIP_MULLINS:
6195 #endif
6196         case CHIP_TONGA:
6197         case CHIP_FIJI:
6198         case CHIP_POLARIS10:
6199         case CHIP_POLARIS11:
6200         case CHIP_POLARIS12:
6201         case CHIP_VEGAM:
6202         case CHIP_CARRIZO:
6203         case CHIP_STONEY:
6204                 /* chips with display hardware */
6205                 return true;
6206         default:
6207                 /* IP discovery */
6208                 if (!amdgpu_ip_version(adev, DCE_HWIP, 0) ||
6209                     (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6210                         return false;
6211                 return true;
6212         }
6213 }
6214
6215 int amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
6216                 uint32_t inst, uint32_t reg_addr, char reg_name[],
6217                 uint32_t expected_value, uint32_t mask)
6218 {
6219         int ret = 0;
6220         uint32_t old_ = 0;
6221         uint32_t tmp_ = RREG32(reg_addr);
6222         uint32_t loop = adev->usec_timeout;
6223
6224         while ((tmp_ & (mask)) != (expected_value)) {
6225                 if (old_ != tmp_) {
6226                         loop = adev->usec_timeout;
6227                         old_ = tmp_;
6228                 } else
6229                         udelay(1);
6230                 tmp_ = RREG32(reg_addr);
6231                 loop--;
6232                 if (!loop) {
6233                         DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n",
6234                                   inst, reg_name, (uint32_t)expected_value,
6235                                   (uint32_t)(tmp_ & (mask)));
6236                         ret = -ETIMEDOUT;
6237                         break;
6238                 }
6239         }
6240         return ret;
6241 }
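
/*
 * Editor's example (hypothetical register and bit names): polling a status
 * register until a ready bit latches, relying on the per-change timeout
 * reset behaviour of the loop above. Both macros below are placeholders,
 * not real symbols.
 */
#define EXAMPLE_STATUS_OFFSET   0x0     /* placeholder register offset */
#define EXAMPLE_READY_BIT       0x1     /* placeholder status bit */

static int amdgpu_example_wait_ready(struct amdgpu_device *adev)
{
        return amdgpu_device_wait_on_rreg(adev, 0 /* inst */,
                                          EXAMPLE_STATUS_OFFSET, "STATUS",
                                          EXAMPLE_READY_BIT,
                                          EXAMPLE_READY_BIT);
}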