drm/amdgpu: impl sriov detection for vega10
drivers/gpu/drm/amd/amdgpu/soc15.c  [linux-2.6-block.git]

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "vega10/soc15ip.h"
#include "vega10/UVD/uvd_7_0_offset.h"
#include "vega10/GC/gc_9_0_offset.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "vega10/SDMA0/sdma0_4_0_offset.h"
#include "vega10/SDMA1/sdma1_4_0_offset.h"
#include "vega10/HDP/hdp_4_0_offset.h"
#include "vega10/HDP/hdp_4_0_sh_mask.h"
#include "vega10/MP/mp_9_0_offset.h"
#include "vega10/MP/mp_9_0_sh_mask.h"
#include "vega10/SMUIO/smuio_9_0_offset.h"
#include "vega10/SMUIO/smuio_9_0_sh_mask.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "amdgpu_powerplay.h"

MODULE_FIRMWARE("amdgpu/vega10_smc.bin");

#define mmFabricConfigAccessControl                                    0x0410
#define mmFabricConfigAccessControl_BASE_IDX                           0
#define mmFabricConfigAccessControl_DEFAULT                            0x00000000
//FabricConfigAccessControl
#define FabricConfigAccessControl__CfgRegInstAccEn__SHIFT              0x0
#define FabricConfigAccessControl__CfgRegInstAccRegLock__SHIFT         0x1
#define FabricConfigAccessControl__CfgRegInstID__SHIFT                 0x10
#define FabricConfigAccessControl__CfgRegInstAccEn_MASK                0x00000001L
#define FabricConfigAccessControl__CfgRegInstAccRegLock_MASK           0x00000002L
#define FabricConfigAccessControl__CfgRegInstID_MASK                   0x00FF0000L


#define mmDF_PIE_AON0_DfGlobalClkGater                                 0x00fc
#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX                        0
//DF_PIE_AON0_DfGlobalClkGater
#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT                  0x0
#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK                    0x0000000FL

enum {
        DF_MGCG_DISABLE = 0,
        DF_MGCG_ENABLE_00_CYCLE_DELAY = 1,
        DF_MGCG_ENABLE_01_CYCLE_DELAY = 2,
        DF_MGCG_ENABLE_15_CYCLE_DELAY = 13,
        DF_MGCG_ENABLE_31_CYCLE_DELAY = 14,
        DF_MGCG_ENABLE_63_CYCLE_DELAY = 15
};

#define mmMP0_MISC_CGTT_CTRL0                                          0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX                                 0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL                                    0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX                           0

/*
 * Indirect registers accessor
 */
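/*
 * Each accessor below goes through an index/data register pair: the target
 * register offset is written to the index register and the value is then
 * read from (or written to) the data register.  The pair is shared hardware
 * state, so every access is serialized with the matching spinlock, roughly:
 *
 *      spin_lock_irqsave(&lock, flags);
 *      WREG32(index_reg, reg);
 *      val = RREG32(data_reg);
 *      spin_unlock_irqrestore(&lock, flags);
 */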
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags, address, data;
        u32 r;
        struct nbio_pcie_index_data *nbio_pcie_id;

        if (adev->asic_type == CHIP_VEGA10)
                nbio_pcie_id = &nbio_v6_1_pcie_index_data;
        else
                BUG();

        address = nbio_pcie_id->index_offset;
        data = nbio_pcie_id->data_offset;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, reg);
        (void)RREG32(address);
        r = RREG32(data);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
        return r;
}

static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags, address, data;
        struct nbio_pcie_index_data *nbio_pcie_id;

        if (adev->asic_type == CHIP_VEGA10)
                nbio_pcie_id = &nbio_v6_1_pcie_index_data;
        else
                BUG();

        address = nbio_pcie_id->index_offset;
        data = nbio_pcie_id->data_offset;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, reg);
        (void)RREG32(address);
        WREG32(data, v);
        (void)RREG32(data);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags, address, data;
        u32 r;

        address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
        data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

        spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
        WREG32(address, ((reg) & 0x1ff));
        r = RREG32(data);
        spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
        return r;
}

static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags, address, data;

        address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
        data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

        spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
        WREG32(address, ((reg) & 0x1ff));
        WREG32(data, (v));
        spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags, address, data;
        u32 r;

        address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
        data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

        spin_lock_irqsave(&adev->didt_idx_lock, flags);
        WREG32(address, (reg));
        r = RREG32(data);
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
        return r;
}

static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags, address, data;

        address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
        data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

        spin_lock_irqsave(&adev->didt_idx_lock, flags);
        WREG32(address, (reg));
        WREG32(data, (v));
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
        return nbio_v6_1_get_memsize(adev);
}

static const u32 vega10_golden_init[] =
{
};

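/*
 * Golden register programming.  vega10_golden_init is still empty at this
 * point; per-ASIC golden settings are applied under grbm_idx_mutex because
 * some of them may depend on the current GRBM_GFX_INDEX selection.
 */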
static void soc15_init_golden_registers(struct amdgpu_device *adev)
{
        /* Some of the registers might be dependent on GRBM_GFX_INDEX */
        mutex_lock(&adev->grbm_idx_mutex);

        switch (adev->asic_type) {
        case CHIP_VEGA10:
                amdgpu_program_register_sequence(adev,
                                                 vega10_golden_init,
                                                 (const u32)ARRAY_SIZE(vega10_golden_init));
                break;
        default:
                break;
        }
        mutex_unlock(&adev->grbm_idx_mutex);
}

static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
        if (adev->asic_type == CHIP_VEGA10)
                return adev->clock.spll.reference_freq / 4;
        else
                return adev->clock.spll.reference_freq;
}

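/*
 * soc15_grbm_select - program GRBM_GFX_CNTL to target a particular
 * ME/pipe/queue/VMID combination for subsequent register accesses.
 * Callers are expected to serialize the selection themselves (the GFX9
 * code typically does so under adev->srbm_mutex).
 */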
void soc15_grbm_select(struct amdgpu_device *adev,
                     u32 me, u32 pipe, u32 queue, u32 vmid)
{
        u32 grbm_gfx_cntl = 0;

        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

        WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
        /* todo */
}

static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
        /* todo */
        return false;
}

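/*
 * Read the VBIOS image out of the ASIC's ROM through the SMUIO
 * ROM_INDEX / ROM_DATA pair, one dword at a time.  APUs are skipped
 * because their VBIOS is carried inside the system BIOS image rather
 * than a dedicated ROM.
 */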
static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
                                     u8 *bios, u32 length_bytes)
{
        u32 *dw_ptr;
        u32 i, length_dw;

        if (bios == NULL)
                return false;
        if (length_bytes == 0)
                return false;
        /* APU vbios image is part of sbios image */
        if (adev->flags & AMD_IS_APU)
                return false;

        dw_ptr = (u32 *)bios;
        length_dw = ALIGN(length_bytes, 4) / 4;

        /* set rom index to 0 */
        WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
        /* read out the rom data */
        for (i = 0; i < length_dw; i++)
                dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

        return true;
}

static struct amdgpu_allowed_register_entry vega10_allowed_read_registers[] = {
        /* todo */
};

static struct amdgpu_allowed_register_entry soc15_allowed_read_registers[] = {
        { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS), false},
        { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS2), false},
        { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE0), false},
        { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE1), false},
        { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE2), false},
        { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE3), false},
        { SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_STATUS_REG), false},
        { SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_STATUS_REG), false},
        { SOC15_REG_OFFSET(GC, 0, mmCP_STAT), false},
        { SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT1), false},
        { SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT2), false},
        { SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT3), false},
        { SOC15_REG_OFFSET(GC, 0, mmCP_CPF_BUSY_STAT), false},
        { SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STALLED_STAT1), false},
        { SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STATUS), false},
        { SOC15_REG_OFFSET(GC, 0, mmCP_CPC_BUSY_STAT), false},
        { SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STALLED_STAT1), false},
        { SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STATUS), false},
        { SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), false},
        { SOC15_REG_OFFSET(GC, 0, mmCC_RB_BACKEND_DISABLE), false, true},
        { SOC15_REG_OFFSET(GC, 0, mmGC_USER_RB_BACKEND_DISABLE), false, true},
        { SOC15_REG_OFFSET(GC, 0, mmGB_BACKEND_MAP), false, false},
};

static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
                                         u32 sh_num, u32 reg_offset)
{
        uint32_t val;

        mutex_lock(&adev->grbm_idx_mutex);
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
                amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

        val = RREG32(reg_offset);

        if (se_num != 0xffffffff || sh_num != 0xffffffff)
                amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
        return val;
}

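/*
 * Whitelist-based register read used by the userspace register read query:
 * only offsets present in the per-ASIC table or in
 * soc15_allowed_read_registers may be read back, optionally routed through
 * the GRBM index for per-SE/per-SH instances.
 */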
static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
                            u32 sh_num, u32 reg_offset, u32 *value)
{
        struct amdgpu_allowed_register_entry *asic_register_table = NULL;
        struct amdgpu_allowed_register_entry *asic_register_entry;
        uint32_t size, i;

        *value = 0;
        switch (adev->asic_type) {
        case CHIP_VEGA10:
                asic_register_table = vega10_allowed_read_registers;
                size = ARRAY_SIZE(vega10_allowed_read_registers);
                break;
        default:
                return -EINVAL;
        }

        if (asic_register_table) {
                for (i = 0; i < size; i++) {
                        asic_register_entry = asic_register_table + i;
                        if (reg_offset != asic_register_entry->reg_offset)
                                continue;
                        if (!asic_register_entry->untouched)
                                *value = asic_register_entry->grbm_indexed ?
                                        soc15_read_indexed_register(adev, se_num,
                                                                 sh_num, reg_offset) :
                                        RREG32(reg_offset);
                        return 0;
                }
        }

        for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
                if (reg_offset != soc15_allowed_read_registers[i].reg_offset)
                        continue;

                if (!soc15_allowed_read_registers[i].untouched)
                        *value = soc15_allowed_read_registers[i].grbm_indexed ?
                                soc15_read_indexed_register(adev, se_num,
                                                         sh_num, reg_offset) :
                                RREG32(reg_offset);
                return 0;
        }
        return -EINVAL;
}

static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
{
        u32 i;

        dev_info(adev->dev, "GPU pci config reset\n");

        /* disable BM */
        pci_clear_master(adev->pdev);
        /* reset */
        amdgpu_pci_config_reset(adev);

        udelay(100);

        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
                if (nbio_v6_1_get_memsize(adev) != 0xffffffff)
                        break;
                udelay(1);
        }
}

static int soc15_asic_reset(struct amdgpu_device *adev)
{
        amdgpu_atombios_scratch_regs_engine_hung(adev, true);

        soc15_gpu_pci_config_reset(adev);

        amdgpu_atombios_scratch_regs_engine_hung(adev, false);

        return 0;
}

/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
                        u32 cntl_reg, u32 status_reg)
{
        return 0;
}*/

static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
        /*int r;

        r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
        if (r)
                return r;

        r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
        */
        return 0;
}

static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
        /* todo */

        return 0;
}

static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
        if (pci_is_root_bus(adev->pdev->bus))
                return;

        if (amdgpu_pcie_gen2 == 0)
                return;

        if (adev->flags & AMD_IS_APU)
                return;

        if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
                                        CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
                return;

        /* todo */
}

static void soc15_program_aspm(struct amdgpu_device *adev)
{
        if (amdgpu_aspm == 0)
                return;

        /* todo */
}

static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
                                        bool enable)
{
        nbio_v6_1_enable_doorbell_aperture(adev, enable);
        nbio_v6_1_enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_COMMON,
        .major = 2,
        .minor = 0,
        .rev = 0,
        .funcs = &soc15_common_ip_funcs,
};

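/*
 * SR-IOV detection: before any IP blocks are added, query the NBIO block
 * for virtualization status (nbio_v6_1_detect_hw_virt()) so the rest of
 * the driver can adapt its init paths when running as a virtual function.
 */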
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
        nbio_v6_1_detect_hw_virt(adev);

        switch (adev->asic_type) {
        case CHIP_VEGA10:
                amdgpu_ip_block_add(adev, &vega10_common_ip_block);
                amdgpu_ip_block_add(adev, &gfxhub_v1_0_ip_block);
                amdgpu_ip_block_add(adev, &mmhub_v1_0_ip_block);
                amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
                amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
                amdgpu_ip_block_add(adev, &psp_v3_1_ip_block);
                amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
                amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
                amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
                amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
                amdgpu_ip_block_add(adev, &vce_v4_0_ip_block);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
        return nbio_v6_1_get_rev_id(adev);
}

int gmc_v9_0_mc_wait_for_idle(struct amdgpu_device *adev)
{
        /* to be implemented in MC IP */
        return 0;
}

static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
        .read_disabled_bios = &soc15_read_disabled_bios,
        .read_bios_from_rom = &soc15_read_bios_from_rom,
        .read_register = &soc15_read_register,
        .reset = &soc15_asic_reset,
        .set_vga_state = &soc15_vga_set_state,
        .get_xclk = &soc15_get_xclk,
        .set_uvd_clocks = &soc15_set_uvd_clocks,
        .set_vce_clocks = &soc15_set_vce_clocks,
        .get_config_memsize = &soc15_get_config_memsize,
};

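/*
 * Early init wires up the indirect register accessors and ASIC callbacks,
 * initializes the shared NBIO helpers, reads the revision id and selects
 * the clock/power gating features supported on this ASIC.
 */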
static int soc15_common_early_init(void *handle)
{
        bool psp_enabled = false;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->smc_rreg = NULL;
        adev->smc_wreg = NULL;
        adev->pcie_rreg = &soc15_pcie_rreg;
        adev->pcie_wreg = &soc15_pcie_wreg;
        adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
        adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
        adev->didt_rreg = &soc15_didt_rreg;
        adev->didt_wreg = &soc15_didt_wreg;

        adev->asic_funcs = &soc15_asic_funcs;

        if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
            (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
                psp_enabled = true;

        /*
         * The NBIO helpers are needed by both SDMA and GFX9, but they must
         * only be initialized once.
         */
        switch (adev->asic_type) {
        case CHIP_VEGA10:
                nbio_v6_1_init(adev);
                break;
        default:
                return -EINVAL;
        }

        adev->rev_id = soc15_get_rev_id(adev);
        adev->external_rev_id = 0xFF;
        switch (adev->asic_type) {
        case CHIP_VEGA10:
                adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                        AMD_CG_SUPPORT_GFX_MGLS |
                        AMD_CG_SUPPORT_GFX_RLC_LS |
                        AMD_CG_SUPPORT_GFX_CP_LS |
                        AMD_CG_SUPPORT_GFX_3D_CGCG |
                        AMD_CG_SUPPORT_GFX_3D_CGLS |
                        AMD_CG_SUPPORT_GFX_CGCG |
                        AMD_CG_SUPPORT_GFX_CGLS |
                        AMD_CG_SUPPORT_BIF_MGCG |
                        AMD_CG_SUPPORT_BIF_LS |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_DRM_MGCG |
                        AMD_CG_SUPPORT_DRM_LS |
                        AMD_CG_SUPPORT_ROM_MGCG |
                        AMD_CG_SUPPORT_DF_MGCG |
                        AMD_CG_SUPPORT_SDMA_MGCG |
                        AMD_CG_SUPPORT_SDMA_LS |
                        AMD_CG_SUPPORT_MC_MGCG |
                        AMD_CG_SUPPORT_MC_LS;
                adev->pg_flags = 0;
                adev->external_rev_id = 0x1;
                break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }

        adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

        amdgpu_get_pcie_info(adev);

        return 0;
}

static int soc15_common_sw_init(void *handle)
{
        return 0;
}

static int soc15_common_sw_fini(void *handle)
{
        return 0;
}

static int soc15_common_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* move the golden regs per IP block */
        soc15_init_golden_registers(adev);
        /* enable pcie gen2/3 link */
        soc15_pcie_gen3_enable(adev);
        /* enable aspm */
        soc15_program_aspm(adev);
        /* enable the doorbell aperture */
        soc15_enable_doorbell_aperture(adev, true);

        return 0;
}

static int soc15_common_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* disable the doorbell aperture */
        soc15_enable_doorbell_aperture(adev, false);

        return 0;
}

static int soc15_common_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return soc15_common_hw_fini(adev);
}

static int soc15_common_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return soc15_common_hw_init(adev);
}

static bool soc15_common_is_idle(void *handle)
{
        return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
        return 0;
}

static int soc15_common_soft_reset(void *handle)
{
        return 0;
}

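/*
 * The clockgating helpers below all follow the same read-modify-write
 * pattern: read the current value, set or clear the relevant bits based
 * on enable and adev->cg_flags, and only write the register back if the
 * value actually changed.
 */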
static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
{
        uint32_t def, data;

        def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
                data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
        else
                data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

        if (def != data)
                WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
}

static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
        uint32_t def, data;

        def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
                data &= ~(0x01000000 |
                          0x02000000 |
                          0x04000000 |
                          0x08000000 |
                          0x10000000 |
                          0x20000000 |
                          0x40000000 |
                          0x80000000);
        else
                data |= (0x01000000 |
                         0x02000000 |
                         0x04000000 |
                         0x08000000 |
                         0x10000000 |
                         0x20000000 |
                         0x40000000 |
                         0x80000000);

        if (def != data)
                WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}

static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
        uint32_t def, data;

        def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
                data |= 1;
        else
                data &= ~1;

        if (def != data)
                WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}

static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
                                                       bool enable)
{
        uint32_t def, data;

        def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
                data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
                        CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
        else
                data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
                        CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

        if (def != data)
                WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
}

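/*
 * DF MGCG is toggled with the fabric put into broadcast mode first
 * (CfgRegInstAccEn cleared); FabricConfigAccessControl is restored to its
 * default once the new gater mode has been programmed.
 */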
static void soc15_update_df_medium_grain_clock_gating(struct amdgpu_device *adev,
                                                       bool enable)
{
        uint32_t data;

        /* Put DF on broadcast mode */
        data = RREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl));
        data &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
        WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl), data);

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
                data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
                data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
                data |= DF_MGCG_ENABLE_15_CYCLE_DELAY;
                WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
        } else {
                data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
                data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
                data |= DF_MGCG_DISABLE;
                WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
        }

        WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl),
               mmFabricConfigAccessControl_DEFAULT);
}

static int soc15_common_set_clockgating_state(void *handle,
                                            enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        switch (adev->asic_type) {
        case CHIP_VEGA10:
                nbio_v6_1_update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE);
                nbio_v6_1_update_medium_grain_light_sleep(adev,
                                state == AMD_CG_STATE_GATE);
                soc15_update_hdp_light_sleep(adev,
                                state == AMD_CG_STATE_GATE);
                soc15_update_drm_clock_gating(adev,
                                state == AMD_CG_STATE_GATE);
                soc15_update_drm_light_sleep(adev,
                                state == AMD_CG_STATE_GATE);
                soc15_update_rom_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE);
                soc15_update_df_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE);
                break;
        default:
                break;
        }
        return 0;
}

static int soc15_common_set_powergating_state(void *handle,
                                            enum amd_powergating_state state)
{
        /* todo */
        return 0;
}

const struct amd_ip_funcs soc15_common_ip_funcs = {
        .name = "soc15_common",
        .early_init = soc15_common_early_init,
        .late_init = NULL,
        .sw_init = soc15_common_sw_init,
        .sw_fini = soc15_common_sw_fini,
        .hw_init = soc15_common_hw_init,
        .hw_fini = soc15_common_hw_fini,
        .suspend = soc15_common_suspend,
        .resume = soc15_common_resume,
        .is_idle = soc15_common_is_idle,
        .wait_for_idle = soc15_common_wait_for_idle,
        .soft_reset = soc15_common_soft_reset,
        .set_clockgating_state = soc15_common_set_clockgating_state,
        .set_powergating_state = soc15_common_set_powergating_state,
};