/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v6_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_gem.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "si_enums.h"

static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v6_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
MODULE_FIRMWARE("amdgpu/verde_mc.bin");
MODULE_FIRMWARE("amdgpu/oland_mc.bin");
MODULE_FIRMWARE("amdgpu/hainan_mc.bin");
MODULE_FIRMWARE("amdgpu/si58_mc.bin");

#define MC_SEQ_MISC0__MT__MASK   0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1  0x10000000
#define MC_SEQ_MISC0__MT__DDR2   0x20000000
#define MC_SEQ_MISC0__MT__GDDR3  0x30000000
#define MC_SEQ_MISC0__MT__GDDR4  0x40000000
#define MC_SEQ_MISC0__MT__GDDR5  0x50000000
#define MC_SEQ_MISC0__MT__HBM    0x60000000
#define MC_SEQ_MISC0__MT__DDR3   0xB0000000

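/*
 * gmc_v6_0_mc_stop() and gmc_v6_0_mc_resume() bracket operations that
 * must not race with memory traffic (e.g. soft reset): stop blocks CPU
 * access through the BIF and puts the memory controller into blackout
 * mode; resume reverses both steps.
 */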
static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v6_0_wait_for_idle((void *)adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}

static void gmc_v6_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

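/*
 * Pick and fetch the MC firmware image for this ASIC.  Boards whose
 * MC_SEQ_MISC0 revision byte (bits 31:24) reads 0x58 need the dedicated
 * si58 image instead of the per-chip one.
 */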
static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	bool is_58_fw = false;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TAHITI:
		chip_name = "tahiti";
		break;
	case CHIP_PITCAIRN:
		chip_name = "pitcairn";
		break;
	case CHIP_VERDE:
		chip_name = "verde";
		break;
	case CHIP_OLAND:
		chip_name = "oland";
		break;
	case CHIP_HAINAN:
		chip_name = "hainan";
		break;
	default:
		BUG();
	}

	/* this memory configuration requires special firmware */
	if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
		is_58_fw = true;

	if (is_58_fw)
		snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
	else
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name);
	if (err) {
		dev_err(adev->dev,
			"si_mc: Failed to load firmware \"%s\"\n",
			fw_name);
		amdgpu_ucode_release(&adev->gmc.fw);
	}
	return err;
}

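/*
 * Upload the MC firmware into the on-chip sequencer.  The engine is only
 * reprogrammed when it is not already running: reset it, write the
 * IO-debug register pairs and the ucode words, restart it, then poll
 * MC_SEQ_TRAIN_WAKEUP_CNTL until training completes on both channels
 * (D0 and D1) or the usec timeout expires.
 */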
static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const __le32 *new_fw_data = NULL;
	u32 running;
	const __le32 *new_io_mc_regs = NULL;
	int i, regs_size, ucode_size;
	const struct mc_firmware_header_v1_0 *hdr;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;

	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	new_io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	new_fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;

	if (running == 0) {

		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK)
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK)
				break;
			udelay(1);
		}
	}

	return 0;
}

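/*
 * Place VRAM and GART in the GPU physical address space.  The frame
 * buffer base field in MC_VM_FB_LOCATION is in 16MB units, hence the
 * shift by 24; the amdgpu_gmc helpers then fit the GART range around
 * the VRAM aperture.
 */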
static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;

	base <<= 24;

	amdgpu_gmc_set_agp_default(adev, mc);
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
}

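/*
 * Program the MC system aperture and AGP registers to match the layout
 * chosen in gmc_v6_0_vram_gtt_location(), with VGA access locked out
 * while the apertures are moved.
 */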
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
{
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v6_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	if (adev->mode_info.num_crtc) {
		u32 tmp;

		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp |= VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK;
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp &= ~VGA_VSTATUS_CNTL;
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->mem_scratch.gpu_addr >> 12);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22);
	WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22);

	if (gmc_v6_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
}

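/*
 * Probe the memory configuration: VRAM bus width is channel size times
 * channel count, VRAM size comes from mmCONFIG_MEMSIZE (in MB), and the
 * GART defaults to 256MB (1GB on chips where UVD/VCE lack GPUVM support)
 * unless overridden by the amdgpu_gart_size module parameter.
 */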
static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;
	int r;

	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (tmp & (1 << 11))
		chansize = 16;
	else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK)
		chansize = 64;
	else
		chansize = 32;

	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	adev->gmc.vram_width = numchan * chansize;
	/* size in MB on si */
	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
	adev->gmc.visible_vram_size = adev->gmc.aper_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_HAINAN:    /* no MM engines */
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
		case CHIP_VERDE:    /* UVD, VCE do not support GPUVM */
		case CHIP_TAHITI:   /* UVD, VCE do not support GPUVM */
		case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */
		case CHIP_OLAND:    /* UVD, VCE do not support GPUVM */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
	gmc_v6_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

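/*
 * TLB management: each of the 16 VM contexts has its own invalidate bit
 * in VM_INVALIDATE_REQUEST.  The emit variant encodes the same register
 * writes into a ring so the flush executes in command-stream order.
 */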
static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				   uint32_t vmhub, uint32_t flush_type)
{
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	uint32_t reg;

	/* write new base address */
	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8);
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

	/* bits 0-15 are the VM contexts0-15 */
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}

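/*
 * PDE/PTE encoding callbacks: this generation has no extra PDE bits, so
 * get_vm_pde only sanity-checks the address, while get_vm_pte strips
 * the execute and PRT flags that SI page table entries do not support.
 */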
static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags &= ~AMDGPU_PTE_PRT;
}

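/*
 * Toggle the default response to the various VM_CONTEXT1 protection
 * faults (range, dummy page, PDE0, valid, read, write) in one register
 * update.
 */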
static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v6_0_set_prt() - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES,
			    enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn -
			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

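/*
 * Bring up the GART: point the L1 TLB and L2 cache at the page table in
 * VRAM, map context 0 over the GART range for kernel use, and park
 * contexts 1-15 (the user VMs) on the same table until per-VM page
 * tables are assigned.
 */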
static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
{
	uint64_t table_addr;
	u32 field;
	int i;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);

	/* Setup TLB control */
	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK |
	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK |
	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
	/* Setup L2 cache */
	WREG32(mmVM_L2_CNTL,
	       VM_L2_CNTL__ENABLE_L2_CACHE_MASK |
	       VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK |
	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
	WREG32(mmVM_L2_CNTL2,
	       VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
	       VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);

	field = adev->vm_manager.fragment_size;
	WREG32(mmVM_L2_CNTL3,
	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
	       (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
	       (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	WREG32(mmVM_CONTEXT0_CNTL,
	       VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
	       (0UL << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
	       VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);

	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

	/* empty context1-15 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	/* Assign the pt base to something valid for now; the pts used for
	 * the VMs are determined by the application and set up and assigned
	 * on the fly in the vm code (amdgpu_vm.c).
	 */
	for (i = 1; i < AMDGPU_NUM_VMID; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	WREG32(mmVM_CONTEXT1_CNTL,
	       VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
	       (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
	       ((adev->vm_manager.block_size - 9)
		<< VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v6_0_set_fault_enable_default(adev, false);
	else
		gmc_v6_0_set_fault_enable_default(adev, true);

	gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0);
	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned int)(adev->gmc.gart_size >> 20),
		 (unsigned long long)table_addr);
	return 0;
}

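/*
 * One-time GART allocation: the table needs 8 bytes per GPU page and is
 * kept in VRAM.
 */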
static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
		return 0;
	}
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = 0;
	return amdgpu_gart_table_vram_alloc(adev);
}

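/*
 * Tear down translation: disable both VM context registers and drop the
 * TLB and L2 cache back to their bypass configuration.
 */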
static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
{
	/*unsigned i;

	for (i = 1; i < 16; ++i) {
		uint32_t reg;
		if (i < 8)
			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i;
		else
			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i - 8);
		adev->vm_manager.saved_table_addr[i] = RREG32(reg);
	}*/

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
	/* Setup L2 cache */
	WREG32(mmVM_L2_CNTL,
	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
	WREG32(mmVM_L2_CNTL2, 0);
	WREG32(mmVM_L2_CNTL3,
	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
	       (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
}

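/*
 * Decode a VM_CONTEXT1_PROTECTION_FAULT_STATUS word into a readable log
 * line: VMID, faulting page, access direction and the memory client
 * (whose four name bytes arrive packed in mc_client).
 */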
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
			  (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}

/*
static const u32 mc_cg_registers[] = {
	MC_HUB_MISC_HUB_CG,
	MC_HUB_MISC_SIP_CG,
	MC_HUB_MISC_VM_CG,
	MC_XPB_CLK_GAT,
	ATC_MISC_CG,
	MC_CITF_MISC_WR_CG,
	MC_CITF_MISC_RD_CG,
	MC_CITF_MISC_VM_CG,
	VM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
	MC_HUB_MISC_VM_CG__ENABLE_MASK,
	MC_XPB_CLK_GAT__ENABLE_MASK,
	ATC_MISC_CG__ENABLE_MASK,
	MC_CITF_MISC_WR_CG__ENABLE_MASK,
	MC_CITF_MISC_RD_CG__ENABLE_MASK,
	MC_CITF_MISC_VM_CG__ENABLE_MASK,
	VM_L2_CG__ENABLE_MASK,
};

static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
				  bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
			data |= mc_cg_ls_en[i];
		else
			data &= ~mc_cg_ls_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
				    bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
			data |= mc_cg_en[i];
		else
			data &= ~mc_cg_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
	} else {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
	}

	if (orig != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
	else
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

	if (orig != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
				   bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
	else
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

	if (orig != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}
*/

static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_set_gmc_funcs(adev);
	gmc_v6_0_set_irq_funcs(adev);

	return 0;
}

static int gmc_v6_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
	else
		return 0;
}

static unsigned int gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
	unsigned int size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport = RREG32(mmVIEWPORT_SIZE);

		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
			4);
	}
	return size;
}

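/*
 * sw_init wires up the software state: VRAM type detection, the two VM
 * fault interrupt sources (src_id 146 and 147), a 40-bit DMA mask to
 * match the MC address range, firmware, VRAM/GART sizing and the VM
 * manager (VMID 0 for the system, 1-7 for graphics/compute, 8-15 for
 * amdkfd).
 */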
static int gmc_v6_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);

		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
	if (r)
		return r;

	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

	adev->gmc.mc_mask = 0xffffffffffULL;

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
	if (r) {
		dev_warn(adev->dev, "No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(40);

	r = gmc_v6_0_init_microcode(adev);
	if (r) {
		dev_err(adev->dev, "Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v6_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v6_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	return 0;
}

static int gmc_v6_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_ucode_release(&adev->gmc.fw);

	return 0;
}

static int gmc_v6_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v6_0_mc_load_microcode(adev);
		if (r) {
			dev_err(adev->dev, "Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v6_0_gart_enable(adev);
	if (r)
		return r;

	if (amdgpu_emu_mode == 1)
		return amdgpu_gmc_vram_checking(adev);

	return 0;
}

static int gmc_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v6_0_gart_disable(adev);

	return 0;
}

static int gmc_v6_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_hw_fini(adev);

	return 0;
}

static int gmc_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v6_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v6_0_wait_for_idle(void *handle)
{
	unsigned int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (gmc_v6_0_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

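/*
 * Soft reset of the VMC and/or MC blocks through SRBM_SOFT_RESET,
 * bracketed by gmc_v6_0_mc_stop()/gmc_v6_0_mc_resume() so no traffic
 * hits the memory controller while the reset bits pulse.
 */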
static int gmc_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v6_0_mc_stop(adev);
		if (gmc_v6_0_wait_for_idle(adev))
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		gmc_v6_0_mc_resume(adev);
		udelay(50);
	}

	return 0;
}

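/*
 * Interrupt plumbing: mask or unmask the protection-fault interrupt
 * enable bits in both VM context control registers at once.
 */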
static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned int type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

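/*
 * VM fault handler: latch the fault address and status, reset them via
 * bit 0 of VM_CONTEXT1_CNTL2, and print a rate-limited decoded report.
 */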
static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v6_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
	}

	return 0;
}

static int gmc_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int gmc_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
	.name = "gmc_v6_0",
	.early_init = gmc_v6_0_early_init,
	.late_init = gmc_v6_0_late_init,
	.sw_init = gmc_v6_0_sw_init,
	.sw_fini = gmc_v6_0_sw_fini,
	.hw_init = gmc_v6_0_hw_init,
	.hw_fini = gmc_v6_0_hw_fini,
	.suspend = gmc_v6_0_suspend,
	.resume = gmc_v6_0_resume,
	.is_idle = gmc_v6_0_is_idle,
	.wait_for_idle = gmc_v6_0_wait_for_idle,
	.soft_reset = gmc_v6_0_soft_reset,
	.set_clockgating_state = gmc_v6_0_set_clockgating_state,
	.set_powergating_state = gmc_v6_0_set_powergating_state,
};

static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
	.set_prt = gmc_v6_0_set_prt,
	.get_vm_pde = gmc_v6_0_get_vm_pde,
	.get_vm_pte = gmc_v6_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v6_0_get_vbios_fb_size,
};

static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
	.set = gmc_v6_0_vm_fault_interrupt_state,
	.process = gmc_v6_0_process_interrupt,
};

static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
}

static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v6_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v6_0_ip_funcs,
};