drm/amdgpu: limit visible vram if it's smaller than the BAR
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "vid.h"
#include "vi.h"

static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/fiji_mc.bin");

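/*
 * The golden register tables below are consumed by
 * amdgpu_program_register_sequence() as { register, AND mask, OR value }
 * triplets: each register is read, the AND mask bits are cleared, the OR
 * value is set, and the result is written back.
 */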
static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_iceland_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_iceland_a11,
						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
		break;
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_fiji_a10,
						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_tonga_a11,
						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	default:
		break;
	}
}

/**
 * gmc_v8_0_mc_wait_for_idle - wait for MC idle callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC (memory controller) to be idle.
 * (evergreen+).
 * Returns 0 if the MC is idle, -1 if not.
 */
int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
		      struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	amdgpu_asic_wait_for_mc_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}

void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
			struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}

/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_FIJI:
		chip_name = "fiji";
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		return 0;
	default: BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		printk(KERN_ERR
		       "mc: Failed to load firmware \"%s\"\n",
		       fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}

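/*
 * Note: request_firmware() resolves "amdgpu/<chip>_mc.bin" through the
 * kernel's standard firmware search path (typically /lib/firmware), so the
 * MC microcode files named above must be installed there.
 */
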
/**
 * gmc_v8_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running, blackout = 0;
	int i, ucode_size, regs_size;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		if (running) {
			blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
		}

		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}

		if (running)
			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}

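/*
 * 0xFFC0000000 below is the 40-bit MC address space (1 TB) minus 1 GB;
 * clamping VRAM to it guarantees the GTT aperture still fits in the
 * GPU's physical address space.
 */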
static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}

/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (VI).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (adev->mode_info.num_crtc)
		amdgpu_display_set_vga_render_state(adev, false);

	gmc_v8_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(mmMC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	gmc_v8_0_mc_resume(adev, &save);

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

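/*
 * A worked example for the channel decode in gmc_v8_0_mc_init() below:
 * NOOFCHAN = 3 decodes to numchan = 8, which with 32-bit channels gives
 * adev->mc.vram_width = 8 * 32 = a 256-bit memory interface.
 */
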
/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	adev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* CONFIG_MEMSIZE reports the VRAM size in MB */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* In case the PCI BAR is larger than the actual amount of vram */
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* unless the user has overridden it, set the gart
	 * size equal to 1024 MB or the vram size, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v8_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (VI).
 */
static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

/**
 * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint32_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VI:
	 * 63:40 reserved
	 * 39:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 reserved
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VI:
	 * 63:59 block fragment size
	 * 58:40 reserved
	 * 39:1 physical base address of PTE
	 * bits 5:1 must be 0.
	 * 0 valid
	 */
	value = addr & 0x000000FFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}
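
/*
 * Illustration of the PTE layout above (hypothetical address): mapping a
 * snooped system page at physical address 0x12345000 for read/write would
 * produce
 *   value = 0x12345000 | AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
 *           AMDGPU_PTE_SNOOPED | AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE;
 */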

/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}
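
/*
 * gmc_v8_0_gart_enable() below passes value = false when the
 * amdgpu_vm_fault_stop module parameter is AMDGPU_VM_FAULT_STOP_ALWAYS,
 * so faulting clients stall instead of being silently redirected to the
 * default page; gmc_v8_0_process_interrupt() does the same on the first
 * fault when the parameter is AMDGPU_VM_FAULT_STOP_FIRST.
 */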

/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (VI).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    amdgpu_vm_block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (VI).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v8_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (VI).
 */
static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

/*
 * vm
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gmc_v8_0_vm_init - VI vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits VI specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15) (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else
		adev->vm_manager.vram_base_offset = 0;

	return 0;
}

/**
 * gmc_v8_0_vm_fini - VI vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup (VI).
 */
static void gmc_v8_0_vm_fini(struct amdgpu_device *adev)
{
}

/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 *
 * Print human readable fault information (VI).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, addr,
	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
}
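
/*
 * The MCCLIENT register packs the client name as four ASCII bytes; e.g. a
 * (hypothetical) mc_client value of 0x43423030 unpacks to the block name
 * "CB00" via the byte shifts above.
 */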

static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gart_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	return 0;
}

static int gmc_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}

static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_gem_init(adev);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * amdgpu_vm_size is in GB; << 18 converts GB to 4k pages
	 * (2^30 / 2^12 = 2^18). Max GPUVM size for VI is 40 bits.
	 */
	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		amdgpu_vm_manager_fini(adev);
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_gart_fini(adev);
	amdgpu_gem_fini(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v8_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v8_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v8_0_gart_disable(adev);

	return 0;
}

static int gmc_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		amdgpu_vm_manager_fini(adev);
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_hw_fini(adev);

	return 0;
}

static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static void gmc_v8_0_print_status(void *handle)
{
	int i, j;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "GMC 8.x registers\n");
	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
		 RREG32(mmSRBM_STATUS));
	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
		 RREG32(mmSRBM_STATUS2));

	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
	dev_info(adev->dev, "  MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
		 RREG32(mmMC_VM_MX_L1_TLB_CNTL));
	dev_info(adev->dev, "  VM_L2_CNTL=0x%08X\n",
		 RREG32(mmVM_L2_CNTL));
	dev_info(adev->dev, "  VM_L2_CNTL2=0x%08X\n",
		 RREG32(mmVM_L2_CNTL2));
	dev_info(adev->dev, "  VM_L2_CNTL3=0x%08X\n",
		 RREG32(mmVM_L2_CNTL3));
	dev_info(adev->dev, "  VM_L2_CNTL4=0x%08X\n",
		 RREG32(mmVM_L2_CNTL4));
	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL2));
	dev_info(adev->dev, "  VM_CONTEXT0_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL));
	dev_info(adev->dev, "  VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR));
	dev_info(adev->dev, "  VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR));
	dev_info(adev->dev, "  mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET));
	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL2));
	dev_info(adev->dev, "  VM_CONTEXT1_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL));
	for (i = 0; i < 16; i++) {
		if (i < 8)
			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
		else
			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
	}
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
	dev_info(adev->dev, "  MC_VM_FB_LOCATION=0x%08X\n",
		 RREG32(mmMC_VM_FB_LOCATION));
	dev_info(adev->dev, "  MC_VM_AGP_BASE=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BASE));
	dev_info(adev->dev, "  MC_VM_AGP_TOP=0x%08X\n",
		 RREG32(mmMC_VM_AGP_TOP));
	dev_info(adev->dev, "  MC_VM_AGP_BOT=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BOT));

	dev_info(adev->dev, "  HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
		 RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
	dev_info(adev->dev, "  HDP_NONSURFACE_BASE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_BASE));
	dev_info(adev->dev, "  HDP_NONSURFACE_INFO=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_INFO));
	dev_info(adev->dev, "  HDP_NONSURFACE_SIZE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_SIZE));
	dev_info(adev->dev, "  HDP_MISC_CNTL=0x%08X\n",
		 RREG32(mmHDP_MISC_CNTL));
	dev_info(adev->dev, "  HDP_HOST_PATH_CNTL=0x%08X\n",
		 RREG32(mmHDP_HOST_PATH_CNTL));

	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		dev_info(adev->dev, "  %d:\n", i);
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb05 + j, RREG32(0xb05 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb06 + j, RREG32(0xb06 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb07 + j, RREG32(0xb07 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb08 + j, RREG32(0xb08 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb09 + j, RREG32(0xb09 + j));
	}

	dev_info(adev->dev, "  BIF_FB_EN=0x%08X\n",
		 RREG32(mmBIF_FB_EN));
}

static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_mode_mc_save save;
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v8_0_print_status((void *)adev);

		gmc_v8_0_mc_stop(adev, &save);
		if (gmc_v8_0_wait_for_idle(adev)) {
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
		}

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		gmc_v8_0_mc_resume(adev, &save);
		udelay(50);

		gmc_v8_0_print_status((void *)adev);
	}

	return 0;
}

static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
		entry->src_id, entry->src_data);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		addr);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		status);
	gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);

	return 0;
}

static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.early_init = gmc_v8_0_early_init,
	.late_init = gmc_v8_0_late_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.soft_reset = gmc_v8_0_soft_reset,
	.print_status = gmc_v8_0_print_status,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
};

static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
};

static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};

static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
}

static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}