drm/amdgpu: Use function for IP version check
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
1 /*
2  * Copyright 2021 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 #include <linux/pci.h>
25
26 #include <drm/drm_cache.h>
27
28 #include "amdgpu.h"
29 #include "amdgpu_atomfirmware.h"
30 #include "gmc_v11_0.h"
31 #include "umc_v8_10.h"
32 #include "athub/athub_3_0_0_sh_mask.h"
33 #include "athub/athub_3_0_0_offset.h"
34 #include "dcn/dcn_3_2_0_offset.h"
35 #include "dcn/dcn_3_2_0_sh_mask.h"
36 #include "oss/osssys_6_0_0_offset.h"
37 #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
38 #include "navi10_enum.h"
39 #include "soc15.h"
40 #include "soc15d.h"
41 #include "soc15_common.h"
42 #include "nbio_v4_3.h"
43 #include "gfxhub_v3_0.h"
44 #include "gfxhub_v3_0_3.h"
45 #include "gfxhub_v11_5_0.h"
46 #include "mmhub_v3_0.h"
47 #include "mmhub_v3_0_1.h"
48 #include "mmhub_v3_0_2.h"
49 #include "mmhub_v3_3.h"
50 #include "athub_v3_0.h"
51
52
53 static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,
54                                          struct amdgpu_irq_src *src,
55                                          unsigned int type,
56                                          enum amdgpu_interrupt_state state)
57 {
58         return 0;
59 }
60
61 static int
62 gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
63                                    struct amdgpu_irq_src *src, unsigned int type,
64                                    enum amdgpu_interrupt_state state)
65 {
66         switch (state) {
67         case AMDGPU_IRQ_STATE_DISABLE:
68                 /* MM HUB */
69                 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false);
70                 /* GFX HUB */
71                 /* This works because this interrupt is only
72                  * enabled at init/resume and disabled in
73                  * fini/suspend, so the overall state doesn't
74                  * change over the course of suspend/resume.
75                  */
76                 if (!adev->in_s0ix)
77                         amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
78                 break;
79         case AMDGPU_IRQ_STATE_ENABLE:
80                 /* MM HUB */
81                 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true);
82                 /* GFX HUB */
83                 /* This works because this interrupt is only
84                  * enabled at init/resume and disabled in
85                  * fini/suspend, so the overall state doesn't
86                  * change over the course of suspend/resume.
87                  */
88                 if (!adev->in_s0ix)
89                         amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true);
90                 break;
91         default:
92                 break;
93         }
94
95         return 0;
96 }
97
98 static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
99                                        struct amdgpu_irq_src *source,
100                                        struct amdgpu_iv_entry *entry)
101 {
102         uint32_t vmhub_index = entry->client_id == SOC21_IH_CLIENTID_VMC ?
103                                AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
104         struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
105         uint32_t status = 0;
106         u64 addr;
107
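            /*
             * Reassemble the faulting page address: src_data[0] holds page
             * address bits 43:12 and the low nibble of src_data[1] holds
             * bits 47:44.
             */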
108         addr = (u64)entry->src_data[0] << 12;
109         addr |= ((u64)entry->src_data[1] & 0xf) << 44;
110
111         if (!amdgpu_sriov_vf(adev)) {
112                 /*
113                  * Issue a dummy read to wait for the status register to
114                  * be updated to avoid reading an incorrect value due to
115                  * the new fast GRBM interface.
116                  */
117                 if (entry->vmid_src == AMDGPU_GFXHUB(0))
118                         RREG32(hub->vm_l2_pro_fault_status);
119
120                 status = RREG32(hub->vm_l2_pro_fault_status);
121                 WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
122         }
123
124         if (printk_ratelimit()) {
125                 struct amdgpu_task_info task_info;
126
127                 memset(&task_info, 0, sizeof(struct amdgpu_task_info));
128                 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
129
130                 dev_err(adev->dev,
131                         "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
132                         entry->vmid_src ? "mmhub" : "gfxhub",
133                         entry->src_id, entry->ring_id, entry->vmid,
134                         entry->pasid, task_info.process_name, task_info.tgid,
135                         task_info.task_name, task_info.pid);
136                 dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
137                         addr, entry->client_id);
138                 if (!amdgpu_sriov_vf(adev))
139                         hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
140         }
141
142         return 0;
143 }
144
145 static const struct amdgpu_irq_src_funcs gmc_v11_0_irq_funcs = {
146         .set = gmc_v11_0_vm_fault_interrupt_state,
147         .process = gmc_v11_0_process_interrupt,
148 };
149
150 static const struct amdgpu_irq_src_funcs gmc_v11_0_ecc_funcs = {
151         .set = gmc_v11_0_ecc_interrupt_state,
152         .process = amdgpu_umc_process_ecc_irq,
153 };
154
155 static void gmc_v11_0_set_irq_funcs(struct amdgpu_device *adev)
156 {
157         adev->gmc.vm_fault.num_types = 1;
158         adev->gmc.vm_fault.funcs = &gmc_v11_0_irq_funcs;
159
160         if (!amdgpu_sriov_vf(adev)) {
161                 adev->gmc.ecc_irq.num_types = 1;
162                 adev->gmc.ecc_irq.funcs = &gmc_v11_0_ecc_funcs;
163         }
164 }
165
166 /**
167  * gmc_v11_0_use_invalidate_semaphore - decide whether to use the invalidate semaphore
168  *
169  * @adev: amdgpu_device pointer
170  * @vmhub: vmhub type
171  *
172  */
173 static bool gmc_v11_0_use_invalidate_semaphore(struct amdgpu_device *adev,
174                                        uint32_t vmhub)
175 {
176         return ((vmhub == AMDGPU_MMHUB0(0)) &&
177                 (!amdgpu_sriov_vf(adev)));
178 }
179
180 static bool gmc_v11_0_get_vmid_pasid_mapping_info(
181                                         struct amdgpu_device *adev,
182                                         uint8_t vmid, uint16_t *p_pasid)
183 {
184         *p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;
185
186         return !!(*p_pasid);
187 }
188
189 /*
190  * GART
191  * VMID 0 is the physical GPU addresses as used by the kernel.
192  * VMIDs 1-15 are used for userspace clients and are handled
193  * by the amdgpu vm/hsa code.
194  */
195
196 static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
197                                    unsigned int vmhub, uint32_t flush_type)
198 {
199         bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(adev, vmhub);
200         struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
201         u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
202         u32 tmp;
203         /* Use register 17 for GART */
204         const unsigned int eng = 17;
205         unsigned int i;
206         unsigned char hub_ip = 0;
207
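            /* Select which IP's register space (GC or MMHUB) the accessors below target. */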
208         hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ?
209                    GC_HWIP : MMHUB_HWIP;
210
211         spin_lock(&adev->gmc.invalidate_lock);
212         /*
213          * The GPUVM invalidate acknowledge state may be lost across a
214          * power-gating off cycle, so acquire the semaphore before the
215          * invalidation and release it afterwards to avoid entering the
216          * power-gated state and work around the issue.
217          */
218
219         /* TODO: Semaphore use for GFXHUB still needs to be debugged. */
220         if (use_semaphore) {
221                 for (i = 0; i < adev->usec_timeout; i++) {
222                         /* a read return value of 1 means the semaphore was acquired */
223                         tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
224                                             hub->eng_distance * eng, hub_ip);
225                         if (tmp & 0x1)
226                                 break;
227                         udelay(1);
228                 }
229
230                 if (i >= adev->usec_timeout)
231                         DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
232         }
233
234         WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req, hub_ip);
235
236         /* Wait for ACK with a delay.*/
237         for (i = 0; i < adev->usec_timeout; i++) {
238                 tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
239                                     hub->eng_distance * eng, hub_ip);
240                 tmp &= 1 << vmid;
241                 if (tmp)
242                         break;
243
244                 udelay(1);
245         }
246
247         /* TODO: Semaphore use for GFXHUB still needs to be debugged. */
248         if (use_semaphore)
249                 /*
250                  * Release the semaphore after the invalidation;
251                  * writing 0 releases the semaphore.
252                  */
253                 WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
254                               hub->eng_distance * eng, 0, hub_ip);
255
256         /* Issue additional private vm invalidation to MMHUB */
257         if ((vmhub != AMDGPU_GFXHUB(0)) &&
258             (hub->vm_l2_bank_select_reserved_cid2) &&
259                 !amdgpu_sriov_vf(adev)) {
260                 inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
261                 /* bit 25: RESERVED_CACHE_PRIVATE_INVALIDATION */
262                 inv_req |= (1 << 25);
263                 /* Issue private invalidation */
264                 WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
265                 /* Read back to ensure the invalidation is done */
266                 RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
267         }
268
269         spin_unlock(&adev->gmc.invalidate_lock);
270
271         if (i < adev->usec_timeout)
272                 return;
273
274         DRM_ERROR("Timeout waiting for VM flush ACK!\n");
275 }
276
277 /**
278  * gmc_v11_0_flush_gpu_tlb - gart tlb flush callback
279  *
280  * @adev: amdgpu_device pointer
281  * @vmid: vm instance to flush
282  * @vmhub: which hub to flush
283  * @flush_type: the flush type
284  *
285  * Flush the TLB for the requested page table.
286  */
287 static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
288                                         uint32_t vmhub, uint32_t flush_type)
289 {
290         if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron)
291                 return;
292
293         /* flush hdp cache */
294         adev->hdp.funcs->flush_hdp(adev, NULL);
295
296         /* At SRIOV runtime the driver shouldn't access the register through
297          * MMIO; use the KIQ to do the VM invalidation instead.
298          */
299         if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
300             (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
301                 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
302                 const unsigned int eng = 17;
303                 u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
304                 u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
305                 u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
306
307                 amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
308                                 1 << vmid);
309                 return;
310         }
311
312         mutex_lock(&adev->mman.gtt_window_lock);
313         gmc_v11_0_flush_vm_hub(adev, vmid, vmhub, 0);
314         mutex_unlock(&adev->mman.gtt_window_lock);
315 }
316
317 /**
318  * gmc_v11_0_flush_gpu_tlb_pasid - tlb flush via pasid
319  *
320  * @adev: amdgpu_device pointer
321  * @pasid: pasid to be flushed
322  * @flush_type: the flush type
323  * @all_hub: flush all hubs
324  * @inst: which KIQ instance to use for the invalidation
325  *
326  * Flush the TLB for the requested pasid.
327  */
328 static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
329                                         uint16_t pasid, uint32_t flush_type,
330                                         bool all_hub, uint32_t inst)
331 {
332         int vmid, i;
333         signed long r;
334         uint32_t seq;
335         uint16_t queried_pasid;
336         bool ret;
337         struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring;
338         struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
339
340         if (amdgpu_emu_mode == 0 && ring->sched.ready) {
341                 spin_lock(&adev->gfx.kiq[0].ring_lock);
342                 /* 2 dwords flush + 8 dwords fence */
343                 amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
344                 kiq->pmf->kiq_invalidate_tlbs(ring,
345                                         pasid, flush_type, all_hub);
346                 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
347                 if (r) {
348                         amdgpu_ring_undo(ring);
349                         spin_unlock(&adev->gfx.kiq[0].ring_lock);
350                         return -ETIME;
351                 }
352
353                 amdgpu_ring_commit(ring);
354                 spin_unlock(&adev->gfx.kiq[0].ring_lock);
355                 r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
356                 if (r < 1) {
357                         dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
358                         return -ETIME;
359                 }
360
361                 return 0;
362         }
363
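            /*
             * Fallback when the KIQ ring isn't usable (e.g. emulation mode):
             * scan the VMID/PASID mapping registers and flush every VMID
             * bound to this PASID.
             */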
364         for (vmid = 1; vmid < 16; vmid++) {
365
366                 ret = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid,
367                                 &queried_pasid);
368                 if (ret && queried_pasid == pasid) {
369                         if (all_hub) {
370                                 for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
371                                         gmc_v11_0_flush_gpu_tlb(adev, vmid,
372                                                         i, flush_type);
373                         } else {
374                                 gmc_v11_0_flush_gpu_tlb(adev, vmid,
375                                                 AMDGPU_GFXHUB(0), flush_type);
376                         }
377                 }
378         }
379
380         return 0;
381 }
382
383 static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
384                                              unsigned int vmid, uint64_t pd_addr)
385 {
386         bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
387         struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
388         uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
389         unsigned int eng = ring->vm_inv_eng;
390
391         /*
392          * The GPUVM invalidate acknowledge state may be lost across a
393          * power-gating off cycle, so acquire the semaphore before the
394          * invalidation and release it afterwards to avoid entering the
395          * power-gated state and work around the issue.
396          */
397
398         /* TODO: Semaphore use for GFXHUB still needs to be debugged. */
399         if (use_semaphore)
400                 /* a read return value of 1 means the semaphore was acquired */
401                 amdgpu_ring_emit_reg_wait(ring,
402                                           hub->vm_inv_eng0_sem +
403                                           hub->eng_distance * eng, 0x1, 0x1);
404
405         amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
406                               (hub->ctx_addr_distance * vmid),
407                               lower_32_bits(pd_addr));
408
409         amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
410                               (hub->ctx_addr_distance * vmid),
411                               upper_32_bits(pd_addr));
412
413         amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
414                                             hub->eng_distance * eng,
415                                             hub->vm_inv_eng0_ack +
416                                             hub->eng_distance * eng,
417                                             req, 1 << vmid);
418
419         /* TODO: Semaphore use for GFXHUB still needs to be debugged. */
420         if (use_semaphore)
421                 /*
422                  * Release the semaphore after the invalidation;
423                  * writing 0 releases the semaphore.
424                  */
425                 amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
426                                       hub->eng_distance * eng, 0);
427
428         return pd_addr;
429 }
430
431 static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
432                                          unsigned int pasid)
433 {
434         struct amdgpu_device *adev = ring->adev;
435         uint32_t reg;
436
437         /* MES fw manages IH_VMID_x_LUT updating */
438         if (ring->is_mes_queue)
439                 return;
440
441         if (ring->vm_hub == AMDGPU_GFXHUB(0))
442                 reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
443         else
444                 reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;
445
446         amdgpu_ring_emit_wreg(ring, reg, pasid);
447 }
448
449 /*
450  * PTE format:
451  * 63:59 reserved
452  * 58:57 reserved
453  * 56 F
454  * 55 L
455  * 54 reserved
456  * 53:52 SW
457  * 51 T
458  * 50:48 mtype
459  * 47:12 4k physical page base address
460  * 11:7 fragment
461  * 6 write
462  * 5 read
463  * 4 exe
464  * 3 Z
465  * 2 snooped
466  * 1 system
467  * 0 valid
468  *
469  * PDE format:
470  * 63:59 block fragment size
471  * 58:55 reserved
472  * 54 P
473  * 53:48 reserved
474  * 47:6 physical base address of PD or PTE
475  * 5:3 reserved
476  * 2 C
477  * 1 system
478  * 0 valid
479  */
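    /*
     * Example: a valid, snooped, readable and writable system page sets
     * bits 0 (valid), 1 (system), 2 (snooped), 5 (read) and 6 (write);
     * the cache behaviour comes from the mtype field in bits 50:48.
     */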
480
481 static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
482 {
483         switch (flags) {
484         case AMDGPU_VM_MTYPE_DEFAULT:
485                 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
486         case AMDGPU_VM_MTYPE_NC:
487                 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
488         case AMDGPU_VM_MTYPE_WC:
489                 return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
490         case AMDGPU_VM_MTYPE_CC:
491                 return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
492         case AMDGPU_VM_MTYPE_UC:
493                 return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
494         default:
495                 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
496         }
497 }
498
499 static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
500                                  uint64_t *addr, uint64_t *flags)
501 {
502         if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
503                 *addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
504         BUG_ON(*addr & 0xFFFF00000000003FULL);
505
506         if (!adev->gmc.translate_further)
507                 return;
508
509         if (level == AMDGPU_VM_PDB1) {
510                 /* Set the block fragment size */
511                 if (!(*flags & AMDGPU_PDE_PTE))
512                         *flags |= AMDGPU_PDE_BFS(0x9);
513
514         } else if (level == AMDGPU_VM_PDB0) {
515                 if (*flags & AMDGPU_PDE_PTE)
516                         *flags &= ~AMDGPU_PDE_PTE;
517                 else
518                         *flags |= AMDGPU_PTE_TF;
519         }
520 }
521
522 static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
523                                  struct amdgpu_bo_va_mapping *mapping,
524                                  uint64_t *flags)
525 {
526         struct amdgpu_bo *bo = mapping->bo_va->base.bo;
527
528         *flags &= ~AMDGPU_PTE_EXECUTABLE;
529         *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
530
531         *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
532         *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
533
534         *flags &= ~AMDGPU_PTE_NOALLOC;
535         *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
536
537         if (mapping->flags & AMDGPU_PTE_PRT) {
538                 *flags |= AMDGPU_PTE_PRT;
539                 *flags |= AMDGPU_PTE_SNOOPED;
540                 *flags |= AMDGPU_PTE_LOG;
541                 *flags |= AMDGPU_PTE_SYSTEM;
542                 *flags &= ~AMDGPU_PTE_VALID;
543         }
544
545         if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
546                                AMDGPU_GEM_CREATE_UNCACHED))
547                 *flags = (*flags & ~AMDGPU_PTE_MTYPE_NV10_MASK) |
548                          AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
549 }
550
551 static unsigned int gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
552 {
553         u32 d1vga_control = RREG32_SOC15(DCE, 0, regD1VGA_CONTROL);
554         unsigned int size;
555
556         if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
557                 size = AMDGPU_VBIOS_VGA_ALLOCATION;
558         } else {
559                 u32 viewport;
560                 u32 pitch;
561
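                    /*
                     * Estimate the pre-OS framebuffer size from the active
                     * viewport height and surface pitch, assuming 4 bytes
                     * per pixel.
                     */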
562                 viewport = RREG32_SOC15(DCE, 0, regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
563                 pitch = RREG32_SOC15(DCE, 0, regHUBPREQ0_DCSURF_SURFACE_PITCH);
564                 size = (REG_GET_FIELD(viewport,
565                                         HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
566                                 REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
567                                 4);
568         }
569
570         return size;
571 }
572
573 static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
574         .flush_gpu_tlb = gmc_v11_0_flush_gpu_tlb,
575         .flush_gpu_tlb_pasid = gmc_v11_0_flush_gpu_tlb_pasid,
576         .emit_flush_gpu_tlb = gmc_v11_0_emit_flush_gpu_tlb,
577         .emit_pasid_mapping = gmc_v11_0_emit_pasid_mapping,
578         .map_mtype = gmc_v11_0_map_mtype,
579         .get_vm_pde = gmc_v11_0_get_vm_pde,
580         .get_vm_pte = gmc_v11_0_get_vm_pte,
581         .get_vbios_fb_size = gmc_v11_0_get_vbios_fb_size,
582 };
583
584 static void gmc_v11_0_set_gmc_funcs(struct amdgpu_device *adev)
585 {
586         adev->gmc.gmc_funcs = &gmc_v11_0_gmc_funcs;
587 }
588
589 static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
590 {
591         switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
592         case IP_VERSION(8, 10, 0):
593                 adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM;
594                 adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM;
595                 adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
596                 adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET;
597                 adev->umc.retire_unit = UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM;
598                 if (adev->umc.node_inst_num == 4)
599                         adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl_ext0[0][0][0];
600                 else
601                         adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl[0][0][0];
602                 adev->umc.ras = &umc_v8_10_ras;
603                 break;
604         case IP_VERSION(8, 11, 0):
605                 break;
606         default:
607                 break;
608         }
609 }
610
611
612 static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
613 {
614         switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
615         case IP_VERSION(3, 0, 1):
616                 adev->mmhub.funcs = &mmhub_v3_0_1_funcs;
617                 break;
618         case IP_VERSION(3, 0, 2):
619                 adev->mmhub.funcs = &mmhub_v3_0_2_funcs;
620                 break;
621         case IP_VERSION(3, 3, 0):
622                 adev->mmhub.funcs = &mmhub_v3_3_funcs;
623                 break;
624         default:
625                 adev->mmhub.funcs = &mmhub_v3_0_funcs;
626                 break;
627         }
628 }
629
630 static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)
631 {
632         switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
633         case IP_VERSION(11, 0, 3):
634                 adev->gfxhub.funcs = &gfxhub_v3_0_3_funcs;
635                 break;
636         case IP_VERSION(11, 5, 0):
637                 adev->gfxhub.funcs = &gfxhub_v11_5_0_funcs;
638                 break;
639         default:
640                 adev->gfxhub.funcs = &gfxhub_v3_0_funcs;
641                 break;
642         }
643 }
644
645 static int gmc_v11_0_early_init(void *handle)
646 {
647         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
648
649         gmc_v11_0_set_gfxhub_funcs(adev);
650         gmc_v11_0_set_mmhub_funcs(adev);
651         gmc_v11_0_set_gmc_funcs(adev);
652         gmc_v11_0_set_irq_funcs(adev);
653         gmc_v11_0_set_umc_funcs(adev);
654
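            /*
             * Reserve a 4 GB shared aperture and a 4 GB private (scratch)
             * aperture in the GPUVM address space.
             */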
655         adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
656         adev->gmc.shared_aperture_end =
657                 adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
658         adev->gmc.private_aperture_start = 0x1000000000000000ULL;
659         adev->gmc.private_aperture_end =
660                 adev->gmc.private_aperture_start + (4ULL << 30) - 1;
661         adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;
662
663         return 0;
664 }
665
666 static int gmc_v11_0_late_init(void *handle)
667 {
668         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
669         int r;
670
671         r = amdgpu_gmc_allocate_vm_inv_eng(adev);
672         if (r)
673                 return r;
674
675         r = amdgpu_gmc_ras_late_init(adev);
676         if (r)
677                 return r;
678
679         return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
680 }
681
682 static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
683                                         struct amdgpu_gmc *mc)
684 {
685         u64 base = 0;
686
687         base = adev->mmhub.funcs->get_fb_location(adev);
688
689         amdgpu_gmc_vram_location(adev, &adev->gmc, base);
690         amdgpu_gmc_gart_location(adev, mc);
691         amdgpu_gmc_agp_location(adev, mc);
692
693         /* base offset of vram pages */
694         if (amdgpu_sriov_vf(adev))
695                 adev->vm_manager.vram_base_offset = 0;
696         else
697                 adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
698 }
699
700 /**
701  * gmc_v11_0_mc_init - initialize the memory controller driver params
702  *
703  * @adev: amdgpu_device pointer
704  *
705  * Look up the amount of vram, vram width, and decide how to place
706  * vram and gart within the GPU's physical address space.
707  * Returns 0 for success.
708  */
709 static int gmc_v11_0_mc_init(struct amdgpu_device *adev)
710 {
711         int r;
712
713         /* memsize is reported in MB; convert to bytes */
714         adev->gmc.mc_vram_size =
715                 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
716         adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
717
718         if (!(adev->flags & AMD_IS_APU)) {
719                 r = amdgpu_device_resize_fb_bar(adev);
720                 if (r)
721                         return r;
722         }
723         adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
724         adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
725
726 #ifdef CONFIG_X86_64
727         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
728                 adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
729                 adev->gmc.aper_size = adev->gmc.real_vram_size;
730         }
731 #endif
732         /* In case the PCI BAR is larger than the actual amount of vram */
733         adev->gmc.visible_vram_size = adev->gmc.aper_size;
734         if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
735                 adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
736
737         /* Set the GART size; amdgpu_gart_size == -1 selects the 512 MB default. */
738         if (amdgpu_gart_size == -1)
739                 adev->gmc.gart_size = 512ULL << 20;
740         else
741                 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
742
743         gmc_v11_0_vram_gtt_location(adev, &adev->gmc);
744
745         return 0;
746 }
747
748 static int gmc_v11_0_gart_init(struct amdgpu_device *adev)
749 {
750         int r;
751
752         if (adev->gart.bo) {
753                 WARN(1, "PCIE GART already initialized\n");
754                 return 0;
755         }
756
757         /* Initialize common gart structure */
758         r = amdgpu_gart_init(adev);
759         if (r)
760                 return r;
761
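            /* Each GART page table entry is 8 bytes. */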
762         adev->gart.table_size = adev->gart.num_gpu_pages * 8;
763         adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
764                                  AMDGPU_PTE_EXECUTABLE;
765
766         return amdgpu_gart_table_vram_alloc(adev);
767 }
768
769 static int gmc_v11_0_sw_init(void *handle)
770 {
771         int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
772         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
773
774         adev->mmhub.funcs->init(adev);
775
776         spin_lock_init(&adev->gmc.invalidate_lock);
777
778         r = amdgpu_atomfirmware_get_vram_info(adev,
779                                               &vram_width, &vram_type, &vram_vendor);
780         adev->gmc.vram_width = vram_width;
781
782         adev->gmc.vram_type = vram_type;
783         adev->gmc.vram_vendor = vram_vendor;
784
785         switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
786         case IP_VERSION(11, 0, 0):
787         case IP_VERSION(11, 0, 1):
788         case IP_VERSION(11, 0, 2):
789         case IP_VERSION(11, 0, 3):
790         case IP_VERSION(11, 0, 4):
791         case IP_VERSION(11, 5, 0):
792                 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
793                 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
794                 /*
795                  * To support 4-level page tables, use the maximum
796                  * VM size of 256 TB (48 bits) with a block size of
797                  * 512 (9 bits).
798                  */
799                 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
800                 break;
801         default:
802                 break;
803         }
804
805         /* This interrupt is for VMC page faults. */
806         r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
807                               VMC_1_0__SRCID__VM_FAULT,
808                               &adev->gmc.vm_fault);
809
810         if (r)
811                 return r;
812
813         r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
814                               UTCL2_1_0__SRCID__FAULT,
815                               &adev->gmc.vm_fault);
816         if (r)
817                 return r;
818
819         if (!amdgpu_sriov_vf(adev)) {
820                 /* interrupt sent to DF. */
821                 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0,
822                                       &adev->gmc.ecc_irq);
823                 if (r)
824                         return r;
825         }
826
827         /*
828          * Set the internal MC address mask. This is the max address of the GPU's
829          * internal address space.
830          */
831         adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
832
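            /* Limit DMA addressing to 44 bits, which is what the GMC can address in system memory. */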
833         r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
834         if (r) {
835                 dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
836                 return r;
837         }
838
839         adev->need_swiotlb = drm_need_swiotlb(44);
840
841         r = gmc_v11_0_mc_init(adev);
842         if (r)
843                 return r;
844
845         amdgpu_gmc_get_vbios_allocations(adev);
846
847         /* Memory manager */
848         r = amdgpu_bo_init(adev);
849         if (r)
850                 return r;
851
852         r = gmc_v11_0_gart_init(adev);
853         if (r)
854                 return r;
855
856         /*
857          * number of VMs
858          * VMID 0 is reserved for System
859          * amdgpu graphics/compute will use VMIDs 1-7
860          * amdkfd will use VMIDs 8-15
861          */
862         adev->vm_manager.first_kfd_vmid = 8;
863
864         amdgpu_vm_manager_init(adev);
865
866         r = amdgpu_gmc_ras_sw_init(adev);
867         if (r)
868                 return r;
869
870         return 0;
871 }
872
873 /**
874  * gmc_v11_0_gart_fini - vm fini callback
875  *
876  * @adev: amdgpu_device pointer
877  *
878  * Tears down the driver GART/VM setup.
879  */
880 static void gmc_v11_0_gart_fini(struct amdgpu_device *adev)
881 {
882         amdgpu_gart_table_vram_free(adev);
883 }
884
885 static int gmc_v11_0_sw_fini(void *handle)
886 {
887         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
888
889         amdgpu_vm_manager_fini(adev);
890         gmc_v11_0_gart_fini(adev);
891         amdgpu_gem_force_release(adev);
892         amdgpu_bo_fini(adev);
893
894         return 0;
895 }
896
897 static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev)
898 {
899         if (amdgpu_sriov_vf(adev)) {
900                 struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
901
902                 WREG32(hub->vm_contexts_disable, 0);
903                 return;
904         }
905 }
906
907 /**
908  * gmc_v11_0_gart_enable - gart enable
909  *
910  * @adev: amdgpu_device pointer
911  */
912 static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
913 {
914         int r;
915         bool value;
916
917         if (adev->gart.bo == NULL) {
918                 dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
919                 return -EINVAL;
920         }
921
922         amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
923
924         r = adev->mmhub.funcs->gart_enable(adev);
925         if (r)
926                 return r;
927
928         /* Flush HDP after it is initialized */
929         adev->hdp.funcs->flush_hdp(adev, NULL);
930
931         value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
932                 false : true;
933
934         adev->mmhub.funcs->set_fault_enable_default(adev, value);
935         gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
936
937         DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
938                  (unsigned int)(adev->gmc.gart_size >> 20),
939                  (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
940
941         return 0;
942 }
943
944 static int gmc_v11_0_hw_init(void *handle)
945 {
946         int r;
947         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
948
949         /* The sequence of these two function calls matters.*/
950         gmc_v11_0_init_golden_registers(adev);
951
952         r = gmc_v11_0_gart_enable(adev);
953         if (r)
954                 return r;
955
956         if (adev->umc.funcs && adev->umc.funcs->init_registers)
957                 adev->umc.funcs->init_registers(adev);
958
959         return 0;
960 }
961
962 /**
963  * gmc_v11_0_gart_disable - gart disable
964  *
965  * @adev: amdgpu_device pointer
966  *
967  * This disables all VM page tables.
968  */
969 static void gmc_v11_0_gart_disable(struct amdgpu_device *adev)
970 {
971         adev->mmhub.funcs->gart_disable(adev);
972 }
973
974 static int gmc_v11_0_hw_fini(void *handle)
975 {
976         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
977
978         if (amdgpu_sriov_vf(adev)) {
979                 /* full access mode, so don't touch any GMC register */
980                 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
981                 return 0;
982         }
983
984         amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
985         gmc_v11_0_gart_disable(adev);
986
987         return 0;
988 }
989
990 static int gmc_v11_0_suspend(void *handle)
991 {
992         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
993
994         gmc_v11_0_hw_fini(adev);
995
996         return 0;
997 }
998
999 static int gmc_v11_0_resume(void *handle)
1000 {
1001         int r;
1002         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1003
1004         r = gmc_v11_0_hw_init(adev);
1005         if (r)
1006                 return r;
1007
1008         amdgpu_vmid_reset_all(adev);
1009
1010         return 0;
1011 }
1012
1013 static bool gmc_v11_0_is_idle(void *handle)
1014 {
1015         /* MC is always ready in GMC v11.*/
1016         return true;
1017 }
1018
1019 static int gmc_v11_0_wait_for_idle(void *handle)
1020 {
1021         /* There is no need to wait for MC idle in GMC v11.*/
1022         return 0;
1023 }
1024
1025 static int gmc_v11_0_soft_reset(void *handle)
1026 {
1027         return 0;
1028 }
1029
1030 static int gmc_v11_0_set_clockgating_state(void *handle,
1031                                            enum amd_clockgating_state state)
1032 {
1033         int r;
1034         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1035
1036         r = adev->mmhub.funcs->set_clockgating(adev, state);
1037         if (r)
1038                 return r;
1039
1040         return athub_v3_0_set_clockgating(adev, state);
1041 }
1042
1043 static void gmc_v11_0_get_clockgating_state(void *handle, u64 *flags)
1044 {
1045         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1046
1047         adev->mmhub.funcs->get_clockgating(adev, flags);
1048
1049         athub_v3_0_get_clockgating(adev, flags);
1050 }
1051
1052 static int gmc_v11_0_set_powergating_state(void *handle,
1053                                            enum amd_powergating_state state)
1054 {
1055         return 0;
1056 }
1057
1058 const struct amd_ip_funcs gmc_v11_0_ip_funcs = {
1059         .name = "gmc_v11_0",
1060         .early_init = gmc_v11_0_early_init,
1061         .sw_init = gmc_v11_0_sw_init,
1062         .hw_init = gmc_v11_0_hw_init,
1063         .late_init = gmc_v11_0_late_init,
1064         .sw_fini = gmc_v11_0_sw_fini,
1065         .hw_fini = gmc_v11_0_hw_fini,
1066         .suspend = gmc_v11_0_suspend,
1067         .resume = gmc_v11_0_resume,
1068         .is_idle = gmc_v11_0_is_idle,
1069         .wait_for_idle = gmc_v11_0_wait_for_idle,
1070         .soft_reset = gmc_v11_0_soft_reset,
1071         .set_clockgating_state = gmc_v11_0_set_clockgating_state,
1072         .set_powergating_state = gmc_v11_0_set_powergating_state,
1073         .get_clockgating_state = gmc_v11_0_get_clockgating_state,
1074 };
1075
1076 const struct amdgpu_ip_block_version gmc_v11_0_ip_block = {
1077         .type = AMD_IP_BLOCK_TYPE_GMC,
1078         .major = 11,
1079         .minor = 0,
1080         .rev = 0,
1081         .funcs = &gmc_v11_0_ip_funcs,
1082 };