// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2017-2018 Broadcom */

/**
 * DOC: Broadcom V3D MMU
 *
 * The V3D 3.x hardware (compared to VC4) now includes an MMU. It has
 * a single level of page tables for the V3D's 4GB address space to
 * map to AXI bus addresses, thus it could need up to 4MB of
 * physically contiguous memory to store the PTEs.
 *
 * Because the 4MB of contiguous memory for page tables is precious,
 * and switching between them is expensive, we load all BOs into the
 * same 4GB address space.
 *
 * To protect clients from each other, we should use the GMP to
 * quickly mask out (at 128kb granularity) what pages are available to
 * each client. This is not yet implemented.
 */
20 | ||
21 | #include "v3d_drv.h" | |
22 | #include "v3d_regs.h" | |
23 | ||
24 | #define V3D_MMU_PAGE_SHIFT 12 | |
25 | ||
26 | /* Note: All PTEs for the 1MB superpage must be filled with the | |
27 | * superpage bit set. | |
28 | */ | |
29 | #define V3D_PTE_SUPERPAGE BIT(31) | |
30 | #define V3D_PTE_WRITEABLE BIT(29) | |
31 | #define V3D_PTE_VALID BIT(28) | |
32 | ||
33 | static int v3d_mmu_flush_all(struct v3d_dev *v3d) | |
34 | { | |
35 | int ret; | |
36 | ||
37 | /* Make sure that another flush isn't already running when we | |
38 | * start this one. | |
39 | */ | |
40 | ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & | |
41 | V3D_MMU_CTL_TLB_CLEARING), 100); | |
42 | if (ret) | |
bc662528 | 43 | dev_err(v3d->drm.dev, "TLB clear wait idle pre-wait failed\n"); |
57692c94 EA |
44 | |
45 | V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) | | |
46 | V3D_MMU_CTL_TLB_CLEAR); | |
47 | ||
48 | V3D_WRITE(V3D_MMUC_CONTROL, | |
49 | V3D_MMUC_CONTROL_FLUSH | | |
50 | V3D_MMUC_CONTROL_ENABLE); | |
51 | ||
52 | ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & | |
53 | V3D_MMU_CTL_TLB_CLEARING), 100); | |
54 | if (ret) { | |
bc662528 | 55 | dev_err(v3d->drm.dev, "TLB clear wait idle failed\n"); |
57692c94 EA |
56 | return ret; |
57 | } | |
58 | ||
59 | ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) & | |
60 | V3D_MMUC_CONTROL_FLUSHING), 100); | |
61 | if (ret) | |
bc662528 | 62 | dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n"); |
57692c94 EA |
63 | |
64 | return ret; | |
65 | } | |
66 | ||
67 | int v3d_mmu_set_page_table(struct v3d_dev *v3d) | |
68 | { | |
69 | V3D_WRITE(V3D_MMU_PT_PA_BASE, v3d->pt_paddr >> V3D_MMU_PAGE_SHIFT); | |
70 | V3D_WRITE(V3D_MMU_CTL, | |
71 | V3D_MMU_CTL_ENABLE | | |
38c2c791 | 72 | V3D_MMU_CTL_PT_INVALID_ENABLE | |
57692c94 | 73 | V3D_MMU_CTL_PT_INVALID_ABORT | |
38c2c791 | 74 | V3D_MMU_CTL_PT_INVALID_INT | |
57692c94 | 75 | V3D_MMU_CTL_WRITE_VIOLATION_ABORT | |
38c2c791 EA |
76 | V3D_MMU_CTL_WRITE_VIOLATION_INT | |
77 | V3D_MMU_CTL_CAP_EXCEEDED_ABORT | | |
78 | V3D_MMU_CTL_CAP_EXCEEDED_INT); | |
57692c94 EA |
79 | V3D_WRITE(V3D_MMU_ILLEGAL_ADDR, |
80 | (v3d->mmu_scratch_paddr >> V3D_MMU_PAGE_SHIFT) | | |
81 | V3D_MMU_ILLEGAL_ADDR_ENABLE); | |
82 | V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_ENABLE); | |
83 | ||
84 | return v3d_mmu_flush_all(v3d); | |
85 | } | |
86 | ||
87 | void v3d_mmu_insert_ptes(struct v3d_bo *bo) | |
88 | { | |
40609d48 EA |
89 | struct drm_gem_shmem_object *shmem_obj = &bo->base; |
90 | struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev); | |
57692c94 EA |
91 | u32 page = bo->node.start; |
92 | u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID; | |
e96418da | 93 | struct sg_dma_page_iter dma_iter; |
57692c94 | 94 | |
e96418da MS |
95 | for_each_sgtable_dma_page(shmem_obj->sgt, &dma_iter, 0) { |
96 | dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter); | |
97 | u32 page_address = dma_addr >> V3D_MMU_PAGE_SHIFT; | |
57692c94 EA |
98 | u32 pte = page_prot | page_address; |
99 | u32 i; | |
100 | ||
e96418da | 101 | BUG_ON(page_address + (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT) >= |
57692c94 | 102 | BIT(24)); |
e96418da | 103 | for (i = 0; i < PAGE_SIZE >> V3D_MMU_PAGE_SHIFT; i++) |
57692c94 EA |
104 | v3d->pt[page++] = pte + i; |
105 | } | |
106 | ||
107 | WARN_ON_ONCE(page - bo->node.start != | |
40609d48 | 108 | shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT); |
57692c94 EA |
109 | |
110 | if (v3d_mmu_flush_all(v3d)) | |
bc662528 | 111 | dev_err(v3d->drm.dev, "MMU flush timeout\n"); |
57692c94 EA |
112 | } |
113 | ||
114 | void v3d_mmu_remove_ptes(struct v3d_bo *bo) | |
115 | { | |
40609d48 EA |
116 | struct v3d_dev *v3d = to_v3d_dev(bo->base.base.dev); |
117 | u32 npages = bo->base.base.size >> V3D_MMU_PAGE_SHIFT; | |
57692c94 EA |
118 | u32 page; |
119 | ||
120 | for (page = bo->node.start; page < bo->node.start + npages; page++) | |
121 | v3d->pt[page] = 0; | |
122 | ||
123 | if (v3d_mmu_flush_all(v3d)) | |
bc662528 | 124 | dev_err(v3d->drm.dev, "MMU flush timeout\n"); |
57692c94 | 125 | } |