#ifndef __IO_PGTABLE_H
#define __IO_PGTABLE_H
#include <linux/bitops.h>

/*
 * Public API for use by IOMMU drivers
 */
/* Page-table formats an IOMMU driver may request from alloc_io_pgtable_ops(). */
enum io_pgtable_fmt {
	ARM_32_LPAE_S1,
	ARM_32_LPAE_S2,
	ARM_64_LPAE_S1,
	ARM_64_LPAE_S2,
	ARM_V7S,
	IO_PGTABLE_NUM_FMTS,	/* must be last: count of formats above */
};
16 | ||
/**
 * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
 *
 * @tlb_flush_all: Synchronously invalidate the entire TLB context.
 * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
 * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
 *                 any corresponding page table updates are visible to the
 *                 IOMMU.
 *
 * Note that these can all be called in atomic context and must therefore
 * not block.
 */
struct iommu_gather_ops {
	void (*tlb_flush_all)(void *cookie);
	void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
			      bool leaf, void *cookie);
	void (*tlb_sync)(void *cookie);
};
35 | ||
/**
 * struct io_pgtable_cfg - Configuration data for a set of page tables.
 *
 * @quirks:        A bitmap of hardware quirks that require some special
 *                 action by the low-level page table allocator.
 * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
 *                 tables.
 * @ias:           Input address (iova) size, in bits.
 * @oas:           Output address (paddr) size, in bits.
 * @tlb:           TLB management callbacks for this set of tables.
 * @iommu_dev:     The device representing the DMA configuration for the
 *                 page table walker.
 */
struct io_pgtable_cfg {
	/*
	 * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
	 *	stage 1 PTEs, for hardware which insists on validating them
	 *	even in non-secure state where they should normally be ignored.
	 *
	 * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
	 *	IOMMU_NOEXEC flags and map everything with full access, for
	 *	hardware which does not implement the permissions of a given
	 *	format, and/or requires some format-specific default value.
	 *
	 * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
	 *	(unmapped) entries but the hardware might do so anyway, perform
	 *	TLB maintenance when mapping as well as when unmapping.
	 *
	 * IO_PGTABLE_QUIRK_ARM_MTK_4GB: (ARM v7s format) Set bit 9 in all
	 *	PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit
	 *	when the SoC is in "4GB mode" and they can only access the high
	 *	remap of DRAM (0x1_00000000 to 0x1_ffffffff).
	 *
	 * IO_PGTABLE_QUIRK_NO_DMA: Guarantees that the tables will only ever
	 *	be accessed by a fully cache-coherent IOMMU or CPU (e.g. for a
	 *	software-emulated IOMMU), such that pagetable updates need not
	 *	be treated as explicit DMA data.
	 */
	#define IO_PGTABLE_QUIRK_ARM_NS		BIT(0)
	#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
	#define IO_PGTABLE_QUIRK_TLBI_ON_MAP	BIT(2)
	#define IO_PGTABLE_QUIRK_ARM_MTK_4GB	BIT(3)
	#define IO_PGTABLE_QUIRK_NO_DMA		BIT(4)
	unsigned long quirks;
	unsigned long pgsize_bitmap;
	unsigned int ias;
	unsigned int oas;
	const struct iommu_gather_ops *tlb;
	struct device *iommu_dev;

	/* Low-level data specific to the table format */
	union {
		struct {
			u64	ttbr[2];
			u64	tcr;
			u64	mair[2];
		} arm_lpae_s1_cfg;

		struct {
			u64	vttbr;
			u64	vtcr;
		} arm_lpae_s2_cfg;

		struct {
			u32	ttbr[2];
			u32	tcr;
			u32	nmrr;
			u32	prrr;
		} arm_v7s_cfg;
	};
};
107 | ||
/**
 * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
 *
 * @map:          Map a physically contiguous memory region.
 * @unmap:        Unmap a physically contiguous memory region.
 * @iova_to_phys: Translate iova to physical address.
 *
 * These functions map directly onto the iommu_ops member functions with
 * the same names.
 */
struct io_pgtable_ops {
	int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot);
	int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
		     size_t size);
	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
				    unsigned long iova);
};
126 | ||
/**
 * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
 *
 * @fmt:    The page table format.
 * @cfg:    The page table configuration. This will be modified to represent
 *          the configuration actually provided by the allocator (e.g. the
 *          pgsize_bitmap may be restricted).
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          the callback routines in cfg->tlb.
 */
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
					    struct io_pgtable_cfg *cfg,
					    void *cookie);
140 | ||
/**
 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
 *                         *must* ensure that the page table is no longer
 *                         live, but the TLB can be dirty.
 *
 * @ops: The ops returned from alloc_io_pgtable_ops.
 */
void free_io_pgtable_ops(struct io_pgtable_ops *ops);
149 | ||
150 | ||
151 | /* | |
152 | * Internal structures for page table allocator implementations. | |
153 | */ | |
154 | ||
/**
 * struct io_pgtable - Internal structure describing a set of page tables.
 *
 * @fmt:    The page table format.
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          any callback routines.
 * @tlb_sync_pending: Private flag for optimising out redundant syncs.
 * @cfg:    A copy of the page table configuration.
 * @ops:    The page table operations in use for this set of page tables.
 */
struct io_pgtable {
	enum io_pgtable_fmt	fmt;
	void			*cookie;
	bool			tlb_sync_pending;
	struct io_pgtable_cfg	cfg;
	struct io_pgtable_ops	ops;
};
172 | ||
/* Recover the containing struct io_pgtable from its embedded ops member. */
#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)

507e4c9d RM |
175 | static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop) |
176 | { | |
177 | iop->cfg.tlb->tlb_flush_all(iop->cookie); | |
88492a47 | 178 | iop->tlb_sync_pending = true; |
507e4c9d RM |
179 | } |
180 | ||
181 | static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop, | |
182 | unsigned long iova, size_t size, size_t granule, bool leaf) | |
183 | { | |
184 | iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie); | |
88492a47 | 185 | iop->tlb_sync_pending = true; |
507e4c9d RM |
186 | } |
187 | ||
188 | static inline void io_pgtable_tlb_sync(struct io_pgtable *iop) | |
189 | { | |
88492a47 RM |
190 | if (iop->tlb_sync_pending) { |
191 | iop->cfg.tlb->tlb_sync(iop->cookie); | |
192 | iop->tlb_sync_pending = false; | |
193 | } | |
507e4c9d RM |
194 | } |
195 | ||
fdb1d7be WD |
/**
 * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
 *                              particular format.
 *
 * @alloc: Allocate a set of page tables described by cfg.
 * @free:  Free the page tables associated with iop.
 */
struct io_pgtable_init_fns {
	struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
	void (*free)(struct io_pgtable *iop);
};
207 | ||
/* Per-format allocator entry points; one for each enum io_pgtable_fmt value. */
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;

#endif /* __IO_PGTABLE_H */