// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * ARMv7 Short-descriptor format, supporting
 * - Basic memory attributes
 * - Simplified access permissions (AP[2:1] model)
 * - Backwards-compatible TEX remap
 * - Large pages/supersections (if indicated by the caller)
 *
 * Not supporting:
 * - Legacy access permissions (AP[2:0] model)
 *
 * Almost certainly never supporting:
 * - PXN
 * - Domains
 *
 * Copyright (C) 2014-2015 ARM Limited
 * Copyright (c) 2014-2015 MediaTek Inc.
 */

#define pr_fmt(fmt)	"arm-v7s io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <asm/barrier.h>

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_v7s_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * We have 32 bits total; 12 bits resolved at level 1, 8 bits at level 2,
 * and 12 bits in a page. With some carefully-chosen coefficients we can
 * hide the ugly inconsistencies behind these macros and at least let the
 * rest of the code pretend to be somewhat sane.
 */
#define ARM_V7S_ADDR_BITS		32
#define _ARM_V7S_LVL_BITS(lvl)		(16 - (lvl) * 4)
#define ARM_V7S_LVL_SHIFT(lvl)		(ARM_V7S_ADDR_BITS - (4 + 8 * (lvl)))
#define ARM_V7S_TABLE_SHIFT		10

#define ARM_V7S_PTES_PER_LVL(lvl)	(1 << _ARM_V7S_LVL_BITS(lvl))
#define ARM_V7S_TABLE_SIZE(lvl)						\
	(ARM_V7S_PTES_PER_LVL(lvl) * sizeof(arm_v7s_iopte))

#define ARM_V7S_BLOCK_SIZE(lvl)		(1UL << ARM_V7S_LVL_SHIFT(lvl))
#define ARM_V7S_LVL_MASK(lvl)		((u32)(~0U << ARM_V7S_LVL_SHIFT(lvl)))
#define ARM_V7S_TABLE_MASK		((u32)(~0U << ARM_V7S_TABLE_SHIFT))
#define _ARM_V7S_IDX_MASK(lvl)		(ARM_V7S_PTES_PER_LVL(lvl) - 1)
#define ARM_V7S_LVL_IDX(addr, lvl)	({				\
	int _l = lvl;							\
	((u32)(addr) >> ARM_V7S_LVL_SHIFT(_l)) & _ARM_V7S_IDX_MASK(_l); \
})
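
/*
 * For illustration, the values these macros work out to: level 1 resolves
 * IOVA bits [31:20] (4096 entries, a 16KB table, 1MB "section" blocks),
 * while level 2 resolves bits [19:12] (256 entries, a 1KB table, 4KB page
 * blocks).
 */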

/*
 * Large page/supersection entries are effectively a block of 16 page/section
 * entries, along the lines of the LPAE contiguous hint, but all with the
 * same output address. For want of a better common name we'll call them
 * "contiguous" versions of their respective page/section entries here, but
 * noting the distinction (WRT TLB maintenance) that they represent *one*
 * entry repeated 16 times, not 16 separate entries (as in the LPAE case).
 */
#define ARM_V7S_CONT_PAGES		16
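/*
 * Concretely: 16 contiguous 4KB page entries form a 64KB "large page",
 * and 16 contiguous 1MB section entries form a 16MB "supersection".
 */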

/* PTE type bits: these are all mixed up with XN/PXN bits in most cases */
#define ARM_V7S_PTE_TYPE_TABLE		0x1
#define ARM_V7S_PTE_TYPE_PAGE		0x2
#define ARM_V7S_PTE_TYPE_CONT_PAGE	0x1

#define ARM_V7S_PTE_IS_VALID(pte)	(((pte) & 0x3) != 0)
#define ARM_V7S_PTE_IS_TABLE(pte, lvl)					\
	((lvl) == 1 && (((pte) & 0x3) == ARM_V7S_PTE_TYPE_TABLE))

/* Page table bits */
#define ARM_V7S_ATTR_XN(lvl)		BIT(4 * (2 - (lvl)))
#define ARM_V7S_ATTR_B			BIT(2)
#define ARM_V7S_ATTR_C			BIT(3)
#define ARM_V7S_ATTR_NS_TABLE		BIT(3)
#define ARM_V7S_ATTR_NS_SECTION		BIT(19)

#define ARM_V7S_CONT_SECTION		BIT(18)
#define ARM_V7S_CONT_PAGE_XN_SHIFT	15

/*
 * The attribute bits are consistently ordered*, but occupy bits [17:10] of
 * a level 1 PTE vs. bits [11:4] at level 2. Thus we define the individual
 * fields relative to that 8-bit block, plus a total shift relative to the PTE.
 */
#define ARM_V7S_ATTR_SHIFT(lvl)		(16 - (lvl) * 6)
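/* i.e. the attribute block starts at bit 10 at level 1, bit 4 at level 2 */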

#define ARM_V7S_ATTR_MASK		0xff
#define ARM_V7S_ATTR_AP0		BIT(0)
#define ARM_V7S_ATTR_AP1		BIT(1)
#define ARM_V7S_ATTR_AP2		BIT(5)
#define ARM_V7S_ATTR_S			BIT(6)
#define ARM_V7S_ATTR_NG			BIT(7)
#define ARM_V7S_TEX_SHIFT		2
#define ARM_V7S_TEX_MASK		0x7
#define ARM_V7S_ATTR_TEX(val)		(((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT)

#define ARM_V7S_ATTR_MTK_4GB		BIT(9)	/* MTK extends this for 4GB mode */

/* *well, except for TEX on level 2 large pages, of course :( */
#define ARM_V7S_CONT_PAGE_TEX_SHIFT	6
#define ARM_V7S_CONT_PAGE_TEX_MASK	(ARM_V7S_TEX_MASK << ARM_V7S_CONT_PAGE_TEX_SHIFT)

/* Simplified access permissions */
#define ARM_V7S_PTE_AF			ARM_V7S_ATTR_AP0
#define ARM_V7S_PTE_AP_UNPRIV		ARM_V7S_ATTR_AP1
#define ARM_V7S_PTE_AP_RDONLY		ARM_V7S_ATTR_AP2

/* Register bits */
#define ARM_V7S_RGN_NC			0
#define ARM_V7S_RGN_WBWA		1
#define ARM_V7S_RGN_WT			2
#define ARM_V7S_RGN_WB			3

#define ARM_V7S_PRRR_TYPE_DEVICE	1
#define ARM_V7S_PRRR_TYPE_NORMAL	2
#define ARM_V7S_PRRR_TR(n, type)	(((type) & 0x3) << ((n) * 2))
#define ARM_V7S_PRRR_DS0		BIT(16)
#define ARM_V7S_PRRR_DS1		BIT(17)
#define ARM_V7S_PRRR_NS0		BIT(18)
#define ARM_V7S_PRRR_NS1		BIT(19)
#define ARM_V7S_PRRR_NOS(n)		BIT((n) + 24)

#define ARM_V7S_NMRR_IR(n, attr)	(((attr) & 0x3) << ((n) * 2))
#define ARM_V7S_NMRR_OR(n, attr)	(((attr) & 0x3) << ((n) * 2 + 16))

#define ARM_V7S_TTBR_S			BIT(1)
#define ARM_V7S_TTBR_NOS		BIT(5)
#define ARM_V7S_TTBR_ORGN_ATTR(attr)	(((attr) & 0x3) << 3)
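/*
 * The two IRGN bits are not adjacent in the TTBR: IRGN[0] lives at bit 6
 * and IRGN[1] at bit 0 (with the Multiprocessing Extensions), hence the
 * bit-swizzle below.
 */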
#define ARM_V7S_TTBR_IRGN_ATTR(attr)					\
	((((attr) & 0x1) << 6) | (((attr) & 0x2) >> 1))

#define ARM_V7S_TCR_PD1			BIT(5)

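/*
 * A v7s PTE is only 32 bits wide, so next-level tables must live below
 * 4GB in physical memory; hence the DMA32/DMA constraints on table
 * allocations below (see also the "fits in PTE" check in
 * __arm_v7s_alloc_table()).
 */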
#ifdef CONFIG_ZONE_DMA32
#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
#else
#define ARM_V7S_TABLE_GFP_DMA GFP_DMA
#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA
#endif

typedef u32 arm_v7s_iopte;

static bool selftest_running;

struct arm_v7s_io_pgtable {
	struct io_pgtable	iop;

	arm_v7s_iopte		*pgd;
	struct kmem_cache	*l2_tables;
	spinlock_t		split_lock;
};

static dma_addr_t __arm_v7s_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl)
{
	if (ARM_V7S_PTE_IS_TABLE(pte, lvl))
		pte &= ARM_V7S_TABLE_MASK;
	else
		pte &= ARM_V7S_LVL_MASK(lvl);
	return phys_to_virt(pte);
}

static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
				   struct arm_v7s_io_pgtable *data)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	struct device *dev = cfg->iommu_dev;
	phys_addr_t phys;
	dma_addr_t dma;
	size_t size = ARM_V7S_TABLE_SIZE(lvl);
	void *table = NULL;

	if (lvl == 1)
		table = (void *)__get_free_pages(
			__GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
	else if (lvl == 2)
		table = kmem_cache_zalloc(data->l2_tables, gfp);
	phys = virt_to_phys(table);
	if (phys != (arm_v7s_iopte)phys) {
		/* Doesn't fit in PTE */
		dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
		goto out_free;
	}
	if (table && !cfg->coherent_walk) {
		dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != phys)
			goto out_unmap;
	}
	if (lvl == 2)
		kmemleak_ignore(table);
	return table;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	if (lvl == 1)
		free_pages((unsigned long)table, get_order(size));
	else
		kmem_cache_free(data->l2_tables, table);
	return NULL;
}

static void __arm_v7s_free_table(void *table, int lvl,
				 struct arm_v7s_io_pgtable *data)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	struct device *dev = cfg->iommu_dev;
	size_t size = ARM_V7S_TABLE_SIZE(lvl);

	if (!cfg->coherent_walk)
		dma_unmap_single(dev, __arm_v7s_dma_addr(table), size,
				 DMA_TO_DEVICE);
	if (lvl == 1)
		free_pages((unsigned long)table, get_order(size));
	else
		kmem_cache_free(data->l2_tables, table);
}

static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries,
			       struct io_pgtable_cfg *cfg)
{
	if (cfg->coherent_walk)
		return;

	dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep),
				   num_entries * sizeof(*ptep), DMA_TO_DEVICE);
}

static void __arm_v7s_set_pte(arm_v7s_iopte *ptep, arm_v7s_iopte pte,
			      int num_entries, struct io_pgtable_cfg *cfg)
{
	int i;

	for (i = 0; i < num_entries; i++)
		ptep[i] = pte;

	__arm_v7s_pte_sync(ptep, num_entries, cfg);
}

static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
					 struct io_pgtable_cfg *cfg)
{
	bool ap = !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS);
	arm_v7s_iopte pte = ARM_V7S_ATTR_NG | ARM_V7S_ATTR_S;

	if (!(prot & IOMMU_MMIO))
		pte |= ARM_V7S_ATTR_TEX(1);
	if (ap) {
		pte |= ARM_V7S_PTE_AF;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_V7S_PTE_AP_UNPRIV;
		if (!(prot & IOMMU_WRITE))
			pte |= ARM_V7S_PTE_AP_RDONLY;
	}
	pte <<= ARM_V7S_ATTR_SHIFT(lvl);

	if ((prot & IOMMU_NOEXEC) && ap)
		pte |= ARM_V7S_ATTR_XN(lvl);
	if (prot & IOMMU_MMIO)
		pte |= ARM_V7S_ATTR_B;
	else if (prot & IOMMU_CACHE)
		pte |= ARM_V7S_ATTR_B | ARM_V7S_ATTR_C;

	pte |= ARM_V7S_PTE_TYPE_PAGE;
	if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS))
		pte |= ARM_V7S_ATTR_NS_SECTION;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB)
		pte |= ARM_V7S_ATTR_MTK_4GB;

	return pte;
}

static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
{
	int prot = IOMMU_READ;
	arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl);

	if (!(attr & ARM_V7S_PTE_AP_RDONLY))
		prot |= IOMMU_WRITE;
	if (!(attr & ARM_V7S_PTE_AP_UNPRIV))
		prot |= IOMMU_PRIV;
	if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
		prot |= IOMMU_MMIO;
	else if (pte & ARM_V7S_ATTR_C)
		prot |= IOMMU_CACHE;
	if (pte & ARM_V7S_ATTR_XN(lvl))
		prot |= IOMMU_NOEXEC;

	return prot;
}

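/*
 * Level 2 contiguous (large page) descriptors keep XN at bit 15 rather
 * than bit 0 and TEX at bits [14:12] rather than [8:6], and use type 0x1
 * rather than 0x2, so converting means relocating those fields: the XOR
 * below clears each field from its old position before the OR re-inserts
 * it at the new one.
 */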
static arm_v7s_iopte arm_v7s_pte_to_cont(arm_v7s_iopte pte, int lvl)
{
	if (lvl == 1) {
		pte |= ARM_V7S_CONT_SECTION;
	} else if (lvl == 2) {
		arm_v7s_iopte xn = pte & ARM_V7S_ATTR_XN(lvl);
		arm_v7s_iopte tex = pte & ARM_V7S_CONT_PAGE_TEX_MASK;

		pte ^= xn | tex | ARM_V7S_PTE_TYPE_PAGE;
		pte |= (xn << ARM_V7S_CONT_PAGE_XN_SHIFT) |
		       (tex << ARM_V7S_CONT_PAGE_TEX_SHIFT) |
		       ARM_V7S_PTE_TYPE_CONT_PAGE;
	}
	return pte;
}

static arm_v7s_iopte arm_v7s_cont_to_pte(arm_v7s_iopte pte, int lvl)
{
	if (lvl == 1) {
		pte &= ~ARM_V7S_CONT_SECTION;
	} else if (lvl == 2) {
		arm_v7s_iopte xn = pte & BIT(ARM_V7S_CONT_PAGE_XN_SHIFT);
		arm_v7s_iopte tex = pte & (ARM_V7S_CONT_PAGE_TEX_MASK <<
					   ARM_V7S_CONT_PAGE_TEX_SHIFT);

		pte ^= xn | tex | ARM_V7S_PTE_TYPE_CONT_PAGE;
		pte |= (xn >> ARM_V7S_CONT_PAGE_XN_SHIFT) |
		       (tex >> ARM_V7S_CONT_PAGE_TEX_SHIFT) |
		       ARM_V7S_PTE_TYPE_PAGE;
	}
	return pte;
}

static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl)
{
	if (lvl == 1 && !ARM_V7S_PTE_IS_TABLE(pte, lvl))
		return pte & ARM_V7S_CONT_SECTION;
	else if (lvl == 2)
		return !(pte & ARM_V7S_PTE_TYPE_PAGE);
	return false;
}

static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *, unsigned long,
			      size_t, int, arm_v7s_iopte *);

static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
			    unsigned long iova, phys_addr_t paddr, int prot,
			    int lvl, int num_entries, arm_v7s_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_v7s_iopte pte;
	int i;

	for (i = 0; i < num_entries; i++)
		if (ARM_V7S_PTE_IS_TABLE(ptep[i], lvl)) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_v7s_iopte *tblp;
			size_t sz = ARM_V7S_BLOCK_SIZE(lvl);

			tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl);
			if (WARN_ON(__arm_v7s_unmap(data, iova + i * sz,
						    sz, lvl, tblp) != sz))
				return -EINVAL;
		} else if (ptep[i]) {
			/* We require an unmap first */
			WARN_ON(!selftest_running);
			return -EEXIST;
		}

	pte = arm_v7s_prot_to_pte(prot, lvl, cfg);
	if (num_entries > 1)
		pte = arm_v7s_pte_to_cont(pte, lvl);

	pte |= paddr & ARM_V7S_LVL_MASK(lvl);

	__arm_v7s_set_pte(ptep, pte, num_entries, cfg);
	return 0;
}

static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table,
					   arm_v7s_iopte *ptep,
					   arm_v7s_iopte curr,
					   struct io_pgtable_cfg *cfg)
{
	arm_v7s_iopte old, new;

	new = virt_to_phys(table) | ARM_V7S_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_V7S_ATTR_NS_TABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg_relaxed(ptep, curr, new);
	__arm_v7s_pte_sync(ptep, 1, cfg);

	return old;
}

static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot,
			 int lvl, arm_v7s_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_v7s_iopte pte, *cptep;
	int num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);
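	/*
	 * Nonzero only when size covers whole blocks at this level, e.g. 1
	 * for a 1MB section at level 1 or 16 for a 16MB supersection;
	 * anything smaller yields 0 and we descend a level instead.
	 */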

	/* Find our entry at the current level */
	ptep += ARM_V7S_LVL_IDX(iova, lvl);

	/* If we can install a leaf entry at this level, then do so */
	if (num_entries)
		return arm_v7s_init_pte(data, iova, paddr, prot,
					lvl, num_entries, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl == 2))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_v7s_alloc_table(lvl + 1, GFP_ATOMIC, data);
		if (!cptep)
			return -ENOMEM;

		pte = arm_v7s_install_table(cptep, ptep, 0, cfg);
		if (pte)
			__arm_v7s_free_table(cptep, lvl + 1, data);
	} else {
		/* We've no easy way of knowing if it's synced yet, so... */
		__arm_v7s_pte_sync(ptep, 1, cfg);
	}

	if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) {
		cptep = iopte_deref(pte, lvl);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}

static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
		       phys_addr_t paddr, size_t size, int prot)
{
	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable *iop = &data->iop;
	int ret;

	/* If no access, then nothing to do */
	if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	if (WARN_ON(upper_32_bits(iova) || upper_32_bits(paddr)))
		return -ERANGE;

	ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	if (iop->cfg.quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) {
		io_pgtable_tlb_add_flush(iop, iova, size,
					 ARM_V7S_BLOCK_SIZE(2), false);
		io_pgtable_tlb_sync(iop);
	} else {
		wmb();
	}

	return ret;
}

static void arm_v7s_free_pgtable(struct io_pgtable *iop)
{
	struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop);
	int i;

	for (i = 0; i < ARM_V7S_PTES_PER_LVL(1); i++) {
		arm_v7s_iopte pte = data->pgd[i];

		if (ARM_V7S_PTE_IS_TABLE(pte, 1))
			__arm_v7s_free_table(iopte_deref(pte, 1), 2, data);
	}
	__arm_v7s_free_table(data->pgd, 1, data);
	kmem_cache_destroy(data->l2_tables);
	kfree(data);
}

static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data,
					unsigned long iova, int idx, int lvl,
					arm_v7s_iopte *ptep)
{
	struct io_pgtable *iop = &data->iop;
	arm_v7s_iopte pte;
	size_t size = ARM_V7S_BLOCK_SIZE(lvl);
	int i;

	/* Check that we didn't lose a race to get the lock */
	pte = *ptep;
	if (!arm_v7s_pte_is_cont(pte, lvl))
		return pte;

	ptep -= idx & (ARM_V7S_CONT_PAGES - 1);
	pte = arm_v7s_cont_to_pte(pte, lvl);
	for (i = 0; i < ARM_V7S_CONT_PAGES; i++)
		ptep[i] = pte + i * size;

	__arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg);

	size *= ARM_V7S_CONT_PAGES;
	io_pgtable_tlb_add_flush(iop, iova, size, size, true);
	io_pgtable_tlb_sync(iop);
	return pte;
}

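/*
 * Replace a level 1 block entry with a full level 2 table mapping the
 * same region, minus the part being unmapped, then atomically swap the
 * table in with arm_v7s_install_table(); if someone else raced us and
 * changed the entry first, defer to whatever they installed.
 */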
static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
				      unsigned long iova, size_t size,
				      arm_v7s_iopte blk_pte,
				      arm_v7s_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_v7s_iopte pte, *tablep;
	int i, unmap_idx, num_entries, num_ptes;

	tablep = __arm_v7s_alloc_table(2, GFP_ATOMIC, data);
	if (!tablep)
		return 0; /* Bytes unmapped */

	num_ptes = ARM_V7S_PTES_PER_LVL(2);
	num_entries = size >> ARM_V7S_LVL_SHIFT(2);
	unmap_idx = ARM_V7S_LVL_IDX(iova, 2);

	pte = arm_v7s_prot_to_pte(arm_v7s_pte_to_prot(blk_pte, 1), 2, cfg);
	if (num_entries > 1)
		pte = arm_v7s_pte_to_cont(pte, 2);

	for (i = 0; i < num_ptes; i += num_entries, pte += size) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_v7s_set_pte(&tablep[i], pte, num_entries, cfg);
	}

	pte = arm_v7s_install_table(tablep, ptep, blk_pte, cfg);
	if (pte != blk_pte) {
		__arm_v7s_free_table(tablep, 2, data);

		if (!ARM_V7S_PTE_IS_TABLE(pte, 1))
			return 0;

		tablep = iopte_deref(pte, 1);
		return __arm_v7s_unmap(data, iova, size, 2, tablep);
	}

	io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
	io_pgtable_tlb_sync(&data->iop);
	return size;
}

static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
			      unsigned long iova, size_t size, int lvl,
			      arm_v7s_iopte *ptep)
{
	arm_v7s_iopte pte[ARM_V7S_CONT_PAGES];
	struct io_pgtable *iop = &data->iop;
	int idx, i = 0, num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl > 2))
		return 0;

	idx = ARM_V7S_LVL_IDX(iova, lvl);
	ptep += idx;
	do {
		pte[i] = READ_ONCE(ptep[i]);
		if (WARN_ON(!ARM_V7S_PTE_IS_VALID(pte[i])))
			return 0;
	} while (++i < num_entries);

	/*
	 * If we've hit a contiguous 'large page' entry at this level, it
	 * needs splitting first, unless we're unmapping the whole lot.
	 *
	 * For splitting, we can't rewrite 16 PTEs atomically, and since we
	 * can't necessarily assume TEX remap we don't have a software bit to
	 * mark live entries being split. In practice (i.e. DMA API code), we
	 * will never be splitting large pages anyway, so just wrap this edge
	 * case in a lock for the sake of correctness and be done with it.
	 */
	if (num_entries <= 1 && arm_v7s_pte_is_cont(pte[0], lvl)) {
		unsigned long flags;

		spin_lock_irqsave(&data->split_lock, flags);
		pte[0] = arm_v7s_split_cont(data, iova, idx, lvl, ptep);
		spin_unlock_irqrestore(&data->split_lock, flags);
	}

	/* If the size matches this level, we're in the right place */
	if (num_entries) {
		size_t blk_size = ARM_V7S_BLOCK_SIZE(lvl);

		__arm_v7s_set_pte(ptep, 0, num_entries, &iop->cfg);

		for (i = 0; i < num_entries; i++) {
			if (ARM_V7S_PTE_IS_TABLE(pte[i], lvl)) {
				/* Also flush any partial walks */
				io_pgtable_tlb_add_flush(iop, iova, blk_size,
					ARM_V7S_BLOCK_SIZE(lvl + 1), false);
				io_pgtable_tlb_sync(iop);
				ptep = iopte_deref(pte[i], lvl);
				__arm_v7s_free_table(ptep, lvl + 1, data);
			} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
				/*
				 * Order the PTE update against queueing the IOVA, to
				 * guarantee that a flush callback from a different CPU
				 * has observed it before the TLBIALL can be issued.
				 */
				smp_wmb();
			} else {
				io_pgtable_tlb_add_flush(iop, iova, blk_size,
							 blk_size, true);
			}
			iova += blk_size;
		}
		return size;
	} else if (lvl == 1 && !ARM_V7S_PTE_IS_TABLE(pte[0], lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_v7s_split_blk_unmap(data, iova, size, pte[0], ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte[0], lvl);
	return __arm_v7s_unmap(data, iova, size, lvl + 1, ptep);
}

static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			    size_t size)
{
	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);

	if (WARN_ON(upper_32_bits(iova)))
		return 0;

	return __arm_v7s_unmap(data, iova, size, 1, data->pgd);
}

static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
					unsigned long iova)
{
	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_v7s_iopte *ptep = data->pgd, pte;
	int lvl = 0;
	u32 mask;

	do {
		ptep += ARM_V7S_LVL_IDX(iova, ++lvl);
		pte = READ_ONCE(*ptep);
		ptep = iopte_deref(pte, lvl);
	} while (ARM_V7S_PTE_IS_TABLE(pte, lvl));

	if (!ARM_V7S_PTE_IS_VALID(pte))
		return 0;

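	/*
	 * Multiplying a block mask by 16 just shifts it left by 4 bits:
	 * e.g. for a contiguous run of 4KB pages, the 0xfffff000 page mask
	 * becomes the 0xffff0000 mask of the 64KB large page.
	 */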
	mask = ARM_V7S_LVL_MASK(lvl);
	if (arm_v7s_pte_is_cont(pte, lvl))
		mask *= ARM_V7S_CONT_PAGES;
	return (pte & mask) | (iova & ~mask);
}

static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
						void *cookie)
{
	struct arm_v7s_io_pgtable *data;

	if (cfg->ias > ARM_V7S_ADDR_BITS || cfg->oas > ARM_V7S_ADDR_BITS)
		return NULL;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_NO_PERMS |
			    IO_PGTABLE_QUIRK_TLBI_ON_MAP |
			    IO_PGTABLE_QUIRK_ARM_MTK_4GB |
			    IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	/* If ARM_MTK_4GB is enabled, the NO_PERMS quirk is also expected. */
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB &&
	    !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS))
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	spin_lock_init(&data->split_lock);
	data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
					    ARM_V7S_TABLE_SIZE(2),
					    ARM_V7S_TABLE_SIZE(2),
					    ARM_V7S_TABLE_SLAB_FLAGS, NULL);
	if (!data->l2_tables)
		goto out_free_data;

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_v7s_map,
		.unmap		= arm_v7s_unmap,
		.iova_to_phys	= arm_v7s_iova_to_phys,
	};

	/* We have to do this early for __arm_v7s_alloc_table to work... */
	data->iop.cfg = *cfg;

	/*
	 * Unless the IOMMU driver indicates supersection support by
	 * having SZ_16M set in the initial bitmap, they won't be used.
	 */
	cfg->pgsize_bitmap &= SZ_4K | SZ_64K | SZ_1M | SZ_16M;

	/* TCR: T0SZ=0, disable TTBR1 */
	cfg->arm_v7s_cfg.tcr = ARM_V7S_TCR_PD1;

	/*
	 * TEX remap: the indices used map to the closest equivalent types
	 * under the non-TEX-remap interpretation of those attribute bits,
	 * excepting various implementation-defined aspects of shareability.
	 */
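	/*
	 * Concretely, the attribute index under TEX remap is {TEX[0],C,B}:
	 * index 1 (B) is the Device type used for IOMMU_MMIO, index 4
	 * (TEX) is Normal non-cacheable, and index 7 (TEX,C,B) is Normal
	 * write-back write-allocate for IOMMU_CACHE, matching the encodings
	 * produced by arm_v7s_prot_to_pte().
	 */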
	cfg->arm_v7s_cfg.prrr = ARM_V7S_PRRR_TR(1, ARM_V7S_PRRR_TYPE_DEVICE) |
				ARM_V7S_PRRR_TR(4, ARM_V7S_PRRR_TYPE_NORMAL) |
				ARM_V7S_PRRR_TR(7, ARM_V7S_PRRR_TYPE_NORMAL) |
				ARM_V7S_PRRR_DS0 | ARM_V7S_PRRR_DS1 |
				ARM_V7S_PRRR_NS1 | ARM_V7S_PRRR_NOS(7);
	cfg->arm_v7s_cfg.nmrr = ARM_V7S_NMRR_IR(7, ARM_V7S_RGN_WBWA) |
				ARM_V7S_NMRR_OR(7, ARM_V7S_RGN_WBWA);

	/* Looking good; allocate a pgd */
	data->pgd = __arm_v7s_alloc_table(1, GFP_KERNEL, data);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_v7s_cfg.ttbr[0] = virt_to_phys(data->pgd) |
				   ARM_V7S_TTBR_S | ARM_V7S_TTBR_NOS |
				   (cfg->coherent_walk ?
				   (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
				    ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
				   (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
				    ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
	cfg->arm_v7s_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kmem_cache_destroy(data->l2_tables);
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns = {
	.alloc	= arm_v7s_alloc_pgtable,
	.free	= arm_v7s_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static const struct iommu_gather_ops dummy_tlb_ops = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};

#define __FAIL(ops)	({				\
		WARN(1, "selftest: test failed\n");	\
		selftest_running = false;		\
		-EFAULT;				\
})

static int __init arm_v7s_do_selftests(void)
{
	struct io_pgtable_ops *ops;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 32,
		.ias = 32,
		.coherent_walk = true,
		.quirks = IO_PGTABLE_QUIRK_ARM_NS,
		.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
	};
	unsigned int iova, size, iova_start;
	unsigned int i, loopnr = 0;

	selftest_running = true;

	cfg_cookie = &cfg;

	ops = alloc_io_pgtable_ops(ARM_V7S, &cfg, &cfg);
	if (!ops) {
		pr_err("selftest: failed to allocate io pgtable ops\n");
		return -EINVAL;
	}

	/*
	 * Initial sanity checks.
	 * Empty page tables shouldn't provide any translations.
	 */
	if (ops->iova_to_phys(ops, 42))
		return __FAIL(ops);

	if (ops->iova_to_phys(ops, SZ_1G + 42))
		return __FAIL(ops);

	if (ops->iova_to_phys(ops, SZ_2G + 42))
		return __FAIL(ops);

	/*
	 * Distinct mappings of different granule sizes.
	 */
	iova = 0;
	for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
		size = 1UL << i;
		if (ops->map(ops, iova, iova, size, IOMMU_READ |
						    IOMMU_WRITE |
						    IOMMU_NOEXEC |
						    IOMMU_CACHE))
			return __FAIL(ops);

		/* Overlapping mappings */
		if (!ops->map(ops, iova, iova + size, size,
			      IOMMU_READ | IOMMU_NOEXEC))
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
			return __FAIL(ops);

		iova += SZ_16M;
		loopnr++;
	}

	/* Partial unmap */
	i = 1;
	size = 1UL << __ffs(cfg.pgsize_bitmap);
	while (i < loopnr) {
		iova_start = i * SZ_16M;
		if (ops->unmap(ops, iova_start + size, size) != size)
			return __FAIL(ops);

		/* Remap of partial unmap */
		if (ops->map(ops, iova_start + size, size, size, IOMMU_READ))
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova_start + size + 42)
		    != (size + 42))
			return __FAIL(ops);
		i++;
	}

	/* Full unmap */
	iova = 0;
	for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
		size = 1UL << i;

		if (ops->unmap(ops, iova, size) != size)
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova + 42))
			return __FAIL(ops);

		/* Remap full block */
		if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
			return __FAIL(ops);

		iova += SZ_16M;
	}

	free_io_pgtable_ops(ops);

	selftest_running = false;

	pr_info("self test ok\n");
	return 0;
}
subsys_initcall(arm_v7s_do_selftests);
#endif