drivers/iommu/io-pgtable-arm.c
1/*
2 * CPU-agnostic ARM page table allocator.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 * Copyright (C) 2014 ARM Limited
17 *
18 * Author: Will Deacon <will.deacon@arm.com>
19 */
20
21#define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt
22
23#include <linux/atomic.h>
24#include <linux/bitops.h>
25#include <linux/io-pgtable.h>
26#include <linux/iommu.h>
27#include <linux/kernel.h>
28#include <linux/sizes.h>
29#include <linux/slab.h>
30#include <linux/types.h>
31#include <linux/dma-mapping.h>
32
33#include <asm/barrier.h>
34
35#define ARM_LPAE_MAX_ADDR_BITS 52
36#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16
37#define ARM_LPAE_MAX_LEVELS 4
38
39/* Struct accessors */
40#define io_pgtable_to_data(x) \
41 container_of((x), struct arm_lpae_io_pgtable, iop)
42
43#define io_pgtable_ops_to_data(x) \
44 io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
45
46/*
47 * For consistency with the architecture, we always consider
48 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
49 */
50#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels)
51
52/*
53 * Calculate the right shift amount to get to the portion describing level l
54 * in a virtual address mapped by the pagetable in d.
55 */
56#define ARM_LPAE_LVL_SHIFT(l,d) \
57 ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \
58 * (d)->bits_per_level) + (d)->pg_shift)
59
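/*
 * Worked example (annotation, not in the original source): with a 4K
 * granule and 4 levels, pg_shift = 12 and bits_per_level = 9, so
 * ARM_LPAE_LVL_SHIFT yields 39, 30, 21 and 12 for levels 0-3, matching
 * the AArch64 long-descriptor walk.
 */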
60#define ARM_LPAE_GRANULE(d) (1UL << (d)->pg_shift)
61
62#define ARM_LPAE_PAGES_PER_PGD(d) \
63 DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))
64
65/*
66 * Calculate the index at level l used to map virtual address a using the
67 * pagetable in d.
68 */
69#define ARM_LPAE_PGD_IDX(l,d) \
70 ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
71
72#define ARM_LPAE_LVL_IDX(a,l,d) \
73 (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
74 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
75
76/* Calculate the block/page mapping size at level l for pagetable in d. */
77#define ARM_LPAE_BLOCK_SIZE(l,d) \
78 (1ULL << (ilog2(sizeof(arm_lpae_iopte)) + \
79 ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
80
81/* Page table bits */
82#define ARM_LPAE_PTE_TYPE_SHIFT 0
83#define ARM_LPAE_PTE_TYPE_MASK 0x3
84
85#define ARM_LPAE_PTE_TYPE_BLOCK 1
86#define ARM_LPAE_PTE_TYPE_TABLE 3
87#define ARM_LPAE_PTE_TYPE_PAGE 3
88
89#define ARM_LPAE_PTE_ADDR_MASK GENMASK_ULL(47,12)
90
91#define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63)
92#define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53)
93#define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10)
94#define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8)
95#define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8)
96#define ARM_LPAE_PTE_SH_IS (((arm_lpae_iopte)3) << 8)
97#define ARM_LPAE_PTE_NS (((arm_lpae_iopte)1) << 5)
98#define ARM_LPAE_PTE_VALID (((arm_lpae_iopte)1) << 0)
99
100#define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2)
101/* Ignore the contiguous bit for block splitting */
102#define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52)
103#define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \
104 ARM_LPAE_PTE_ATTR_HI_MASK)
105/* Software bit for solving coherency races */
106#define ARM_LPAE_PTE_SW_SYNC (((arm_lpae_iopte)1) << 55)
107
108/* Stage-1 PTE */
109#define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6)
110#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6)
111#define ARM_LPAE_PTE_ATTRINDX_SHIFT 2
112#define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11)
113
114/* Stage-2 PTE */
115#define ARM_LPAE_PTE_HAP_FAULT (((arm_lpae_iopte)0) << 6)
116#define ARM_LPAE_PTE_HAP_READ (((arm_lpae_iopte)1) << 6)
117#define ARM_LPAE_PTE_HAP_WRITE (((arm_lpae_iopte)2) << 6)
118#define ARM_LPAE_PTE_MEMATTR_OIWB (((arm_lpae_iopte)0xf) << 2)
119#define ARM_LPAE_PTE_MEMATTR_NC (((arm_lpae_iopte)0x5) << 2)
120#define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2)
121
122/* Register bits */
123#define ARM_32_LPAE_TCR_EAE (1 << 31)
124#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31)
125
126#define ARM_LPAE_TCR_EPD1 (1 << 23)
127
128#define ARM_LPAE_TCR_TG0_4K (0 << 14)
129#define ARM_LPAE_TCR_TG0_64K (1 << 14)
130#define ARM_LPAE_TCR_TG0_16K (2 << 14)
131
132#define ARM_LPAE_TCR_SH0_SHIFT 12
133#define ARM_LPAE_TCR_SH0_MASK 0x3
134#define ARM_LPAE_TCR_SH_NS 0
135#define ARM_LPAE_TCR_SH_OS 2
136#define ARM_LPAE_TCR_SH_IS 3
137
138#define ARM_LPAE_TCR_ORGN0_SHIFT 10
139#define ARM_LPAE_TCR_IRGN0_SHIFT 8
140#define ARM_LPAE_TCR_RGN_MASK 0x3
141#define ARM_LPAE_TCR_RGN_NC 0
142#define ARM_LPAE_TCR_RGN_WBWA 1
143#define ARM_LPAE_TCR_RGN_WT 2
144#define ARM_LPAE_TCR_RGN_WB 3
145
146#define ARM_LPAE_TCR_SL0_SHIFT 6
147#define ARM_LPAE_TCR_SL0_MASK 0x3
148
149#define ARM_LPAE_TCR_T0SZ_SHIFT 0
150#define ARM_LPAE_TCR_SZ_MASK 0xf
151
152#define ARM_LPAE_TCR_PS_SHIFT 16
153#define ARM_LPAE_TCR_PS_MASK 0x7
154
155#define ARM_LPAE_TCR_IPS_SHIFT 32
156#define ARM_LPAE_TCR_IPS_MASK 0x7
157
158#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL
159#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL
160#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL
161#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL
162#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL
163#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL
164#define ARM_LPAE_TCR_PS_52_BIT 0x6ULL
165
166#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3)
167#define ARM_LPAE_MAIR_ATTR_MASK 0xff
168#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
169#define ARM_LPAE_MAIR_ATTR_NC 0x44
170#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff
171#define ARM_LPAE_MAIR_ATTR_IDX_NC 0
172#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1
173#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2
174
175/* IOPTE accessors */
176#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
177
178#define iopte_type(pte,l) \
179 (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
180
181#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK)
182
183#define iopte_leaf(pte,l) \
184 (l == (ARM_LPAE_MAX_LEVELS - 1) ? \
185 (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \
186 (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))
187
188struct arm_lpae_io_pgtable {
189 struct io_pgtable iop;
190
191 int levels;
192 size_t pgd_size;
193 unsigned long pg_shift;
194 unsigned long bits_per_level;
195
196 void *pgd;
197};
198
199typedef u64 arm_lpae_iopte;
200
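/*
 * Annotation: the two helpers below convert between a CPU physical
 * address and the address field of an iopte. For 52-bit output
 * addresses with the 64K granule, PA bits 51:48 are carried in iopte
 * bits 15:12, so they are folded down on write and rotated back up on
 * read.
 */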
201static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
202 struct arm_lpae_io_pgtable *data)
203{
204 arm_lpae_iopte pte = paddr;
205
206 /* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
207 return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
208}
209
210static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
211 struct arm_lpae_io_pgtable *data)
212{
213 u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;
214
215 if (data->pg_shift < 16)
216 return paddr;
217
218 /* Rotate the packed high-order bits back to the top */
219 return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
220}
221
222static bool selftest_running = false;
223
224static dma_addr_t __arm_lpae_dma_addr(void *pages)
225{
226 return (dma_addr_t)virt_to_phys(pages);
227}
228
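/*
 * Annotation: page-table memory is allocated zeroed and, unless
 * IO_PGTABLE_QUIRK_NO_DMA says the table walker is cache-coherent,
 * DMA-mapped so that later updates can be pushed out with
 * dma_sync_single_for_device(). The code relies on the DMA address
 * equalling the physical address, and checks this with virt_to_phys().
 */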
229static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
230 struct io_pgtable_cfg *cfg)
231{
232 struct device *dev = cfg->iommu_dev;
233 int order = get_order(size);
234 struct page *p;
235 dma_addr_t dma;
236 void *pages;
237
238 VM_BUG_ON((gfp & __GFP_HIGHMEM));
239 p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
240 gfp | __GFP_ZERO, order);
241 if (!p)
242 return NULL;
243
244 pages = page_address(p);
245 if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
246 dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
247 if (dma_mapping_error(dev, dma))
248 goto out_free;
249 /*
250 * We depend on the IOMMU being able to work with any physical
251 * address directly, so if the DMA layer suggests otherwise by
252 * translating or truncating them, that bodes very badly...
f8d54961 253 */
254 if (dma != virt_to_phys(pages))
255 goto out_unmap;
256 }
257
258 return pages;
259
260out_unmap:
261 dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
262 dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
263out_free:
264 __free_pages(p, order);
265 return NULL;
266}
267
268static void __arm_lpae_free_pages(void *pages, size_t size,
269 struct io_pgtable_cfg *cfg)
270{
271 if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
272 dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
273 size, DMA_TO_DEVICE);
274 free_pages((unsigned long)pages, get_order(size));
275}
276
277static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
278 struct io_pgtable_cfg *cfg)
279{
280 dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
281 sizeof(*ptep), DMA_TO_DEVICE);
282}
283
284static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
285 struct io_pgtable_cfg *cfg)
286{
287 *ptep = pte;
288
289 if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
290 __arm_lpae_sync_pte(ptep, cfg);
291}
292
293static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
294 unsigned long iova, size_t size, int lvl,
295 arm_lpae_iopte *ptep);
296
297static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
298 phys_addr_t paddr, arm_lpae_iopte prot,
299 int lvl, arm_lpae_iopte *ptep)
300{
301 arm_lpae_iopte pte = prot;
302
303 if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
304 pte |= ARM_LPAE_PTE_NS;
305
306 if (lvl == ARM_LPAE_MAX_LEVELS - 1)
307 pte |= ARM_LPAE_PTE_TYPE_PAGE;
308 else
309 pte |= ARM_LPAE_PTE_TYPE_BLOCK;
310
311 pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
312 pte |= paddr_to_iopte(paddr, data);
313
314 __arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
315}
316
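/*
 * Annotation: install a leaf (page or block) entry. An existing leaf is
 * never overwritten (the caller must unmap first); an existing table
 * entry is unmapped and freed before being replaced by a block mapping.
 */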
317static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
318 unsigned long iova, phys_addr_t paddr,
319 arm_lpae_iopte prot, int lvl,
320 arm_lpae_iopte *ptep)
321{
322 arm_lpae_iopte pte = *ptep;
323
324 if (iopte_leaf(pte, lvl)) {
325 /* We require an unmap first */
326 WARN_ON(!selftest_running);
327 return -EEXIST;
328 } else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
329 /*
330 * We need to unmap and free the old table before
331 * overwriting it with a block entry.
332 */
333 arm_lpae_iopte *tblp;
334 size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
335
336 tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
337 if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
338 return -EINVAL;
339 }
340
341 __arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
342 return 0;
343}
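/*
 * Annotation: atomically install a next-level table pointer with
 * cmpxchg64 so that map() calls racing on the same entry cannot clobber
 * each other; the previous entry is returned so the caller can detect
 * that it lost the race and free its freshly allocated table.
 * ARM_LPAE_PTE_SW_SYNC records that the entry has already been synced
 * for a non-coherent walker.
 */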
344
345static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
346 arm_lpae_iopte *ptep,
347 arm_lpae_iopte curr,
348 struct io_pgtable_cfg *cfg)
349{
350 arm_lpae_iopte old, new;
351
352 new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE;
353 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
354 new |= ARM_LPAE_PTE_NSTABLE;
355
356 /*
357 * Ensure the table itself is visible before its PTE can be.
358 * Whilst we could get away with cmpxchg64_release below, this
359 * doesn't have any ordering semantics when !CONFIG_SMP.
360 */
361 dma_wmb();
362
363 old = cmpxchg64_relaxed(ptep, curr, new);
364
365 if ((cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) ||
366 (old & ARM_LPAE_PTE_SW_SYNC))
367 return old;
368
369 /* Even if it's not ours, there's no point waiting; just kick it */
370 __arm_lpae_sync_pte(ptep, cfg);
371 if (old == curr)
372 WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);
373
374 return old;
375}
376
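/*
 * Annotation: recursive map helper. At each level it either installs a
 * leaf (when the requested size matches the block size at this level
 * and that size is in the supported pgsize bitmap), or finds/creates
 * the next-level table and descends. Called from arm_lpae_map()
 * starting at the pgd level.
 */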
377static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
378 phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
379 int lvl, arm_lpae_iopte *ptep)
380{
381 arm_lpae_iopte *cptep, pte;
382 size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
383 size_t tblsz = ARM_LPAE_GRANULE(data);
384 struct io_pgtable_cfg *cfg = &data->iop.cfg;
385
386 /* Find our entry at the current level */
387 ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
388
389 /* If we can install a leaf entry at this level, then do so */
390 if (size == block_size && (size & cfg->pgsize_bitmap))
391 return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
392
393 /* We can't allocate tables at the final level */
394 if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
395 return -EINVAL;
396
397 /* Grab a pointer to the next level */
398 pte = READ_ONCE(*ptep);
399 if (!pte) {
400 cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg);
401 if (!cptep)
402 return -ENOMEM;
403
404 pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
405 if (pte)
406 __arm_lpae_free_pages(cptep, tblsz, cfg);
407 } else if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) &&
408 !(pte & ARM_LPAE_PTE_SW_SYNC)) {
409 __arm_lpae_sync_pte(ptep, cfg);
410 }
411
412 if (pte && !iopte_leaf(pte, lvl)) {
413 cptep = iopte_deref(pte, data);
414 } else if (pte) {
415 /* We require an unmap first */
416 WARN_ON(!selftest_running);
417 return -EEXIST;
418 }
419
420 /* Rinse, repeat */
421 return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
422}
423
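/*
 * Annotation: translate IOMMU_{READ,WRITE,CACHE,MMIO,NOEXEC,PRIV} prot
 * flags into PTE bits: AP and AttrIndx fields for the stage-1 formats,
 * HAP and MemAttr fields for the stage-2 formats.
 */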
424static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
425 int prot)
426{
427 arm_lpae_iopte pte;
428
429 if (data->iop.fmt == ARM_64_LPAE_S1 ||
430 data->iop.fmt == ARM_32_LPAE_S1) {
431 pte = ARM_LPAE_PTE_nG;
432
433 if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
434 pte |= ARM_LPAE_PTE_AP_RDONLY;
435
436 if (!(prot & IOMMU_PRIV))
437 pte |= ARM_LPAE_PTE_AP_UNPRIV;
438
439 if (prot & IOMMU_MMIO)
440 pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
441 << ARM_LPAE_PTE_ATTRINDX_SHIFT);
442 else if (prot & IOMMU_CACHE)
443 pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
444 << ARM_LPAE_PTE_ATTRINDX_SHIFT);
445 } else {
446 pte = ARM_LPAE_PTE_HAP_FAULT;
447 if (prot & IOMMU_READ)
448 pte |= ARM_LPAE_PTE_HAP_READ;
449 if (prot & IOMMU_WRITE)
450 pte |= ARM_LPAE_PTE_HAP_WRITE;
451 if (prot & IOMMU_MMIO)
452 pte |= ARM_LPAE_PTE_MEMATTR_DEV;
453 else if (prot & IOMMU_CACHE)
454 pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
455 else
456 pte |= ARM_LPAE_PTE_MEMATTR_NC;
457 }
458
459 if (prot & IOMMU_NOEXEC)
460 pte |= ARM_LPAE_PTE_XN;
461
462 return pte;
463}
464
465static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
466 phys_addr_t paddr, size_t size, int iommu_prot)
467{
468 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
469 arm_lpae_iopte *ptep = data->pgd;
470 int ret, lvl = ARM_LPAE_START_LVL(data);
471 arm_lpae_iopte prot;
472
473 /* If no access, then nothing to do */
474 if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
475 return 0;
476
477 if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
478 paddr >= (1ULL << data->iop.cfg.oas)))
479 return -ERANGE;
480
481 prot = arm_lpae_prot_to_pte(data, iommu_prot);
482 ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
483 /*
484 * Synchronise all PTE updates for the new mapping before there's
485 * a chance for anything to kick off a table walk for the new iova.
486 */
487 wmb();
488
489 return ret;
490}
491
492static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
493 arm_lpae_iopte *ptep)
494{
495 arm_lpae_iopte *start, *end;
496 unsigned long table_size;
497
498 if (lvl == ARM_LPAE_START_LVL(data))
499 table_size = data->pgd_size;
500 else
501 table_size = ARM_LPAE_GRANULE(data);
502
503 start = ptep;
504
505 /* Only leaf entries at the last level */
506 if (lvl == ARM_LPAE_MAX_LEVELS - 1)
507 end = ptep;
508 else
509 end = (void *)ptep + table_size;
510
511 while (ptep != end) {
512 arm_lpae_iopte pte = *ptep++;
513
514 if (!pte || iopte_leaf(pte, lvl))
515 continue;
516
517 __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
518 }
519
520 __arm_lpae_free_pages(start, table_size, &data->iop.cfg);
521}
522
523static void arm_lpae_free_pgtable(struct io_pgtable *iop)
524{
525 struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
526
527 __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
528 kfree(data);
529}
530
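/*
 * Annotation: handle a partial unmap of a block mapping by replacing
 * the block with a next-level table that remaps the remainder of the
 * region, leaving a hole for the part being unmapped. The new table is
 * installed with arm_lpae_install_table() to cope with concurrent
 * unmaps splitting the same block.
 */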
531static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
532 unsigned long iova, size_t size,
533 arm_lpae_iopte blk_pte, int lvl,
534 arm_lpae_iopte *ptep)
535{
536 struct io_pgtable_cfg *cfg = &data->iop.cfg;
537 arm_lpae_iopte pte, *tablep;
e1d3c0fd 538 phys_addr_t blk_paddr;
539 size_t tablesz = ARM_LPAE_GRANULE(data);
540 size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
541 int i, unmap_idx = -1;
542
543 if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
544 return 0;
545
546 tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
547 if (!tablep)
548 return 0; /* Bytes unmapped */
549
550 if (size == split_sz)
551 unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);
552
553 blk_paddr = iopte_to_paddr(blk_pte, data);
554 pte = iopte_prot(blk_pte);
555
556 for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
557 /* Unmap! */
558 if (i == unmap_idx)
559 continue;
560
561 __arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
562 }
563
564 pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg);
565 if (pte != blk_pte) {
566 __arm_lpae_free_pages(tablep, tablesz, cfg);
567 /*
568 * We may race against someone unmapping another part of this
569 * block, but anything else is invalid. We can't misinterpret
570 * a page entry here since we're never at the last level.
571 */
572 if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
573 return 0;
574
575 tablep = iopte_deref(pte, data);
576 } else if (unmap_idx >= 0) {
577 io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
578 io_pgtable_tlb_sync(&data->iop);
579 return size;
580 }
581
582 return __arm_lpae_unmap(data, iova, size, lvl, tablep);
583}
584
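/*
 * Annotation: recursive unmap helper. When the requested size matches
 * the block size at this level the entry is cleared and the TLB is
 * invalidated (or the IOVA merely queued under
 * IO_PGTABLE_QUIRK_NON_STRICT); a partial unmap of a leaf falls through
 * to arm_lpae_split_blk_unmap().
 */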
585static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
586 unsigned long iova, size_t size, int lvl,
587 arm_lpae_iopte *ptep)
588{
589 arm_lpae_iopte pte;
590 struct io_pgtable *iop = &data->iop;
591
592 /* Something went horribly wrong and we ran out of page table */
593 if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
594 return 0;
595
596 ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
597 pte = READ_ONCE(*ptep);
598 if (WARN_ON(!pte))
599 return 0;
600
601 /* If the size matches this level, we're in the right place */
602 if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
603 __arm_lpae_set_pte(ptep, 0, &iop->cfg);
604
605 if (!iopte_leaf(pte, lvl)) {
606 /* Also flush any partial walks */
607 io_pgtable_tlb_add_flush(iop, iova, size,
608 ARM_LPAE_GRANULE(data), false);
609 io_pgtable_tlb_sync(iop);
610 ptep = iopte_deref(pte, data);
611 __arm_lpae_free_pgtable(data, lvl + 1, ptep);
612 } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
613 /*
614 * Order the PTE update against queueing the IOVA, to
615 * guarantee that a flush callback from a different CPU
616 * has observed it before the TLBIALL can be issued.
617 */
618 smp_wmb();
619 } else {
620 io_pgtable_tlb_add_flush(iop, iova, size, size, true);
621 }
622
623 return size;
624 } else if (iopte_leaf(pte, lvl)) {
625 /*
626 * Insert a table at the next level to map the old region,
627 * minus the part we want to unmap
628 */
629 return arm_lpae_split_blk_unmap(data, iova, size, pte,
630 lvl + 1, ptep);
631 }
632
633 /* Keep on walkin' */
634 ptep = iopte_deref(pte, data);
635 return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
636}
637
638static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
639 size_t size)
640{
641 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
642 arm_lpae_iopte *ptep = data->pgd;
643 int lvl = ARM_LPAE_START_LVL(data);
644
645 if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
646 return 0;
647
648 return __arm_lpae_unmap(data, iova, size, lvl, ptep);
649}
650
651static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
652 unsigned long iova)
653{
654 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
655 arm_lpae_iopte pte, *ptep = data->pgd;
656 int lvl = ARM_LPAE_START_LVL(data);
657
658 do {
659 /* Valid IOPTE pointer? */
660 if (!ptep)
661 return 0;
662
663 /* Grab the IOPTE we're interested in */
664 ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
665 pte = READ_ONCE(*ptep);
666
667 /* Valid entry? */
668 if (!pte)
669 return 0;
670
671 /* Leaf entry? */
672 if (iopte_leaf(pte,lvl))
673 goto found_translation;
674
675 /* Take it to the next level */
676 ptep = iopte_deref(pte, data);
677 } while (++lvl < ARM_LPAE_MAX_LEVELS);
678
679 /* Ran out of page tables to walk */
680 return 0;
681
682found_translation:
683 iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
684 return iopte_to_paddr(pte, data) | iova;
685}
686
687static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
688{
689 unsigned long granule, page_sizes;
690 unsigned int max_addr_bits = 48;
691
692 /*
693 * We need to restrict the supported page sizes to match the
694 * translation regime for a particular granule. Aim to match
695 * the CPU page size if possible, otherwise prefer smaller sizes.
696 * While we're at it, restrict the block sizes to match the
697 * chosen granule.
698 */
699 if (cfg->pgsize_bitmap & PAGE_SIZE)
700 granule = PAGE_SIZE;
701 else if (cfg->pgsize_bitmap & ~PAGE_MASK)
702 granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
703 else if (cfg->pgsize_bitmap & PAGE_MASK)
704 granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
705 else
706 granule = 0;
707
708 switch (granule) {
709 case SZ_4K:
710 page_sizes = (SZ_4K | SZ_2M | SZ_1G);
711 break;
712 case SZ_16K:
713 page_sizes = (SZ_16K | SZ_32M);
714 break;
715 case SZ_64K:
716 max_addr_bits = 52;
717 page_sizes = (SZ_64K | SZ_512M);
718 if (cfg->oas > 48)
719 page_sizes |= 1ULL << 42; /* 4TB */
720 break;
721 default:
722 page_sizes = 0;
723 }
724
725 cfg->pgsize_bitmap &= page_sizes;
726 cfg->ias = min(cfg->ias, max_addr_bits);
727 cfg->oas = min(cfg->oas, max_addr_bits);
728}
729
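/*
 * Worked example (annotation, not in the original source): the table
 * geometry below is derived from the configured IAS and granule. With a
 * 4K granule and a 48-bit IAS: pg_shift = 12, bits_per_level = 9,
 * va_bits = 36, levels = DIV_ROUND_UP(36, 9) = 4, and the pgd is a
 * single 4K page (pgd_bits = 9).
 */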
730static struct arm_lpae_io_pgtable *
731arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
732{
733 unsigned long va_bits, pgd_bits;
734 struct arm_lpae_io_pgtable *data;
735
736 arm_lpae_restrict_pgsizes(cfg);
737
738 if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
739 return NULL;
740
741 if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
742 return NULL;
743
744 if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
745 return NULL;
746
747 if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
748 dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
749 return NULL;
750 }
751
752 data = kmalloc(sizeof(*data), GFP_KERNEL);
753 if (!data)
754 return NULL;
755
756 data->pg_shift = __ffs(cfg->pgsize_bitmap);
757 data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));
758
759 va_bits = cfg->ias - data->pg_shift;
760 data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
761
762 /* Calculate the actual size of our pgd (without concatenation) */
763 pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
764 data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
765
766 data->iop.ops = (struct io_pgtable_ops) {
767 .map = arm_lpae_map,
768 .unmap = arm_lpae_unmap,
769 .iova_to_phys = arm_lpae_iova_to_phys,
770 };
771
772 return data;
773}
774
775static struct io_pgtable *
776arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
777{
778 u64 reg;
779 struct arm_lpae_io_pgtable *data;
780
781 if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA |
782 IO_PGTABLE_QUIRK_NON_STRICT))
783 return NULL;
784
785 data = arm_lpae_alloc_pgtable(cfg);
786 if (!data)
787 return NULL;
788
789 /* TCR */
790 reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
791 (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
792 (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
793
794 switch (ARM_LPAE_GRANULE(data)) {
795 case SZ_4K:
796 reg |= ARM_LPAE_TCR_TG0_4K;
797 break;
798 case SZ_16K:
799 reg |= ARM_LPAE_TCR_TG0_16K;
800 break;
801 case SZ_64K:
802 reg |= ARM_LPAE_TCR_TG0_64K;
803 break;
804 }
805
806 switch (cfg->oas) {
807 case 32:
808 reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
809 break;
810 case 36:
811 reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
812 break;
813 case 40:
814 reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
815 break;
816 case 42:
817 reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
818 break;
819 case 44:
820 reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
821 break;
822 case 48:
823 reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
824 break;
825 case 52:
826 reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_IPS_SHIFT);
827 break;
828 default:
829 goto out_free_data;
830 }
831
832 reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
833
834 /* Disable speculative walks through TTBR1 */
835 reg |= ARM_LPAE_TCR_EPD1;
836 cfg->arm_lpae_s1_cfg.tcr = reg;
837
838 /* MAIRs */
839 reg = (ARM_LPAE_MAIR_ATTR_NC
840 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
841 (ARM_LPAE_MAIR_ATTR_WBRWA
842 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
843 (ARM_LPAE_MAIR_ATTR_DEVICE
844 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
845
846 cfg->arm_lpae_s1_cfg.mair[0] = reg;
847 cfg->arm_lpae_s1_cfg.mair[1] = 0;
848
849 /* Looking good; allocate a pgd */
850 data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
851 if (!data->pgd)
852 goto out_free_data;
853
854 /* Ensure the empty pgd is visible before any actual TTBR write */
855 wmb();
856
857 /* TTBRs */
858 cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
859 cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
860 return &data->iop;
861
862out_free_data:
863 kfree(data);
864 return NULL;
865}
866
867static struct io_pgtable *
868arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
869{
870 u64 reg, sl;
871 struct arm_lpae_io_pgtable *data;
872
873 /* The NS quirk doesn't apply at stage 2 */
874 if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NO_DMA |
875 IO_PGTABLE_QUIRK_NON_STRICT))
876 return NULL;
877
878 data = arm_lpae_alloc_pgtable(cfg);
879 if (!data)
880 return NULL;
881
882 /*
883 * Concatenate PGDs at level 1 if possible in order to reduce
884 * the depth of the stage-2 walk.
885 */
886 if (data->levels == ARM_LPAE_MAX_LEVELS) {
887 unsigned long pgd_pages;
888
889 pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
890 if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
891 data->pgd_size = pgd_pages << data->pg_shift;
892 data->levels--;
893 }
894 }
895
896 /* VTCR */
897 reg = ARM_64_LPAE_S2_TCR_RES1 |
898 (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
899 (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
900 (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
901
902 sl = ARM_LPAE_START_LVL(data);
903
904 switch (ARM_LPAE_GRANULE(data)) {
905 case SZ_4K:
906 reg |= ARM_LPAE_TCR_TG0_4K;
907 sl++; /* SL0 format is different for 4K granule size */
908 break;
909 case SZ_16K:
910 reg |= ARM_LPAE_TCR_TG0_16K;
911 break;
912 case SZ_64K:
913 reg |= ARM_LPAE_TCR_TG0_64K;
914 break;
915 }
916
917 switch (cfg->oas) {
918 case 32:
919 reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
920 break;
921 case 36:
922 reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
923 break;
924 case 40:
925 reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
926 break;
927 case 42:
928 reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
929 break;
930 case 44:
931 reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
932 break;
933 case 48:
934 reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
935 break;
936 case 52:
937 reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_PS_SHIFT);
938 break;
939 default:
940 goto out_free_data;
941 }
942
943 reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
944 reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
945 cfg->arm_lpae_s2_cfg.vtcr = reg;
946
947 /* Allocate pgd pages */
948 data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
949 if (!data->pgd)
950 goto out_free_data;
951
952 /* Ensure the empty pgd is visible before any actual TTBR write */
953 wmb();
954
955 /* VTTBR */
956 cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
957 return &data->iop;
958
959out_free_data:
960 kfree(data);
961 return NULL;
962}
963
964static struct io_pgtable *
965arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
966{
967 struct io_pgtable *iop;
968
969 if (cfg->ias > 32 || cfg->oas > 40)
970 return NULL;
971
972 cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
973 iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
974 if (iop) {
975 cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
976 cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
977 }
978
979 return iop;
980}
981
982static struct io_pgtable *
983arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
984{
985 struct io_pgtable *iop;
986
987 if (cfg->ias > 40 || cfg->oas > 40)
988 return NULL;
989
990 cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
991 iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
992 if (iop)
993 cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;
994
995 return iop;
996}
997
998struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
999 .alloc = arm_64_lpae_alloc_pgtable_s1,
1000 .free = arm_lpae_free_pgtable,
1001};
1002
1003struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
1004 .alloc = arm_64_lpae_alloc_pgtable_s2,
1005 .free = arm_lpae_free_pgtable,
1006};
1007
1008struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
1009 .alloc = arm_32_lpae_alloc_pgtable_s1,
1010 .free = arm_lpae_free_pgtable,
1011};
1012
1013struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
1014 .alloc = arm_32_lpae_alloc_pgtable_s2,
1015 .free = arm_lpae_free_pgtable,
1016};
1017
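/*
 * Annotation: the optional selftest below exercises map/unmap/
 * iova_to_phys with dummy TLB ops over a matrix of page-size bitmaps
 * and input address sizes, using identity (iova == paddr) mappings.
 */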
1018#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
1019
1020static struct io_pgtable_cfg *cfg_cookie;
1021
1022static void dummy_tlb_flush_all(void *cookie)
1023{
1024 WARN_ON(cookie != cfg_cookie);
1025}
1026
1027static void dummy_tlb_add_flush(unsigned long iova, size_t size,
1028 size_t granule, bool leaf, void *cookie)
1029{
1030 WARN_ON(cookie != cfg_cookie);
1031 WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
1032}
1033
1034static void dummy_tlb_sync(void *cookie)
1035{
1036 WARN_ON(cookie != cfg_cookie);
1037}
1038
1039static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
1040 .tlb_flush_all = dummy_tlb_flush_all,
1041 .tlb_add_flush = dummy_tlb_add_flush,
1042 .tlb_sync = dummy_tlb_sync,
1043};
1044
1045static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
1046{
1047 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
1048 struct io_pgtable_cfg *cfg = &data->iop.cfg;
1049
1050 pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
1051 cfg->pgsize_bitmap, cfg->ias);
1052 pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
1053 data->levels, data->pgd_size, data->pg_shift,
1054 data->bits_per_level, data->pgd);
1055}
1056
1057#define __FAIL(ops, i) ({ \
1058 WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
1059 arm_lpae_dump_ops(ops); \
1060 selftest_running = false; \
1061 -EFAULT; \
1062})
1063
1064static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
1065{
1066 static const enum io_pgtable_fmt fmts[] = {
1067 ARM_64_LPAE_S1,
1068 ARM_64_LPAE_S2,
1069 };
1070
1071 int i, j;
1072 unsigned long iova;
1073 size_t size;
1074 struct io_pgtable_ops *ops;
1075
1076 selftest_running = true;
1077
1078 for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
1079 cfg_cookie = cfg;
1080 ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
1081 if (!ops) {
1082 pr_err("selftest: failed to allocate io pgtable ops\n");
1083 return -ENOMEM;
1084 }
1085
1086 /*
1087 * Initial sanity checks.
1088 * Empty page tables shouldn't provide any translations.
1089 */
1090 if (ops->iova_to_phys(ops, 42))
1091 return __FAIL(ops, i);
1092
1093 if (ops->iova_to_phys(ops, SZ_1G + 42))
1094 return __FAIL(ops, i);
1095
1096 if (ops->iova_to_phys(ops, SZ_2G + 42))
1097 return __FAIL(ops, i);
1098
1099 /*
1100 * Distinct mappings of different granule sizes.
1101 */
1102 iova = 0;
1103 for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
1104 size = 1UL << j;
1105
1106 if (ops->map(ops, iova, iova, size, IOMMU_READ |
1107 IOMMU_WRITE |
1108 IOMMU_NOEXEC |
1109 IOMMU_CACHE))
1110 return __FAIL(ops, i);
1111
1112 /* Overlapping mappings */
1113 if (!ops->map(ops, iova, iova + size, size,
1114 IOMMU_READ | IOMMU_NOEXEC))
1115 return __FAIL(ops, i);
1116
1117 if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
1118 return __FAIL(ops, i);
1119
1120 iova += SZ_1G;
1121 }
1122
1123 /* Partial unmap */
1124 size = 1UL << __ffs(cfg->pgsize_bitmap);
1125 if (ops->unmap(ops, SZ_1G + size, size) != size)
1126 return __FAIL(ops, i);
1127
1128 /* Remap of partial unmap */
1129 if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
1130 return __FAIL(ops, i);
1131
1132 if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
1133 return __FAIL(ops, i);
1134
1135 /* Full unmap */
1136 iova = 0;
1137 for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
1138 size = 1UL << j;
1139
1140 if (ops->unmap(ops, iova, size) != size)
1141 return __FAIL(ops, i);
1142
1143 if (ops->iova_to_phys(ops, iova + 42))
1144 return __FAIL(ops, i);
1145
1146 /* Remap full block */
1147 if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
1148 return __FAIL(ops, i);
1149
1150 if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
1151 return __FAIL(ops, i);
1152
1153 iova += SZ_1G;
1154 }
1155
1156 free_io_pgtable_ops(ops);
1157 }
1158
1159 selftest_running = false;
1160 return 0;
1161}
1162
1163static int __init arm_lpae_do_selftests(void)
1164{
1165 static const unsigned long pgsize[] = {
1166 SZ_4K | SZ_2M | SZ_1G,
1167 SZ_16K | SZ_32M,
1168 SZ_64K | SZ_512M,
1169 };
1170
1171 static const unsigned int ias[] = {
1172 32, 36, 40, 42, 44, 48,
1173 };
1174
1175 int i, j, pass = 0, fail = 0;
1176 struct io_pgtable_cfg cfg = {
1177 .tlb = &dummy_tlb_ops,
1178 .oas = 48,
1179 .quirks = IO_PGTABLE_QUIRK_NO_DMA,
1180 };
1181
1182 for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
1183 for (j = 0; j < ARRAY_SIZE(ias); ++j) {
1184 cfg.pgsize_bitmap = pgsize[i];
1185 cfg.ias = ias[j];
1186 pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
1187 pgsize[i], ias[j]);
1188 if (arm_lpae_run_tests(&cfg))
1189 fail++;
1190 else
1191 pass++;
1192 }
1193 }
1194
1195 pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
1196 return fail ? -EFAULT : 0;
1197}
1198subsys_initcall(arm_lpae_do_selftests);
1199#endif