// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0.
 */
#define ARM_LPAE_START_LVL(d)	(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)

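/*
 * Illustrative values (not exhaustive): for a 4K granule with a 48-bit
 * IAS, pg_shift = 12, bits_per_level = 9 and levels = 4, so the shifts
 * come out as 39/30/21/12 for levels 0-3, matching the VMSAv8-64
 * 512-entry table geometry.
 */
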
#define ARM_LPAE_GRANULE(d)	(1UL << (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1ULL << (ilog2(sizeof(arm_lpae_iopte)) +			\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))

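/*
 * Worked example (4K granule, 8-byte PTEs, so bits_per_level = 9):
 * level 3 -> 1ULL << (3 + 1*9) = 4K pages, level 2 -> 2M blocks,
 * level 1 -> 1G blocks. Relying on ilog2(sizeof(arm_lpae_iopte)) works
 * because pg_shift = bits_per_level + 3 for every supported granule.
 */
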
/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL
#define ARM_LPAE_TCR_PS_52_BIT		0x6ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK;
}

static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (data->pg_shift < 16)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}

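/*
 * Example of the 52-bit packing (64K granule only, since pg_shift >= 16
 * guarantees OA bits 15:12 are zero): PA bits 51:48 are folded down by
 * 36 into the otherwise-RES0 PTE bits 15:12 by paddr_to_iopte(), and
 * iopte_to_paddr() shifts them back up and masks with bits 51:16. For
 * instance, paddr 0x3000000000000 round-trips via PTE bits 13:12.
 */
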
static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	struct page *p;
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
			     gfp | __GFP_ZERO, order);
	if (!p)
		return NULL;

	pages = page_address(p);
	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	__free_pages(p, order);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages((unsigned long)pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep), DMA_TO_DEVICE);
}

static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;
	pte |= ARM_LPAE_PTE_SH_IS;
	pte |= paddr_to_iopte(paddr, data);

	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = *ptep;

	if (iopte_leaf(pte, lvl, data->iop.fmt)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
			return -EINVAL;
	}

	__arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
	return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct io_pgtable_cfg *cfg)
{
	arm_lpae_iopte old, new;

	new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}

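/*
 * A sketch of the race this resolves (illustrative reading of the code
 * above): two CPUs mapping under the same empty PTE each allocate a
 * table and race on the cmpxchg; the loser sees old != curr and the
 * caller frees its table and walks the winner's entry instead. For
 * non-coherent walks, SW_SYNC records that the winning PTE has already
 * been cleaned to memory, so later mappers can skip a redundant sync.
 */
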
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}

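/*
 * Walk example (illustrative, 4K granule, 48-bit IAS): mapping a 2M
 * region recurses from level 0 until the requested size matches the
 * level-2 block size, where arm_lpae_init_pte() installs a single
 * block entry; a 4K mapping continues one level further to a page
 * entry at level 3.
 */
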
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_QCOM_SYS_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}

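/*
 * Example (illustrative): IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE at
 * stage 1 yields nG | AP_UNPRIV | ATTRINDX=CACHE; the same prot at
 * stage 2 yields HAP_READ | HAP_WRITE | MEMATTR_OIWB. AF, shareability
 * and the descriptor type are added later by __arm_lpae_init_pte().
 */
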
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
		    paddr >= (1ULL << data->iop.cfg.oas)))
		return -ERANGE;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}

static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i, unmap_idx = -1;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz)
		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx >= 0) {
		io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
		io_pgtable_tlb_sync(&data->iop);
		return size;
	}

	return __arm_lpae_unmap(data, iova, size, lvl, tablep);
}

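/*
 * Split example (illustrative, 4K granule): unmapping one 4K page out
 * of a 2M level-2 block allocates a level-3 table, populates 511 page
 * entries inheriting the block's attributes via iopte_prot(), skips
 * the victim index, and swaps the new table in atomically with
 * arm_lpae_install_table().
 */
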
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl, iop->fmt)) {
			/* Also flush any partial walks */
			io_pgtable_tlb_add_flush(iop, iova, size,
						 ARM_LPAE_GRANULE(data), false);
			io_pgtable_tlb_sync(iop);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
			/*
			 * Order the PTE update against queueing the IOVA, to
			 * guarantee that a flush callback from a different CPU
			 * has observed it before the TLBIALL can be issued.
			 */
			smp_wmb();
		} else {
			io_pgtable_tlb_add_flush(iop, iova, size, size, true);
		}

		return size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size, pte,
						lvl + 1, ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}

static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
		return 0;

	return __arm_lpae_unmap(data, iova, size, lvl, ptep);
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}

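/*
 * Lookup example (illustrative, 4K granule): an IOVA covered by a 2M
 * block hits a leaf at level 2, so the low 21 bits of the IOVA are
 * kept as the offset and OR'd onto the block's output address.
 */
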
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}

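/*
 * Selection example (illustrative, 4K CPU pages): a caller offering
 * pgsize_bitmap = SZ_4K | SZ_64K | SZ_2M matches PAGE_SIZE, so the 4K
 * regime wins and the bitmap is masked against SZ_4K | SZ_2M | SZ_1G,
 * leaving SZ_4K | SZ_2M.
 */
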
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

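/*
 * Geometry example (illustrative): ias = 48 with a 4K granule gives
 * pg_shift = 12, bits_per_level = 9, va_bits = 36, levels = 4 and
 * pgd_bits = 36 - 27 = 9, i.e. a single 4K pgd page; ias = 39 would
 * instead give levels = 3, with the walk starting at level 1.
 */
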
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
		      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
		      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
	} else {
		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
		      (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
		      (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT);
	}

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}

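	/*
	 * Concatenation example (illustrative): a 40-bit IAS with a 4K
	 * granule would need 4 levels with a 16-byte level-0 pgd; its
	 * two entries give pgd_pages = 2 <= 16, so the pgd grows to two
	 * concatenated 4K level-1 tables and levels drops to 3.
	 */
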
	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias != 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		u64 mair, ttbr;

		/* Copy values as union fields overlap */
		mair = cfg->arm_lpae_s1_cfg.mair[0];
		ttbr = cfg->arm_lpae_s1_cfg.ttbr[0];

		cfg->arm_mali_lpae_cfg.memattr = mair;
		cfg->arm_mali_lpae_cfg.transtab = ttbr |
			ARM_MALI_LPAE_TTBR_READ_INNER |
			ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	}

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};

WD
1064#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
1065
1066static struct io_pgtable_cfg *cfg_cookie;
1067
1068static void dummy_tlb_flush_all(void *cookie)
1069{
1070 WARN_ON(cookie != cfg_cookie);
1071}
1072
06c610e8
RM
1073static void dummy_tlb_add_flush(unsigned long iova, size_t size,
1074 size_t granule, bool leaf, void *cookie)
fe4b991d
WD
1075{
1076 WARN_ON(cookie != cfg_cookie);
1077 WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
1078}
1079
1080static void dummy_tlb_sync(void *cookie)
1081{
1082 WARN_ON(cookie != cfg_cookie);
1083}
1084
dfed5f01 1085static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
fe4b991d
WD
1086 .tlb_flush_all = dummy_tlb_flush_all,
1087 .tlb_add_flush = dummy_tlb_add_flush,
1088 .tlb_sync = dummy_tlb_sync,
fe4b991d
WD
1089};
1090
1091static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
1092{
1093 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
1094 struct io_pgtable_cfg *cfg = &data->iop.cfg;
1095
1096 pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
1097 cfg->pgsize_bitmap, cfg->ias);
1098 pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
1099 data->levels, data->pgd_size, data->pg_shift,
1100 data->bits_per_level, data->pgd);
1101}
1102
1103#define __FAIL(ops, i) ({ \
1104 WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
1105 arm_lpae_dump_ops(ops); \
1106 selftest_running = false; \
1107 -EFAULT; \
1108})
1109
1110static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
1111{
1112 static const enum io_pgtable_fmt fmts[] = {
1113 ARM_64_LPAE_S1,
1114 ARM_64_LPAE_S2,
1115 };
1116
1117 int i, j;
1118 unsigned long iova;
1119 size_t size;
1120 struct io_pgtable_ops *ops;
1121
1122 selftest_running = true;
1123
1124 for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
1125 cfg_cookie = cfg;
1126 ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
1127 if (!ops) {
1128 pr_err("selftest: failed to allocate io pgtable ops\n");
1129 return -ENOMEM;
1130 }
1131
1132 /*
1133 * Initial sanity checks.
1134 * Empty page tables shouldn't provide any translations.
1135 */
1136 if (ops->iova_to_phys(ops, 42))
1137 return __FAIL(ops, i);
1138
1139 if (ops->iova_to_phys(ops, SZ_1G + 42))
1140 return __FAIL(ops, i);
1141
1142 if (ops->iova_to_phys(ops, SZ_2G + 42))
1143 return __FAIL(ops, i);
1144
1145 /*
1146 * Distinct mappings of different granule sizes.
1147 */
1148 iova = 0;
4ae8a5c5 1149 for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
fe4b991d
WD
1150 size = 1UL << j;
1151
1152 if (ops->map(ops, iova, iova, size, IOMMU_READ |
1153 IOMMU_WRITE |
1154 IOMMU_NOEXEC |
1155 IOMMU_CACHE))
1156 return __FAIL(ops, i);
1157
1158 /* Overlapping mappings */
1159 if (!ops->map(ops, iova, iova + size, size,
1160 IOMMU_READ | IOMMU_NOEXEC))
1161 return __FAIL(ops, i);
1162
1163 if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
1164 return __FAIL(ops, i);
1165
1166 iova += SZ_1G;
fe4b991d
WD
1167 }
1168
1169 /* Partial unmap */
1170 size = 1UL << __ffs(cfg->pgsize_bitmap);
1171 if (ops->unmap(ops, SZ_1G + size, size) != size)
1172 return __FAIL(ops, i);
1173
1174 /* Remap of partial unmap */
1175 if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
1176 return __FAIL(ops, i);
1177
1178 if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
1179 return __FAIL(ops, i);
1180
1181 /* Full unmap */
1182 iova = 0;
f793b13e 1183 for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
fe4b991d
WD
1184 size = 1UL << j;
1185
1186 if (ops->unmap(ops, iova, size) != size)
1187 return __FAIL(ops, i);
1188
1189 if (ops->iova_to_phys(ops, iova + 42))
1190 return __FAIL(ops, i);
1191
1192 /* Remap full block */
1193 if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
1194 return __FAIL(ops, i);
1195
1196 if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
1197 return __FAIL(ops, i);
1198
1199 iova += SZ_1G;
fe4b991d
WD
1200 }
1201
1202 free_io_pgtable_ops(ops);
1203 }
1204
1205 selftest_running = false;
1206 return 0;
1207}
1208
1209static int __init arm_lpae_do_selftests(void)
1210{
1211 static const unsigned long pgsize[] = {
1212 SZ_4K | SZ_2M | SZ_1G,
1213 SZ_16K | SZ_32M,
1214 SZ_64K | SZ_512M,
1215 };
1216
1217 static const unsigned int ias[] = {
1218 32, 36, 40, 42, 44, 48,
1219 };
1220
1221 int i, j, pass = 0, fail = 0;
1222 struct io_pgtable_cfg cfg = {
1223 .tlb = &dummy_tlb_ops,
1224 .oas = 48,
4f41845b 1225 .coherent_walk = true,
fe4b991d
WD
1226 };
1227
1228 for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
1229 for (j = 0; j < ARRAY_SIZE(ias); ++j) {
1230 cfg.pgsize_bitmap = pgsize[i];
1231 cfg.ias = ias[j];
1232 pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
1233 pgsize[i], ias[j]);
1234 if (arm_lpae_run_tests(&cfg))
1235 fail++;
1236 else
1237 pass++;
1238 }
1239 }
1240
1241 pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
1242 return fail ? -EFAULT : 0;
1243}
1244subsys_initcall(arm_lpae_do_selftests);
1245#endif