// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable-arm.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
	ilog2(sizeof(arm_lpae_iopte)))

#define ARM_LPAE_GRANULE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)

#define ARM_LPAE_PTES_PER_TABLE(d)					\
	(ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))
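
/*
 * Worked example (an illustration added for clarity, not part of the
 * original source): with a 4KiB granule, bits_per_level = 12 - ilog2(8) = 9,
 * so ARM_LPAE_GRANULE(d) = 8 << 9 = 4096 and ARM_LPAE_LVL_SHIFT(l,d) yields
 * 39/30/21/12 for levels 0..3, matching the AArch64 4K translation regime.
 */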

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_LPAE_VTCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0

#define ARM_LPAE_VTCR_PS_SHIFT		16
#define ARM_LPAE_VTCR_PS_MASK		0x7

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			pgd_bits;
	int			start_level;
	int			bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
}
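
/*
 * Note (added for clarity): ARM_LPAE_PTE_TYPE_TABLE and _PAGE share the
 * encoding 3, so iopte_leaf() relies on the level to disambiguate. At the
 * last level a non-Mali leaf is a page descriptor (type 3); Mali encodes
 * its leaves as block descriptors (type 1) at every level, and anywhere
 * else type 3 means a next-level table.
 */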

static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (ARM_LPAE_GRANULE(data) < SZ_64K)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}
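
/*
 * Illustration (not from the original source): with a 64K granule the low
 * 16 bits of a page address are zero, so PTE bits 15:12 are free to hold
 * PA[51:48]. E.g. paddr 0x000f_ffff_ffff_0000 packs to an address field
 * of 0x0000_ffff_ffff_f000, and iopte_to_paddr() rotates it back.
 */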

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	struct page *p;
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
	if (!p)
		return NULL;

	pages = page_address(p);
	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	__free_pages(p, order);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages((unsigned long)pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
}

static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg)
{
	*ptep = 0;

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, 1, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, int num_entries, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	for (i = 0; i < num_entries; i++)
		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, num_entries, cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl, int num_entries,
			     arm_lpae_iopte *ptep)
{
	int i;

	for (i = 0; i < num_entries; i++)
		if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
			/* We require an unmap first */
			WARN_ON(!selftest_running);
			return -EEXIST;
		} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_lpae_iopte *tblp;
			size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

			tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
					     lvl, tblp) != sz) {
				WARN_ON(1);
				return -EINVAL;
			}
		}

	__arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
	return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte old, new;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, 1, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}
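
/*
 * Note (added for clarity): callers compare the returned PTE against
 * 'curr' to learn whether their cmpxchg won. If it differs, a concurrent
 * mapper beat us to it and the caller must free its freshly allocated
 * table and walk the winner's instead, as __arm_lpae_map() does below.
 */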

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, size_t pgcount,
			  arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
			  gfp_t gfp, size_t *mapped)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = 0, num_entries, max_entries, map_idx_start;

	/* Find our entry at the current level */
	map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += map_idx_start;

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size) {
		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start;
		num_entries = min_t(int, pgcount, max_entries);
		ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
		if (!ret)
			*mapped += num_entries * size;

		return ret;
	}

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, data);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, 1, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,
			      cptep, gfp, mapped);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	/*
	 * Also Mali has its own notions of shareability wherein its Inner
	 * domain covers the cores within the GPU, and its Outer domain is
	 * "outside the GPU" (i.e. either the Inner or System domain in CPU
	 * terms, depending on coherency).
	 */
	if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_SH_IS;
	else
		pte |= ARM_LPAE_PTE_SH_OS;

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;

	return pte;
}
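
/*
 * Worked example (illustration, assuming no NS quirk): for a stage-1
 * format, prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE yields
 * nG | AP_UNPRIV | (MAIR_ATTR_IDX_CACHE << ATTRINDX_SHIFT) | SH_IS | AF:
 * writeable (no AP_RDONLY), unprivileged, write-back cacheable, inner
 * shareable, with the access flag preset so no hardware AF faults occur.
 */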

static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int iommu_prot, gfp_t gfp, size_t *mapped)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = data->start_level;
	arm_lpae_iopte prot;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
		return -EINVAL;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext || paddr >> cfg->oas))
		return -ERANGE;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
			     ptep, gfp, mapped);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == data->start_level)
		table_size = ARM_LPAE_PGD_SIZE(data);
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	kfree(data);
}

static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       struct iommu_iotlb_gather *gather,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep, size_t pgcount)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
	int i, unmap_idx_start = -1, num_entries = 0, max_entries;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz) {
		unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
		max_entries = ptes_per_table - unmap_idx_start;
		num_entries = min_t(int, pgcount, max_entries);
	}

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries))
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx_start >= 0) {
		for (i = 0; i < num_entries; i++)
			io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);

		return num_entries * size;
	}

	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;
	int i = 0, num_entries, max_entries, unmap_idx_start;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += unmap_idx_start;
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
		num_entries = min_t(int, pgcount, max_entries);

		while (i < num_entries) {
			pte = READ_ONCE(*ptep);
			if (WARN_ON(!pte))
				break;

			__arm_lpae_clear_pte(ptep, &iop->cfg);

			if (!iopte_leaf(pte, lvl, iop->fmt)) {
				/* Also flush any partial walks */
				io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
							  ARM_LPAE_GRANULE(data));
				__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
			} else if (!iommu_iotlb_gather_queued(gather)) {
				io_pgtable_tlb_add_page(iop, gather, iova + i * size, size);
			}

			ptep++;
			i++;
		}

		return i * size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
						lvl + 1, ptep, pgcount);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
}

static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
				   size_t pgsize, size_t pgcount,
				   struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return 0;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
				data->start_level, ptep);
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = data->start_level;

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}
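
/*
 * Worked example (illustration): a caller passing pgsize_bitmap =
 * SZ_4K | SZ_64K | SZ_2M on a kernel with a 4K PAGE_SIZE gets granule =
 * SZ_4K, page_sizes = SZ_4K | SZ_2M | SZ_1G, and therefore a final
 * bitmap of SZ_4K | SZ_2M; ias/oas stay capped at 48 bits for this
 * granule.
 */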

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct arm_lpae_io_pgtable *data;
	int levels, va_bits, pg_shift;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - pg_shift;
	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
	data->start_level = ARM_LPAE_MAX_LEVELS - levels;

	/* Calculate the actual size of our pgd (without concatenation) */
	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));

	data->iop.ops = (struct io_pgtable_ops) {
		.map_pages	= arm_lpae_map_pages,
		.unmap_pages	= arm_lpae_unmap_pages,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
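
/*
 * Worked example (illustration): for ias = 48 and a 4K granule,
 * pg_shift = 12, bits_per_level = 9, va_bits = 36, so levels =
 * DIV_ROUND_UP(36, 9) = 4, start_level = 0 and pgd_bits = 36 - 27 = 9:
 * a single 4K pgd page resolving IOVA bits 47:39 at level 0.
 */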

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
	bool tg1;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		tcr->sh = ARM_LPAE_TCR_SH_IS;
		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)
			goto out_free_data;
	} else {
		tcr->sh = ARM_LPAE_TCR_SH_OS;
		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
		if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
			tcr->orgn = ARM_LPAE_TCR_RGN_NC;
		else
			tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	}

	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	tcr->tsz = 64ULL - cfg->ias;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));

	cfg->arm_lpae_s1_cfg.mair = reg;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 sl;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->start_level == 0) {
		unsigned long pgd_pages;

		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_bits += data->bits_per_level;
			data->start_level++;
		}
	}

	/* VTCR */
	if (cfg->coherent_walk) {
		vtcr->sh = ARM_LPAE_TCR_SH_IS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		vtcr->sh = ARM_LPAE_TCR_SH_OS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	sl = data->start_level;

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	vtcr->tsz = 64ULL - cfg->ias;
	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->start_level > 0) {
		data->start_level = 0;
		data->pgd_bits = 0;
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	if (cfg->coherent_walk)
		cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;

	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};
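
/*
 * Usage sketch (illustrative, not part of this file; my_tlb_ops, my_dev
 * and my_cookie are hypothetical): an IOMMU driver selects one of the
 * init_fns above via alloc_io_pgtable_ops(), e.g.:
 *
 *	struct io_pgtable_cfg cfg = {
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.ias		= 48,
 *		.oas		= 48,
 *		.coherent_walk	= true,
 *		.tlb		= &my_tlb_ops,
 *		.iommu_dev	= my_dev,
 *	};
 *	struct io_pgtable_ops *ops;
 *	size_t mapped = 0;
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, my_cookie);
 *	if (ops)
 *		ops->map_pages(ops, iova, paddr, SZ_4K, 1,
 *			       IOMMU_READ | IOMMU_WRITE, GFP_KERNEL, &mapped);
 *
 * The selftests below exercise exactly this flow with dummy TLB ops.
 */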

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] __initconst = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size, mapped;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map_pages(ops, iova, iova, size, 1,
					   IOMMU_READ | IOMMU_WRITE |
					   IOMMU_NOEXEC | IOMMU_CACHE,
					   GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map_pages(ops, iova, iova + size, size, 1,
					    IOMMU_READ | IOMMU_NOEXEC,
					    GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap_pages(ops, SZ_1G + size, size, 1, NULL) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map_pages(ops, SZ_1G + size, size, size, 1,
				   IOMMU_READ, GFP_KERNEL, &mapped))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map_pages(ops, iova, iova, size, 1,
					   IOMMU_WRITE, GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] __initconst = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] __initconst = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct device dev;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.coherent_walk = true,
		.iommu_dev = &dev,
	};

	/* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
	set_dev_node(&dev, NUMA_NO_NODE);

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif