/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "io-pgtable.h"

#define ARM_LPAE_MAX_ADDR_BITS		48
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_pgtable(x)					\
	container_of((x), struct io_pgtable, ops)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0
 */
#define ARM_LPAE_START_LVL(d)	(ARM_LPAE_MAX_LEVELS - (d)->levels)
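/*
 * e.g. with a 4K granule (9 bits per level), a 32-bit input address
 * space needs 3 levels and so starts the walk at level 1, while a
 * 48-bit input address space needs all 4 levels and starts at level 0.
 */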

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)
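/*
 * e.g. with a 4K granule and a level-0 start, this evaluates to shifts
 * of 39, 30, 21 and 12 for levels 0-3 respectively, matching the
 * VMSAv8-64 translation table walk.
 */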

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift)

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1 << (ilog2(sizeof(arm_lpae_iopte)) +				\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
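/*
 * e.g. with a 4K granule this gives 4K at level 3, 2M at level 2 and
 * 1G at level 1: exactly the leaf sizes the architecture allows.
 */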

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2

/* IOPTE accessors */
#define iopte_deref(pte,d)						\
	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)		\
	& ~((1ULL << (d)->pg_shift) - 1)))

#define iopte_type(pte,l)						\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)						\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?				\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :		\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))

#define iopte_to_pfn(pte,d)						\
	(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)

#define pfn_to_iopte(pfn,d)						\
	(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
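/*
 * The two are inverses: with a 4K granule, pfn 0x12345 (an arbitrary
 * example value) becomes an output address of 0x12345000 in pte[47:12]
 * and converts back again, with anything above ARM_LPAE_MAX_ADDR_BITS
 * masked off.
 */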

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static bool selftest_running = false;

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	/* We require an unmap first */
	if (iopte_leaf(*ptep, lvl)) {
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);

	*ptep = pte;
	data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie);
	return 0;
}
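/*
 * e.g. a stage-1 cacheable, writeable 4K mapping ends up as:
 * output address | nG | AF | SH_IS | AP_UNPRIV | ATTRINDX=1 | TYPE_PAGE,
 * where the access flag is always set here.
 */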

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	void *cookie = data->iop.cookie;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & data->iop.cfg.pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = alloc_pages_exact(1UL << data->pg_shift,
					  GFP_ATOMIC | __GFP_ZERO);
		if (!cptep)
			return -ENOMEM;

		data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift,
						 cookie);
		pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
		if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		*ptep = pte;
		data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
	} else {
		cptep = iopte_deref(pte, data);
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}
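/*
 * e.g. mapping 2M with a 4K granule and a level-0 start recurses through
 * levels 0 and 1, allocating next-level tables as required, and installs
 * a block entry at level 2, where size == ARM_LPAE_BLOCK_SIZE(2, data).
 */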

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}
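/*
 * e.g. for stage-2, IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE yields HAP
 * read/write permission with write-back memory attributes, while
 * dropping IOMMU_CACHE selects MEMATTR_NC instead.
 */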

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		return;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = 1UL << data->pg_shift;

	start = ptep;
	end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	free_pages_exact(start, table_size);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}

static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte prot, int lvl,
				    arm_lpae_iopte *ptep, size_t blk_size)
{
	unsigned long blk_start, blk_end;
	phys_addr_t blk_paddr;
	arm_lpae_iopte table = 0;
	void *cookie = data->iop.cookie;
	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;

	blk_start = iova & ~(blk_size - 1);
	blk_end = blk_start + blk_size;
	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_lpae_iopte *tablep;

		/* Unmap! */
		if (blk_start == iova)
			continue;

		/* __arm_lpae_map expects a pointer to the start of the table */
		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
				   tablep) < 0) {
			if (table) {
				/* Free the table we allocated */
				tablep = iopte_deref(table, data);
				__arm_lpae_free_pgtable(data, lvl + 1, tablep);
			}
			return 0; /* Bytes unmapped */
		}
	}

	*ptep = table;
	tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
	iova &= ~(blk_size - 1);
	tlb->tlb_add_flush(iova, blk_size, true, cookie);
	return size;
}
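/*
 * e.g. unmapping 4K from the middle of a 2M block walks the 512
 * page-sized chunks of the block, remapping all but the target chunk
 * via a new level-3 table that then replaces the old block entry.
 */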

static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
	void *cookie = data->iop.cookie;
	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = *ptep;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS)))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == blk_size) {
		*ptep = 0;
		tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			tlb->tlb_add_flush(iova, size, false, cookie);
			tlb->tlb_sync(data->iop.cookie);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			tlb->tlb_add_flush(iova, size, true, cookie);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size,
						iopte_prot(pte), lvl, ptep,
						blk_size);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}

static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			  size_t size)
{
	size_t unmapped;
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable *iop = &data->iop;
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
	if (unmapped)
		iop->cfg.tlb->tlb_sync(iop->cookie);

	return unmapped;
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte,lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= ((1 << data->pg_shift) - 1);
	return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
		break;
	default:
		cfg->pgsize_bitmap = 0;
	}
}
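/*
 * e.g. on a host with 4K CPU pages, a caller-supplied bitmap of
 * SZ_4K | SZ_64K | SZ_1M picks the 4K granule and shrinks to SZ_4K
 * alone: 64K belongs to a different granule and 1M is not a valid
 * LPAE block size.
 */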

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
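/*
 * Worked example: ias = 48 with a 4K granule gives pg_shift = 12,
 * bits_per_level = 9 and va_bits = 36, hence 4 levels; the pgd then
 * resolves 36 - 27 = 9 bits, i.e. a single 4K level-0 table.
 */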

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
	if (!data->pgd)
		goto out_free_data;

	cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}
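	/*
	 * e.g. a 40-bit IAS with a 4K granule would otherwise need 4
	 * levels and a 16-byte pgd (2 entries); concatenating instead
	 * yields two level-1 tables (an 8K pgd) and a 3-level walk.
	 */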

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
	if (!data->pgd)
		goto out_free_data;

	cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
				void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static struct iommu_gather_ops dummy_tlb_ops __initdata = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
	.flush_pgtable	= dummy_flush_pgtable,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif