Commit | Line | Data |
---|---|---|
d2912cb1 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
45890f6d VG |
2 | /* |
3 | * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com) | |
45890f6d VG |
4 | */ |
5 | ||
57c8a661 | 6 | #include <linux/memblock.h> |
45890f6d VG |
7 | #include <linux/export.h> |
8 | #include <linux/highmem.h> | |
ca5999fd | 9 | #include <linux/pgtable.h> |
65fddcfc | 10 | #include <asm/processor.h> |
45890f6d VG |
11 | #include <asm/pgalloc.h> |
12 | #include <asm/tlbflush.h> | |
13 | ||
14 | /* | |
15 | * HIGHMEM API: | |
16 | * | |
7423cc0c | 17 | * kmap() API provides sleep semantics hence referred to as "permanent maps" |
45890f6d VG |
18 | * It allows mapping LAST_PKMAP pages, using @last_pkmap_nr as the cursor |
19 | * for book-keeping | |
20 | * | |
21 | * kmap_atomic() can't sleep (calls pagefault_disable()), thus it provides | |
22 | * shortlived ala "temporary mappings" which historically were implemented as | |
23 | * fixmaps (compile time addr etc). Their book-keeping is done per cpu. | |
24 | * | |
25 | * Both these facts combined (preemption disabled and per-cpu allocation) | |
26 | * means the total number of concurrent fixmaps will be limited to max | |
27 | * such allocations in a single control path. Thus KM_TYPE_NR (another | |
28 | * historic relic) is a small'ish number which caps max percpu fixmaps | |
29 | * | |
30 | * ARC HIGHMEM Details | |
31 | * | |
32 | * - the kernel vaddr space from 0x7z to 0x8z (currently used by vmalloc/module) | |
33 | * is now shared between vmalloc and kmap (non overlapping though) | |
34 | * | |
35 | * - Both fixmap/pkmap use a dedicated page table each, hooked up to swapper PGD | |
36 | * This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, which means | |
37 | * 2M of kvaddr space for typical config (8K page and 11:8:13 traversal split) | |
38 | * | |
39cac191 TG |
39 | * - The fixed KMAP slots for kmap_local/atomic() require KM_MAX_IDX slots per |
40 | * CPU. So the number of CPUs sharing a single PTE page is limited. | |
45890f6d VG |
41 | * |
42 | * - pkmap being preemptible, in theory could do with more than 256 concurrent | |
43 | * mappings. However, generic pkmap code: map_new_virtual(), doesn't traverse | |
44 | * the PGD and only works with a single page table @pkmap_page_table, hence | |
45 | * sets the limit | |
46 | */ | |
47 | ||
48 | extern pte_t * pkmap_page_table; | |
45890f6d | 49 | |
899cfd2b | 50 | static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr) |
45890f6d | 51 | { |
e05c7b1f | 52 | pmd_t *pmd_k = pmd_off_k(kvaddr); |
45890f6d VG |
53 | pte_t *pte_k; |
54 | ||
e8625dce | 55 | pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); |
8a7f97b9 MR |
56 | if (!pte_k) |
57 | panic("%s: Failed to allocate %lu bytes align=0x%lx\n", | |
58 | __func__, PAGE_SIZE, PAGE_SIZE); | |
59 | ||
45890f6d VG |
60 | pmd_populate_kernel(&init_mm, pmd_k, pte_k); |
61 | return pte_k; | |
62 | } | |
63 | ||
/*
 * One-time boot setup for the HIGHMEM kvaddr regions: sanity-check the
 * layout at compile time, then wire up the dedicated PTE page for each
 * of pkmap (kmap()) and fixmap (kmap_local/atomic) via the swapper PGD.
 */
void __init kmap_init(void)
{
	/* Due to recursive include hell, we can't do this in processor.h */
	BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
	/* pkmap gets exactly one PTE page, so LAST_PKMAP slots must fit in it */
	BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
	/* likewise, all per-cpu fixmap kmap slots share a single PTE page */
	BUILD_BUG_ON(FIX_KMAP_SLOTS > PTRS_PER_PTE);

	/* pkmap's generic code walks only this one table, so keep the handle */
	pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
	/* fixmap slots are addressed by fixed index; no handle needed */
	alloc_kmap_pgtable(FIXMAP_BASE);
}