Commit | Line | Data |
---|---|---|
b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
850f6ac3 BH |
2 | /* |
3 | * highmem.c: virtual kernel memory mappings for high memory | |
4 | * | |
5 | * PowerPC version, stolen from the i386 version. | |
6 | * | |
7 | * Used in CONFIG_HIGHMEM systems for memory pages which | |
8 | * are not addressable by direct kernel virtual addresses. | |
9 | * | |
10 | * Copyright (C) 1999 Gerhard Wichert, Siemens AG | |
11 | * Gerhard.Wichert@pdb.siemens.de | |
12 | * | |
13 | * | |
14 | * Redesigned the x86 32-bit VM architecture to deal with | |
15 | * up to 16 Terabyte physical memory. With current x86 CPUs |
16 | * we now support up to 64 Gigabytes physical RAM. | |
17 | * | |
18 | * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> | |
19 | * | |
20 | * Reworked for PowerPC by various contributors. Moved from | |
21 | * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp. | |
22 | */ | |
23 | ||
24 | #include <linux/highmem.h> | |
25 | #include <linux/module.h> | |
26 | ||
27 | /* | |
28 | * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap | |
29 | * gives a more generic (and caching) interface. But kmap_atomic can | |
30 | * be used in IRQ contexts, so in some (very limited) cases we need | |
31 | * it. | |
32 | */ | |
/*
 * kmap_atomic_prot - atomically map a page into kernel virtual space
 * with a caller-supplied page protection.
 * @page: the page to map (may be a lowmem or highmem page)
 * @prot: page protection flags used for the temporary mapping
 *
 * Returns the kernel virtual address at which @page is mapped.
 *
 * Disables preemption and pagefaults for the lifetime of the mapping;
 * they are re-enabled by the matching __kunmap_atomic().  Note that the
 * lowmem early-return path below deliberately returns with both still
 * disabled — __kunmap_atomic() undoes them on that path too.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	/* Must come first: the fixmap slot chosen below is per-CPU. */
	preempt_disable();
	pagefault_disable();
	/* Lowmem pages are permanently mapped; no fixmap slot needed. */
	if (!PageHighMem(page))
		return page_address(page);

	/* Claim the next per-CPU atomic-kmap slot index. */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	/* With DEBUG_HIGHMEM, catch reuse of a slot whose pte is still live. */
	WARN_ON(IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !pte_none(*(kmap_pte - idx)));
	/* Install the mapping, then flush this CPU's TLB entry for it. */
	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
	local_flush_tlb_page(NULL, vaddr);

	return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);
53 | ||
/*
 * __kunmap_atomic - tear down a mapping created by kmap_atomic_prot()
 * @kvaddr: kernel virtual address previously returned by kmap_atomic_prot()
 *
 * Re-enables pagefaults and preemption that kmap_atomic_prot() disabled.
 * Must be called in strict LIFO order with respect to kmap_atomic_prot()
 * (implied by the push/pop slot-index scheme — TODO confirm against
 * kmap_atomic_idx_push/pop semantics).
 */
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	/*
	 * Addresses below the kmap fixmap range come from the lowmem
	 * early-return in kmap_atomic_prot(): no slot was pushed and no
	 * pte was installed, so only the disables need undoing.
	 */
	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
		pagefault_enable();
		preempt_enable();
		return;
	}

	if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM)) {
		int type = kmap_atomic_idx();
		unsigned int idx;

		/* Sanity check: kvaddr must match this CPU's current slot. */
		idx = type + KM_TYPE_NR * smp_processor_id();
		WARN_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * force other mappings to Oops if they'll try to access
		 * this pte without first remap it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		local_flush_tlb_page(NULL, vaddr);
	}
	/*
	 * Without DEBUG_HIGHMEM the stale pte is left in place; the next
	 * kmap_atomic user of this slot simply overwrites it.
	 */

	/* Release the slot, then undo the disables from kmap_atomic_prot(). */
	kmap_atomic_idx_pop();
	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);