// SPDX-License-Identifier: GPL-2.0
/* arch/mips/mm/highmem.c */
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>

#include <asm/tlbflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

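/*
 * Usage sketch (hypothetical helper, for illustration only): a typical
 * caller brackets access to a possibly-highmem page with kmap()/kunmap().
 * kmap() may sleep, so this pattern is only valid in process context.
 */
#if 0	/* example only */
static void example_clear_page(struct page *page)
{
	void *vaddr = kmap(page);	/* may sleep */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);
}
#endif
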
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */

void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	/* pick this CPU's next atomic kmap slot */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type __maybe_unused;

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		preempt_enable();
		return;
	}

	type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
	{
		int idx = type + KM_TYPE_NR * smp_processor_id();

		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * Force other mappings to Oops if they try to access
		 * this pte without first remapping it.
		 */
		pte_clear(&init_mm, vaddr, kmap_pte - idx);
		local_flush_tlb_one(vaddr);
	}
#endif
	kmap_atomic_idx_pop();
	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

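/*
 * Usage sketch (hypothetical helper, a minimal example only): an atomic
 * kmap brackets a short copy. No sleeping is allowed between kmap_atomic()
 * and the matching kunmap_atomic() from <linux/highmem.h>, which wraps
 * __kunmap_atomic() above.
 */
#if 0	/* example only */
static void example_copy_from_page(void *dst, struct page *page, size_t len)
{
	void *src = kmap_atomic(page);	/* no sleeping until unmapped */

	memcpy(dst, src, len);
	kunmap_atomic(src);
}
#endif
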
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void *)vaddr;
}

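/*
 * Usage sketch (hypothetical helper, for illustration only): mapping by
 * pfn is useful when there is no struct page, e.g. memory known only by
 * physical address. Unmapping still goes through kunmap_atomic(), as
 * for kmap_atomic().
 */
#if 0	/* example only */
static void example_zero_frame(unsigned long pfn)
{
	void *vaddr = kmap_atomic_pfn(pfn);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);
}
#endif
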
void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}