```c
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

pte_t *lookup_address(unsigned long address, int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;
        *level = 2;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        *level = 3;

        return pte_offset_kernel(pmd, address);
}
```
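For reference, a minimal usage sketch (illustrative only, not part of this file; `some_kernel_address` is a hypothetical variable): `lookup_address()` reports through `*level` whether the entry it found is a large-page PMD entry (2) or a regular 4 KB PTE (3), and returns NULL when no page table exists for the address at all.

```c
/* Illustrative sketch only -- not part of this file. */
int level;
pte_t *pte = lookup_address(some_kernel_address, &level);

if (!pte) {
        /* no page table covers that address */
} else if (level == 2) {
        /* *pte is really a PMD entry mapping a large (2 or 4 MB) page */
} else {
        /* level == 3: an ordinary 4 KB PTE */
}
```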
```c
static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        unsigned long flags;
        struct page *page;

        /* change init_mm */
        set_pte_atomic(kpte, pte);
        if (SHARED_KERNEL_PMD)
                return;

        spin_lock_irqsave(&pgd_lock, flags);
        /* pgd_list is chained through page->index: */
        for (page = pgd_list; page; page = (struct page *)page->index) {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;

                pgd = (pgd_t *)page_address(page) + pgd_index(address);
                pud = pud_offset(pgd, address);
                pmd = pmd_offset(pud, address);
                set_pte_atomic((pte_t *)pmd, pte);
        }
        spin_unlock_irqrestore(&pgd_lock, flags);
}
```
```c
static int split_large_page(pte_t *kpte, unsigned long address)
{
        pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
        unsigned long addr;
        pte_t *pbase, *tmp;
        struct page *base;
        int i, level;

        base = alloc_pages(GFP_KERNEL, 0);
        if (!base)
                return -ENOMEM;

        down_write(&init_mm.mmap_sem);
        /*
         * Check for races, another CPU might have split this page
         * up for us already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte) {
                WARN_ON_ONCE(1);
                goto out_unlock;
        }

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        paravirt_alloc_pt(&init_mm, page_to_pfn(base));

        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
                set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

        /*
         * Install the new, split up pagetable:
         */
        set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
        base = NULL;    /* the table is installed now, don't free it below */

out_unlock:
        up_write(&init_mm.mmap_sem);

        if (base)
                __free_pages(base, 0);

        return 0;
}
```
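To make the address arithmetic concrete, a small sketch, assuming non-PAE i386 where a large page spans 4 MB and `PTRS_PER_PTE` is 1024 (the physical address below is made up):

```c
/*
 * Illustration only, with a hypothetical physical address: splitting
 * the large page that maps 0x00c12345 on non-PAE i386.
 */
unsigned long phys = 0x00c12345;
unsigned long base = phys & LARGE_PAGE_MASK;    /* 0x00c00000, 4 MB aligned */

/*
 * The loop in split_large_page() then installs PTRS_PER_PTE (1024)
 * 4 KB PTEs: entry i maps base + i * PAGE_SIZE, so the new page table
 * covers exactly the 0x00c00000 - 0x00ffffff range the large page did,
 * with unchanged protections (ref_prot, minus the huge-page bit).
 */
```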
```c
static int __change_page_attr(struct page *page, pgprot_t prot)
{
        struct page *kpte_page;
        unsigned long address;
        int level, err = 0;
        pte_t *kpte;

        BUG_ON(PageHighMem(page));
        address = (unsigned long)page_address(page);

repeat:
        kpte = lookup_address(address, &level);
        if (!kpte)
                return -EINVAL;

        kpte_page = virt_to_page(kpte);
        BUG_ON(PageLRU(kpte_page));
        BUG_ON(PageCompound(kpte_page));

        /*
         * Better fail early if someone sets the kernel text to NX.
         * Does not cover __inittext
         */
        BUG_ON(address >= (unsigned long)&_text &&
               address < (unsigned long)&_etext &&
               (pgprot_val(prot) & _PAGE_NX));

        if (level == 3) {
                set_pte_atomic(kpte, mk_pte(page, canon_pgprot(prot)));
        } else {
                err = split_large_page(kpte, address);
                if (!err)
                        goto repeat;
        }
        return err;
}
```
```c
/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        int err = 0, i;

        for (i = 0; i < numpages; i++, page++) {
                err = __change_page_attr(page, prot);
                if (err)
                        break;
        }

        return err;
}
EXPORT_SYMBOL(change_page_attr);
```
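A hedged usage sketch of the exported interface (illustrative, not from this file; `my_page` is a hypothetical `struct page` the caller owns): remap one page as uncached, then restore write-back, with the mandatory flush the comment above demands.

```c
/* Illustrative sketch: make one kernel page uncached, then restore it. */
int err = change_page_attr(my_page, 1, PAGE_KERNEL_NOCACHE);
if (!err)
        global_flush_tlb();     /* mandatory after change_page_attr() */

/* ... use the uncached mapping ... */

change_page_attr(my_page, 1, PAGE_KERNEL);      /* back to write-back */
global_flush_tlb();
```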
```c
int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot)
{
        int i;
        unsigned long pfn = (__pa(addr) >> PAGE_SHIFT);

        for (i = 0; i < numpages; i++) {
                if (!pfn_valid(pfn + i)) {
                        WARN_ON_ONCE(1);
                        break;
                } else {
                        int level;
                        pte_t *pte = lookup_address(addr + i*PAGE_SIZE, &level);
                        BUG_ON(pte && pte_none(*pte));
                }
        }

        return change_page_attr(virt_to_page(addr), i, prot);
}
```
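The address-based variant wraps the same machinery; a minimal sketch (`vaddr` below is hypothetical and must lie in the kernel linear map, since `virt_to_page()` is applied to it). Note that after a `pfn_valid()` failure, `i` holds the count of pages validated so far, so only those are changed:

```c
/* Illustrative sketch: the same operation keyed by virtual address. */
unsigned long vaddr = (unsigned long)page_address(my_page);

change_page_attr_addr(vaddr, 1, PAGE_KERNEL_NOCACHE);
global_flush_tlb();
```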
```c
static void flush_kernel_map(void *arg)
{
        /*
         * Flush all to work around Errata in early athlons regarding
         * large page flushing.
         */
        __flush_tlb_all();

        if (boot_cpu_data.x86_model >= 4)
                wbinvd();
}

void global_flush_tlb(void)
{
        BUG_ON(irqs_disabled());

        on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
EXPORT_SYMBOL(global_flush_tlb);
```
```c
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        if (!enable) {
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);
        }

        /*
         * the return value is ignored - the calls cannot fail,
         * large pages are disabled at boot time.
         */
        change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));

        /*
         * we should perform an IPI and flush all tlbs,
         * but that can deadlock->flush only current cpu.
         */
        __flush_tlb_all();
}
#endif
```
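For context, a sketch of the caller's side: with CONFIG_DEBUG_PAGEALLOC the page allocator unmaps pages on free and remaps them on allocation, so stray accesses to freed memory fault immediately. Roughly (a simplified sketch of how mm/page_alloc.c of the same era drives this hook; `free_path`/`alloc_path` are hypothetical names, not real functions):

```c
/* Simplified sketch of how the allocator drives kernel_map_pages(). */
static void free_path(struct page *page, int order)
{
        kernel_map_pages(page, 1 << order, 0); /* unmap: catch use-after-free */
        /* ... put the pages on the free lists ... */
}

static void alloc_path(struct page *page, int order)
{
        /* ... take the pages off the free lists ... */
        kernel_map_pages(page, 1 << order, 1); /* remap before handing out */
}
```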