#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

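/*
 * Page table pages are allocated pre-zeroed (__GFP_ZERO), exempt
 * from kmemcheck tracking (__GFP_NOTRACK), and with the allocator
 * asked to retry harder (__GFP_REPEAT), since failing a page table
 * allocation aborts the whole higher-level operation.
 */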
#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

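/*
 * Kernel PTE pages always come from lowmem; user PTE pages may be
 * allocated from highmem when CONFIG_HIGHPTE is enabled, and also
 * get pgtable_page_ctor() so the split page table lock and
 * NR_PAGETABLE accounting are set up.
 */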
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}

static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);

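/*
 * The ___{pte,pmd,pud}_free_tlb() helpers release page table pages
 * through the mmu_gather batch, so a page is not actually freed
 * until the TLB entries referring to it have been flushed.
 */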
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#if PAGETABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */

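/*
 * pgds that do not share their kernel pmds with swapper_pg_dir are
 * chained on pgd_list (via page->lru of the backing page), so that
 * kernel mapping updates can be propagated to all of them.
 */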
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

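/*
 * Number of pgd entries that are private to each pgd rather than
 * shared with init_mm: just the user range if the kernel pmds are
 * shared, otherwise the entire pgd.
 */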
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

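/*
 * The otherwise-unused page->index field of a pgd's backing page is
 * borrowed to remember which mm the pgd belongs to, so the kernel
 * mapping sync code can get from a pgd page back to its mm (and its
 * page_table_lock).
 */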
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
	virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return (struct mm_struct *)page->index;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    PAGETABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}

static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

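/*
 * Preallocation is all-or-nothing: if any of the PREALLOCATED_PMDS
 * pages cannot be allocated, everything is unwound and -ENOMEM is
 * returned, so pgd_alloc() never sees a partial array.
 */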
static void free_pmds(pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i])
			free_page((unsigned long)pmds[i]);
}

static int preallocate_pmds(pmd_t *pmds[])
{
	int i;
	bool failed = false;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
		if (pmd == NULL)
			failed = true;
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(pmds);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	unsigned long addr;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	pud = pud_offset(pgd, 0);

	for (addr = i = 0; i < PREALLOCATED_PMDS;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];

	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_pmds:
	free_pmds(pmds);
out_free_pgd:
	free_page((unsigned long)pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	free_page((unsigned long)pgd);
}

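/*
 * Called by the generic mm code to install a pte with updated access
 * flags.  Since x86 maintains the accessed and dirty bits in
 * hardware, only a changed entry for a dirtying (write) fault needs
 * an explicit store and a single-page TLB flush here.
 */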
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
		flush_tlb_page(vma, address);
	}

	return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		*pmdp = entry;
		pmd_update_defer(vma->vm_mm, address, pmdp);
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}

	return changed;
}
#endif

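/*
 * Clearing the accessed bit must use an atomic test_and_clear_bit:
 * the CPU can set the bit in hardware concurrently from another
 * thread of the same mm.
 */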
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	if (ret)
		pmd_update(vma->vm_mm, addr, pmdp);

	return ret;
}
#endif

int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}

void pmdp_splitting_flush(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp)
{
	int set;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
				(unsigned long *)pmdp);
	if (set) {
		pmd_update(vma->vm_mm, address, pmdp);
		/* need tlb flush only to serialize against gup-fast */
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
#endif
}

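/*
 * fixmaps_set counts installed fixmap entries; reserve_top_address()
 * uses it to refuse to move __FIXADDR_TOP once any fixmap entry
 * exists, since moving it would invalidate those mappings.
 */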
int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}
447 | } |