/*
 * Copyright 2005, Paul Mackerras, IBM Corporation.
 * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/mm.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mmu.h>
#include <asm/tlb.h>

#include "mmu_decl.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

#if H_PGTABLE_RANGE > (USER_VSID_RANGE * (TASK_SIZE_USER64 / TASK_CONTEXT_SIZE))
#warning Limited user VSID range means pagetable space is wasted
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * vmemmap is the starting address of the virtual address space where
 * struct pages are allocated for all possible PFNs present on the system,
 * including holes and bad memory (hence sparse). These virtual struct
 * pages are stored in sequence in this virtual address space irrespective
 * of whether the corresponding PFN is valid or not. This achieves a
 * constant relationship between the address of a struct page and its PFN.
 *
 * During boot, or when a new memory section is added by memory hotplug,
 * physical memory allocation (including hash table bolting) is performed
 * for the set of struct pages which are part of the memory section. This
 * saves memory by not allocating struct pages for PFNs which are not
 * valid.
 *
 *		----------------------------------------------
 *		| PHYSICAL ALLOCATION OF VIRTUAL STRUCT PAGES|
 *		----------------------------------------------
 *
 *	   f000000000000000                  c000000000000000
 * vmemmap +--------------+                  +--------------+
 *  +      |  page struct | +--------------> |  page struct |
 *  |      +--------------+                  +--------------+
 *  |      |  page struct | +--------------> |  page struct |
 *  |      +--------------+ |                +--------------+
 *  |      |  page struct | +       +------> |  page struct |
 *  |      +--------------+ |       |        +--------------+
 *  |      |  page struct | |       +--> |  page struct |
 *  |      +--------------+ |       |    +--------------+
 *  |      |  page struct | |       |
 *  |      +--------------+ |       |
 *  |      |  page struct | |       |
 *  |      +--------------+ |       |
 *  |      |  page struct | |       |
 *  |      +--------------+ |       |
 *  |      |  page struct | |       |
 *  |      +--------------+ |       |
 *  |      |  page struct | +-------+   |
 *  |      +--------------+             |
 *  |      |  page struct | +-----------+
 *  |      +--------------+
 *  |      |  page struct |    No mapping
 *  |      +--------------+
 *  |      |  page struct |    No mapping
 *  v      +--------------+
 *
 *		-----------------------------------------
 *		| RELATION BETWEEN STRUCT PAGES AND PFNS|
 *		-----------------------------------------
 *
 * vmemmap +--------------+                 +---------------+
 *  +      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  v      +--------------+                 +---------------+
 */
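/*
 * With this layout, the struct page for a given PFN sits at a constant
 * offset from vmemmap, so the sparse helpers reduce to simple pointer
 * arithmetic:
 *
 *	pfn_to_page(pfn)  -> vmemmap + pfn
 *	page_to_pfn(page) -> page - vmemmap
 */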
/*
 * On hash-based CPUs, the vmemmap is bolted in the hash table.
 */
int __meminit hash__vmemmap_create_mapping(unsigned long start,
					   unsigned long page_size,
					   unsigned long phys)
{
	int rc = htab_bolt_mapping(start, start + page_size, phys,
				   pgprot_val(PAGE_KERNEL),
				   mmu_vmemmap_psize, mmu_kernel_ssize);
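	/*
	 * A failed bolt may have partially populated the range; remove
	 * whatever was inserted so the caller sees all-or-nothing
	 * behaviour.
	 */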
	if (rc < 0) {
		int rc2 = htab_remove_mapping(start, start + page_size,
					      mmu_vmemmap_psize,
					      mmu_kernel_ssize);
		BUG_ON(rc2 && (rc2 != -ENOENT));
	}
	return rc;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void hash__vmemmap_remove_mapping(unsigned long start,
				  unsigned long page_size)
{
	int rc = htab_remove_mapping(start, start + page_size,
				     mmu_vmemmap_psize,
				     mmu_kernel_ssize);
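	/*
	 * A missing HPTE (-ENOENT) is only worth a warning; any other
	 * failure at this point is unrecoverable.
	 */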
	BUG_ON((rc < 0) && (rc != -ENOENT));
	WARN_ON(rc == -ENOENT);
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * map_kernel_page is currently only called by __ioremap. It adds an
 * entry to the ioremap page table and an entry to the HPT, possibly
 * bolting it.
 */
int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
	} else {
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping. Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
				      mmu_io_psize, mmu_kernel_ssize)) {
			printk(KERN_ERR "Failed to do bolted mapping IO "
			       "memory at %016lx !\n", pa);
			return -ENOMEM;
		}
	}

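	/*
	 * Write barrier: make sure the PTE (or bolted HPTE) set up above
	 * is visible to other CPUs before the caller uses the mapping.
	 */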
	smp_wmb();
	return 0;
}
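
/*
 * Usage sketch (hypothetical caller, modelled on the __ioremap_at() path),
 * mapping an I/O range one page at a time:
 *
 *	for (i = 0; i < size; i += PAGE_SIZE)
 *		if (map_kernel_page(ea + i, pa + i, flags))
 *			return NULL;
 *
 * where 'flags' carries the cache-inhibited/guarded PTE bits used for
 * I/O mappings.
 */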

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					pmd_t *pmdp, unsigned long clr,
					unsigned long set)
{
	__be64 old_be, tmp;
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!hash__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

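	/*
	 * Atomically update the PMD entry: spin while H_PAGE_BUSY is set
	 * (the entry is owned by a concurrent hash fault), then clear the
	 * 'clr' bits and set the 'set' bits in one ldarx/stdcx. sequence.
	 */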
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		andc	%1,%0,%4 \n\
		or	%1,%1,%7\n\
		stdcx.	%1,0,%3 \n\
		bne-	1b"
	: "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
	: "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );

	old = be64_to_cpu(old_be);

	trace_hugepage_update(addr, old, clr, set);
	if (old & H_PAGE_HASHPTE)
		hpte_do_hugepage_flush(mm, addr, pmdp, old);
	return old;
}

pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));

	pmd = *pmdp;
	pmd_clear(pmdp);
	/*
	 * Wait for all pending hash_page to finish. This is needed
	 * in case of subpage collapse. When we collapse normal pages
	 * to hugepage, we first clear the pmd, then invalidate all
	 * the PTE entries. The assumption here is that any low level
	 * page fault will see a none pmd and take the slow path that
	 * will wait on mmap_sem. But we could very well be in a
	 * hash_page with local ptep pointer value. Such a hash page
	 * can result in adding new HPTE entries for normal subpages.
	 * That means we could be modifying the page content as we
	 * copy them to a huge page. So wait for parallel hash_page
	 * to finish before invalidating HPTE entries. We can do this
	 * by sending an IPI to all the cpus and executing a dummy
	 * function there.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	/*
	 * Now invalidate the hpte entries in the range
	 * covered by pmd. This makes sure we take a
	 * fault and will find the pmd as none, which will
	 * result in a major fault which takes mmap_sem and
	 * hence waits for collapse to complete. Without this
	 * the __collapse_huge_page_copy can result in copying
	 * the old content.
	 */
	flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
	return pmd;
}

/*
 * We want to put the pgtable in pmd and use pgtable for tracking
 * the base page size hptes.
 */
void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pgtable_t *pgtable_slot;

	assert_spin_locked(pmd_lockptr(mm, pmdp));
	/*
	 * we store the pgtable in the second half of PMD
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	*pgtable_slot = pgtable;
	/*
	 * Expose the deposited pgtable to other cpus before we set
	 * the hugepage PTE at pmd level. The hash fault code looks
	 * at the deposited pgtable to store hash index values.
	 */
	smp_wmb();
}

pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;
	pgtable_t *pgtable_slot;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Once we withdraw, mark the entry NULL.
	 */
	*pgtable_slot = NULL;
	/*
	 * We store HPTE information in the deposited PTE fragment.
	 * Zero out the content on withdraw.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return pgtable;
}

/*
 * A linux hugepage PMD was changed and the corresponding hash table entries
 * need to be flushed.
 */
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
			    pmd_t *pmdp, unsigned long old_pmd)
{
	int ssize;
	unsigned int psize;
	unsigned long vsid;
	unsigned long flags = 0;

	/* get the base page size, vsid and segment size */
#ifdef CONFIG_DEBUG_VM
	psize = get_slice_psize(mm, addr);
	BUG_ON(psize == MMU_PAGE_16M);
#endif
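	/*
	 * H_PAGE_COMBO indicates the hugepage was hashed with 4K subpage
	 * HPTEs; otherwise it is backed by 64K HPTEs.
	 */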
	if (old_pmd & H_PAGE_COMBO)
		psize = MMU_PAGE_4K;
	else
		psize = MMU_PAGE_64K;

	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_user_vsid(&mm->context, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}

	if (mm_is_thread_local(mm))
		flags |= HPTE_LOCAL_UPDATE;

	return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
}

pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	pgtable_t pgtable;
	unsigned long old;
	pgtable_t *pgtable_slot;

	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * We have pmd == none and we are holding page_table_lock.
	 * So we can safely go and clear the pgtable hash
	 * index info.
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Zero out the old valid and hash index details that the
	 * hash fault code looks at.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	/*
	 * Serialize against find_current_mm_pte variants which do lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since the format of pte_t is different from
	 * pmd_t we want to prevent transit from pmd pointing to page table
	 * to pmd pointing to huge page (and back) while interrupts are disabled.
	 * We clear pmd to possibly replace it with page table pointer in
	 * different code paths. So make sure we wait for the parallel
	 * find_current_mm_pte to finish.
	 */
	serialize_against_pte_lookup(mm);
	return old_pmd;
}

int hash__has_transparent_hugepage(void)
{
	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return 0;
	/*
	 * We support THP only if PMD_SIZE is 16MB.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
		return 0;
	/*
	 * We need to make sure that we support 16MB hugepage in a segment
	 * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE
	 * of 64K.
	 */
	/*
	 * If we have 64K HPTE, we will be using that by default
	 */
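	/*
	 * penc[] is the hardware page-size encoding for an (actual, base)
	 * page size pair; -1 means the MMU cannot map 16M pages in a
	 * segment with that base page size.
	 */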
	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
	    (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
		return 0;
	/*
	 * Ok we only have 4K HPTE
	 */
	if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
		return 0;

	return 1;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_STRICT_KERNEL_RWX
static bool hash__change_memory_range(unsigned long start, unsigned long end,
				      unsigned long newpp)
{
	unsigned long idx;
	unsigned int step, shift;

	shift = mmu_psize_defs[mmu_linear_psize].shift;
	step = 1 << shift;

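	/*
	 * The linear mapping is bolted with mmu_linear_psize pages, so
	 * protections can only be changed at that granularity; round the
	 * range out to whole mapping pages.
	 */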
	start = ALIGN_DOWN(start, step);
	end = ALIGN(end, step); // aligns up

	if (start >= end)
		return false;

	pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
		 start, end, newpp, step);

	for (idx = start; idx < end; idx += step)
		/* Not sure if we can do much with the return value */
		mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
						 mmu_kernel_ssize);

	return true;
}

void hash__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	WARN_ON(!hash__change_memory_range(start, end, PP_RXXX));
}

void hash__mark_initmem_nx(void)
{
	unsigned long start, end, pp;

	start = (unsigned long)__init_begin;
	end = (unsigned long)__init_end;

	pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));

	WARN_ON(!hash__change_memory_range(start, end, pp));
}
#endif