// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2007-2008 Paul Mackerras, IBM Corp.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <linux/uaccess.h>

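/*
 * The protection map is a three-level table of 2-bit protection
 * fields.  A sketch of the layout, inferred from the SBP_* accessors
 * used below (the struct and the SBP_* constants themselves are
 * defined elsewhere, in the book3s64 hash MMU headers):
 *
 *	low_prot[4]  - pages of u32 protection words covering the
 *		       first 4GB (with 64k pages, each page of words
 *		       covers 1GB);
 *	protptrs[]   - indexed by addr >> SBP_L3_SHIFT; each entry
 *		       points to a page of SBP_L2_COUNT pointers to
 *		       pages of u32 protection words, SBP_L1_COUNT
 *		       per page, one word per 64k page.
 */
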
/*
 * Free all pages allocated for subpage protection maps and pointers.
 * Also makes sure that the subpage_prot_table structure is
 * reinitialized for the next user.
 */
void subpage_prot_free(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
	unsigned long i, j, addr;
	u32 **p;

	if (!spt)
		return;

	for (i = 0; i < 4; ++i) {
		if (spt->low_prot[i]) {
			free_page((unsigned long)spt->low_prot[i]);
			spt->low_prot[i] = NULL;
		}
	}
	addr = 0;
	for (i = 0; i < (TASK_SIZE_USER64 >> 43); ++i) {
		p = spt->protptrs[i];
		if (!p)
			continue;
		spt->protptrs[i] = NULL;
		for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
		     ++j, addr += PAGE_SIZE)
			if (p[j])
				free_page((unsigned long)p[j]);
		free_page((unsigned long)p);
	}
	spt->maxaddr = 0;
	kfree(spt);
}

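/*
 * Flush any hash PTEs cached for 'npages' small pages starting at
 * 'addr'.  The no-op pte_update() (clr = 0, set = 0) causes the stale
 * HPTE for each present PTE to be invalidated, so the pages will be
 * refaulted with the updated subpage protection bits.
 */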
static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
			     int npages)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	for (; npages > 0; --npages) {
		pte_update(mm, addr, pte, 0, 0, 0);
		addr += PAGE_SIZE;
		++pte;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}

/*
 * Clear the subpage protection map for an address range, allowing
 * all accesses that are allowed by the pte permissions.
 */
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;

	down_write(&mm->mmap_sem);

	spt = mm_ctx_subpage_prot(&mm->context);
	if (!spt)
		goto err_out;

	limit = addr + len;
	if (limit > spt->maxaddr)
		limit = spt->maxaddr;
	for (; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm)
				continue;
		}
		spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
		if (!spp)
			continue;
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		memset(spp, 0, nw * sizeof(u32));

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}

err_out:
	up_write(&mm->mmap_sem);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
				  unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	split_huge_pmd(vma, pmd, addr);
	return 0;
}

static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	struct vm_area_struct *vma;
	struct mm_walk subpage_proto_walk = {
		.mm = mm,
		.pmd_entry = subpage_walk_pmd_entry,
	};

	/*
	 * We don't try too hard, we just mark all the vmas in that
	 * range VM_NOHUGEPAGE and split them.
	 */
	vma = find_vma(mm, addr);
	/*
	 * If the range is entirely within an unmapped region, just return.
	 */
	if (vma && ((addr + len) <= vma->vm_start))
		return;

	while (vma) {
		if (vma->vm_start >= (addr + len))
			break;
		vma->vm_flags |= VM_NOHUGEPAGE;
		walk_page_vma(vma, &subpage_proto_walk);
		vma = vma->vm_next;
	}
}
#else
static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	return;
}
#endif

/*
 * Copy in a subpage protection map for an address range.
 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
 * 2 or 3 to prevent all accesses.
 * Note that the normal page protections also apply; the subpage
 * protection mechanism is an additional constraint, so putting 0
 * in a 2-bit field won't allow writes to a page that is otherwise
 * write-protected.
 */
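/*
 * For example, a process could make every 4k subpage of the 64k page
 * at 'addr' read-only with something like this sketch (error handling
 * omitted; setting every 2-bit field to 1 avoids depending on the
 * ordering of the fields within each word):
 *
 *	u32 map = 0x55555555;	// 16 x 2-bit fields, each set to 1
 *	syscall(__NR_subpage_prot, addr, 0x10000, &map);
 */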
SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
		unsigned long, len, u32 __user *, map)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;
	int err;

	/* Subpage protection is implemented only for the hash MMU. */
	if (radix_enabled())
		return -ENOENT;

	/* Check parameters */
	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
	    addr >= mm->task_size || len >= mm->task_size ||
	    addr + len > mm->task_size)
		return -EINVAL;

	if (is_hugepage_only_range(mm, addr, len))
		return -EINVAL;

	if (!map) {
		/* Clear out the protection map for the address range */
		subpage_prot_clear(addr, len);
		return 0;
	}

	if (!access_ok(map, (len >> PAGE_SHIFT) * sizeof(u32)))
		return -EFAULT;

	down_write(&mm->mmap_sem);

	spt = mm_ctx_subpage_prot(&mm->context);
	if (!spt) {
		/*
		 * Allocate the subpage prot table if not already done.
		 * Do this with mmap_sem held.
		 */
		spt = kzalloc(sizeof(struct subpage_prot_table), GFP_KERNEL);
		if (!spt) {
			err = -ENOMEM;
			goto out;
		}
		mm->context.hash_context->spt = spt;
	}

	subpage_mark_vma_nohuge(mm, addr, len);
	for (limit = addr + len; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		err = -ENOMEM;
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm) {
				spm = (u32 **)get_zeroed_page(GFP_KERNEL);
				if (!spm)
					goto out;
				spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
			}
		}
		spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
		spp = *spm;
		if (!spp) {
			spp = (u32 *)get_zeroed_page(GFP_KERNEL);
			if (!spp)
				goto out;
			*spm = spp;
		}
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

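		/*
		 * Subpage protection is only usable with 4k hash PTEs,
		 * so demote the segment containing this address to 4k
		 * base page size (a no-op if already demoted).
		 */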
		local_irq_disable();
		demote_segment_4k(mm, addr);
		local_irq_enable();

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		/*
		 * Drop mmap_sem while copying from userspace, since the
		 * copy may fault and the fault handler takes mmap_sem.
		 */
		up_write(&mm->mmap_sem);
		if (__copy_from_user(spp, map, nw * sizeof(u32)))
			return -EFAULT;
		map += nw;
		down_write(&mm->mmap_sem);

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	if (limit > spt->maxaddr)
		spt->maxaddr = limit;
	err = 0;
out:
	up_write(&mm->mmap_sem);
	return err;
}