// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MMU context allocation for 64-bit kernels.
 *
 * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/cpu.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "internal.h"

static DEFINE_IDA(mmu_context_ida);

static int alloc_context_id(int min_id, int max_id)
{
	return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
}
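
/*
 * Note: both MMU flavours allocate from the single mmu_context_ida
 * above -- hash draws user context ids from it, radix draws hardware
 * PIDs. ida_alloc_range() returns a free id in [min_id, max_id] or a
 * negative errno (e.g. -ENOSPC once the range is exhausted), so
 * callers only need to check for a negative return value.
 */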

#ifdef CONFIG_PPC_64S_HASH_MMU
void __init hash__reserve_context_id(int id)
{
	int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);

	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
}
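
/*
 * hash__reserve_context_id() lets early boot code pin a specific id
 * before user allocations begin: allocating the single-id range
 * [id, id] either returns exactly that id or fails, hence the WARN
 * rather than an error return.
 */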

int hash__alloc_context_id(void)
{
	unsigned long max;

	if (mmu_has_feature(MMU_FTR_68_BIT_VA))
		max = MAX_USER_CONTEXT;
	else
		max = MAX_USER_CONTEXT_65BIT_VA;

	return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);
#endif
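
/*
 * A rough sketch of the ranges above: on hash, each context id covers
 * one 512TB (2^49 byte) slice of effective address space, so the
 * number of available ids is what bounds the user-visible VA width
 * (65-bit vs 68-bit, depending on MMU_FTR_68_BIT_VA). Ids below
 * MIN_USER_CONTEXT are kept back for kernel use.
 */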

#ifdef CONFIG_PPC_64S_HASH_MMU
static int realloc_context_ids(mm_context_t *ctx)
{
	int i, id;

	/*
	 * id 0 (aka. ctx->id) is special, we always allocate a new one, even if
	 * there wasn't one allocated previously (which happens in the exec
	 * case where ctx is newly allocated).
	 *
	 * We have to be a bit careful here. We must keep the existing ids in
	 * the array, so that we can test if they're non-zero to decide if we
	 * need to allocate a new one. However in case of error we must free the
	 * ids we've allocated but *not* any of the existing ones (or risk a
	 * UAF). That's why we decrement i at the start of the error handling
	 * loop, to skip the id that we just tested but couldn't reallocate.
	 */
	for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) {
		if (i == 0 || ctx->extended_id[i]) {
			id = hash__alloc_context_id();
			if (id < 0)
				goto error;

			ctx->extended_id[i] = id;
		}
	}

	/* The caller expects us to return id */
	return ctx->id;

error:
	for (i--; i >= 0; i--) {
		if (ctx->extended_id[i])
			ida_free(&mmu_context_ida, ctx->extended_id[i]);
	}

	return id;
}
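
/*
 * For context: in mm_context_t, id and extended_id[] are members of
 * the same union, so extended_id[0] aliases ctx->id and covers the
 * first 512TB range. Slots above 0 are only populated once the mm
 * actually maps something beyond 512TB, which is why the loop above
 * re-allocates only the non-zero entries on fork.
 */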

static int hash__init_new_context(struct mm_struct *mm)
{
	int index;

	mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
					   GFP_KERNEL);
	if (!mm->context.hash_context)
		return -ENOMEM;

	/*
	 * The old code would re-promote on fork, we don't do that when using
	 * slices as it could cause problems promoting slices that have been
	 * forced down to 4K.
	 *
	 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which will
	 * have id == 0) and don't alter context slice inherited via fork (which
	 * will have id != 0).
	 *
	 * We should not be calling init_new_context() on init_mm. Hence a
	 * check against 0 is OK.
	 */
	if (mm->context.id == 0) {
		memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context));
		slice_init_new_context_exec(mm);
	} else {
		/* This is fork. Copy hash_context details from current->mm */
		memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context));
#ifdef CONFIG_PPC_SUBPAGE_PROT
		/* inherit subpage prot details if we have one. */
		if (current->mm->context.hash_context->spt) {
			mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
								GFP_KERNEL);
			if (!mm->context.hash_context->spt) {
				kfree(mm->context.hash_context);
				return -ENOMEM;
			}
		}
#endif
	}

	index = realloc_context_ids(&mm->context);
	if (index < 0) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
		kfree(mm->context.hash_context->spt);
#endif
		kfree(mm->context.hash_context);
		return index;
	}

	pkey_mm_init(mm);
	return index;
}
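
/*
 * pkey_mm_init() above seeds the mm's memory protection-key state (the
 * initial key allocation map and the execute-only key); on CPUs
 * without the pkey feature it is a no-op.
 */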

void hash__setup_new_exec(void)
{
	slice_setup_new_exec();

	slb_setup_new_exec();
}
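
/*
 * hash__setup_new_exec() runs on the exec path only (fork inherits the
 * parent's state instead): the slice state is re-initialized for the
 * fresh address space, and the SLB code gets a chance to set up for
 * the new image.
 */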
#else
static inline int hash__init_new_context(struct mm_struct *mm)
{
	BUILD_BUG();
	return 0;
}
#endif
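
/*
 * The !CONFIG_PPC_64S_HASH_MMU stub above relies on dead-code
 * elimination: with the hash MMU compiled out, radix_enabled() is
 * constant true, so the hash__init_new_context() call site below is
 * unreachable and BUILD_BUG() verifies at compile time that the
 * compiler has discarded it.
 */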

static int radix__init_new_context(struct mm_struct *mm)
{
	unsigned long rts_field;
	int index, max_id;

	max_id = (1 << mmu_pid_bits) - 1;
	index = alloc_context_id(mmu_base_pid, max_id);
	if (index < 0)
		return index;

	/*
	 * Set the process table entry.
	 */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * Order the above store with subsequent update of the PID
	 * register (at which point HW can start loading/caching
	 * the entry) and the corresponding load by the MMU from
	 * the L2 cache.
	 */
	asm volatile("ptesync;isync" : : : "memory");

#ifdef CONFIG_PPC_64S_HASH_MMU
	mm->context.hash_context = NULL;
#endif

	return index;
}
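
/*
 * On radix the allocated id is a hardware PID: translations are tagged
 * with it and the MMU indexes process_tb[] by it to find the page
 * tables. Once the PID is written to the PID SPR the hardware may
 * fetch and cache process_tb[index] at any time, which is what the
 * ptesync;isync above orders against.
 */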

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	if (radix_enabled())
		index = radix__init_new_context(mm);
	else
		index = hash__init_new_context(mm);

	if (index < 0)
		return index;

	mm->context.id = index;

	mm->context.pte_frag = NULL;
	mm->context.pmd_frag = NULL;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(mm);
#endif
	atomic_set(&mm->context.active_cpus, 0);
	atomic_set(&mm->context.copros, 0);

	return 0;
}
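
/*
 * init_new_context() is the arch hook invoked from mm_init() in
 * kernel/fork.c, i.e. on every fork() and exec(), before the new mm
 * can be switched to.
 */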

void __destroy_context(int context_id)
{
	ida_free(&mmu_context_ida, context_id);
}
EXPORT_SYMBOL_GPL(__destroy_context);

static void destroy_contexts(mm_context_t *ctx)
{
	if (radix_enabled()) {
		ida_free(&mmu_context_ida, ctx->id);
	} else {
#ifdef CONFIG_PPC_64S_HASH_MMU
		int index, context_id;

		for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
			context_id = ctx->extended_id[index];
			if (context_id)
				ida_free(&mmu_context_ida, context_id);
		}
		kfree(ctx->hash_context);
#else
		BUILD_BUG(); // radix_enabled() should be constant true
#endif
	}
}

static void pmd_frag_destroy(void *pmd_frag)
{
	int count;
	struct page *page;

	page = virt_to_page(pmd_frag);
	/* drop all the pending references */
	count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
	/* We allow PMD_FRAG_NR fragments from a PMD page */
	if (atomic_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
		pgtable_pmd_page_dtor(page);
		__free_page(page);
	}
}
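
/*
 * Worked example of the accounting above (illustrative numbers, not
 * the configured PMD_FRAG_NR): suppose PMD_FRAG_NR == 4 and the cached
 * pmd_frag pointer sits at fragment index 1, so count == 1. Fragment 0
 * has been handed out; fragments 1-3 have not, and the cache still
 * holds their three references. atomic_sub_and_test() drops those
 * 4 - 1 = 3 references, and the page is freed only once the user of
 * fragment 0 drops its reference too.
 */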

static void destroy_pagetable_cache(struct mm_struct *mm)
{
	void *frag;

	frag = mm->context.pte_frag;
	if (frag)
		pte_frag_destroy(frag);

	frag = mm->context.pmd_frag;
	if (frag)
		pmd_frag_destroy(frag);
}

void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
	/*
	 * For tasks which were successfully initialized we end up calling
	 * arch_exit_mmap(), which clears the process table entry. And
	 * arch_exit_mmap() is called before the required fullmm TLB flush
	 * which does a RIC=2 flush. Hence for an initialized task, any cached
	 * process table entries have already been invalidated by this point.
	 *
	 * The condition below handles the error case during task init. We have
	 * set the process table entry early and if we fail a task
	 * initialization, we need to ensure the process table entry is zeroed.
	 * We need not worry about process table entry caches because the task
	 * never ran with the PID value.
	 */
	if (radix_enabled())
		process_tb[mm->context.id].prtb0 = 0;
	else
		subpage_prot_free(mm);
	destroy_contexts(&mm->context);
	mm->context.id = MMU_NO_CONTEXT;
}

void arch_exit_mmap(struct mm_struct *mm)
{
	destroy_pagetable_cache(mm);

	if (radix_enabled()) {
		/*
		 * Radix doesn't have a valid bit in the process table
		 * entries. However we know that at least the P9 implementation
		 * will avoid caching an entry with an invalid RTS field,
		 * and 0 is invalid. So this will do.
		 *
		 * This runs before the "fullmm" tlb flush in exit_mmap,
		 * which does a RIC=2 tlbie to clear the process table
		 * entry. See the "fullmm" comments in tlb-radix.c.
		 *
		 * No barrier required here after the store because
		 * this process will do the invalidate, which starts with
		 * ptesync.
		 */
		process_tb[mm->context.id].prtb0 = 0;
	}
}

#ifdef CONFIG_PPC_RADIX_MMU
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	mtspr(SPRN_PID, next->context.id);
	isync();
}
#endif
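
/*
 * The isync above ensures instructions after the context switch are
 * fetched and translated under the new PID. No TLB flush is needed on
 * the way out: radix TLB entries are PID-tagged, so the old mm's
 * entries simply stop matching.
 */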

/**
 * cleanup_cpu_mmu_context - Clean up MMU details for this CPU (newly offlined)
 *
 * This clears the CPU from mm_cpumask for all processes, and then flushes the
 * local TLB to ensure TLB coherency in case the CPU is onlined again.
 *
 * KVM guest translations are not necessarily flushed here. If KVM started
 * using mm_cpumask or the Linux APIs which do, this would have to be resolved.
 */
#ifdef CONFIG_HOTPLUG_CPU
void cleanup_cpu_mmu_context(void)
{
	int cpu = smp_processor_id();

	clear_tasks_mm_cpumask(cpu);
	tlbiel_all();
}
#endif
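
/*
 * cleanup_cpu_mmu_context() is expected to be called from the
 * platform's CPU-offline path, once the CPU can no longer be running
 * user tasks.
 */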