/*
 * MMU context allocation for 64-bit kernels.
 *
 * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "icswx.h"

static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);

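/*
 * Allocate a context id from the IDA, constrained to [min_id, max_id].
 * ida_get_new_above() may need more preallocated memory while the lock
 * is held, so retry the pre-get/get cycle on -EAGAIN. An id beyond
 * max_id is handed back to the IDA and the allocation fails with
 * -ENOMEM.
 */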
static int alloc_context_id(int min_id, int max_id)
{
	int index, err;

again:
	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > max_id) {
		spin_lock(&mmu_context_lock);
		ida_remove(&mmu_context_ida, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}
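/*
 * Reserve a specific context id: ask the IDA for the first free id at
 * or above @id. If anything other than @id comes back, the requested
 * id was already taken; warn rather than fail outright.
 */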
void hash__reserve_context_id(int id)
{
	int rc, result = 0;

	do {
		if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
			break;

		spin_lock(&mmu_context_lock);
		rc = ida_get_new_above(&mmu_context_ida, id, &result);
		spin_unlock(&mmu_context_lock);
	} while (rc == -EAGAIN);

	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
}

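/*
 * Allocate a user context id for the hash MMU. The top of the range
 * depends on whether the CPU supports 68-bit virtual addresses.
 */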
int hash__alloc_context_id(void)
{
	unsigned long max;

	if (mmu_has_feature(MMU_FTR_68_BIT_VA))
		max = MAX_USER_CONTEXT;
	else
		max = MAX_USER_CONTEXT_65BIT_VA;

	return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);

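/*
 * Hash flavour of context initialisation: allocate a context id and
 * set up the slice and subpage-protection state for a fresh mm.
 */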
static int hash__init_new_context(struct mm_struct *mm)
{
	int index;

	index = hash__alloc_context_id();
	if (index < 0)
		return index;

	/*
	 * We do switch_slb() early in fork, even before we set up
	 * mm->context.addr_limit. Default to the max task size so that we
	 * copy the default values to the paca, which helps us handle SLB
	 * misses early.
	 */
	mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;

	/*
	 * The old code would re-promote on fork; we don't do that when using
	 * slices as it could cause problems promoting slices that have been
	 * forced down to 4K.
	 *
	 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which
	 * will have id == 0) and don't alter context slices inherited via
	 * fork (which will have id != 0).
	 *
	 * We should not be calling init_new_context() on init_mm, hence a
	 * check against 0 is OK.
	 */
	if (mm->context.id == 0)
		slice_set_user_psize(mm, mmu_virtual_psize);

	subpage_prot_init_new_context(mm);

	return index;
}

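/*
 * On radix the context id doubles as the hardware PID (see
 * radix__switch_mmu_context() below) and indexes the process table,
 * so allocation is bounded by PRTB_ENTRIES and starts at 1, leaving
 * entry 0 untouched.
 */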
static int radix__init_new_context(struct mm_struct *mm)
{
	unsigned long rts_field;
	int index;

	index = alloc_context_id(1, PRTB_ENTRIES - 1);
	if (index < 0)
		return index;

	/*
	 * Set the process table entry.
	 */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	mm->context.npu_context = NULL;

	return index;
}
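/*
 * Arch hook invoked when a new mm is created: pick the hash or radix
 * path to get a context id, then initialise the generic parts of
 * mm->context (ICSWX lock, PTE fragment cursor, IOMMU state).
 */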
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	if (radix_enabled())
		index = radix__init_new_context(mm);
	else
		index = hash__init_new_context(mm);

	if (index < 0)
		return index;

	mm->context.id = index;
#ifdef CONFIG_PPC_ICSWX
	mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
	if (!mm->context.cop_lockp) {
		__destroy_context(index);
		subpage_prot_free(mm);
		mm->context.id = MMU_NO_CONTEXT;
		return -ENOMEM;
	}
	spin_lock_init(mm->context.cop_lockp);
#endif /* CONFIG_PPC_ICSWX */

#ifdef CONFIG_PPC_64K_PAGES
	mm->context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(mm);
#endif
	return 0;
}

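/*
 * Give a context id back to the IDA. Any other teardown tied to the
 * id is the caller's responsibility.
 */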
void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	ida_remove(&mmu_context_ida, context_id);
	spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);

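/*
 * With 64K pages a PTE page is carved into PTE_FRAG_NR fragments and
 * the page's refcount tracks them; the low bits of the pte_frag
 * cursor say how many fragments have been handed out so far. Drop the
 * references held for the fragments that were never handed out, and
 * free the page if that was the last reference.
 */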
#ifdef CONFIG_PPC_64K_PAGES
static void destroy_pagetable_page(struct mm_struct *mm)
{
	int count;
	void *pte_frag;
	struct page *page;

	pte_frag = mm->context.pte_frag;
	if (!pte_frag)
		return;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

#else
static inline void destroy_pagetable_page(struct mm_struct *mm)
{
	return;
}
#endif

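/*
 * Tear down an mm's MMU context: invalidate the radix process table
 * entry (or free hash subpage-protection state), release any PTE
 * fragment page, and return the context id to the allocator.
 */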
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
#ifdef CONFIG_PPC_ICSWX
	drop_cop(mm->context.acop, mm);
	kfree(mm->context.cop_lockp);
	mm->context.cop_lockp = NULL;
#endif /* CONFIG_PPC_ICSWX */

	if (radix_enabled()) {
		/*
		 * Radix doesn't have a valid bit in the process table
		 * entries. However, we know that at least the P9
		 * implementation will avoid caching an entry with an invalid
		 * RTS field, and 0 is invalid. So this will do.
		 */
		process_tb[mm->context.id].prtb0 = 0;
	} else
		subpage_prot_free(mm);
	destroy_pagetable_page(mm);
	__destroy_context(mm->context.id);
	mm->context.id = MMU_NO_CONTEXT;
}

#ifdef CONFIG_PPC_RADIX_MMU
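/*
 * Switch the hardware PID to the next mm's context id. POWER9 DD1
 * (early silicon) needs the extra isync and explicit ERAT invalidate;
 * later parts only require the PID write followed by isync.
 */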
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		isync();
		mtspr(SPRN_PID, next->context.id);
		isync();
		asm volatile(PPC_INVALIDATE_ERAT : : : "memory");
	} else {
		mtspr(SPRN_PID, next->context.id);
		isync();
	}
}
#endif