/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#define ALLOC_ORDER	2
#define FRAG_MASK	0x03

unsigned int HPAGE_SHIFT;

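/*
 * A sketch of the sizing behind the two constants above (derived from
 * the code below, not a comment from the original source): region and
 * segment (CRST) tables have 2048 eight-byte entries, i.e. 16KB, so
 * they are allocated as order-2 (four page) blocks. Page tables proper
 * have only 256 entries (2KB, or 4KB with the pgste extension), so two
 * of them fit into one 4KB page; the two bits of FRAG_MASK track which
 * 2KB fragments of such a page are in use.
 */
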
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm) {
		clear_user_asce();
		set_user_asce(mm);
	}
	__tlb_flush_local();
}

int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;
	int flush;

	BUG_ON(limit > (1UL << 53));
	flush = 0;
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
		flush = 1;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	if (flush)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return 0;
}

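/*
 * Illustrative upgrade sequence (an assumption based on the logic
 * above, not text from the original source): a task running with a
 * three-level (region-third) table covers 4TB (1UL << 42). An mmap
 * above that limit calls crst_table_upgrade(mm, addr + len); the loop
 * installs a region-second table on top of the old top level, raising
 * asce_limit to 1UL << 53, and finally reloads the user ASCE on every
 * CPU running this mm via on_each_cpu(__crst_table_upgrade).
 */
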
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	if (current->active_mm == mm)
		set_user_asce(mm);
}

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < (1UL << 31)) {
		limit = (1UL << 31) - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < (1UL << 42)) {
		limit = (1UL << 42) - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < (1UL << 53)) {
		limit = (1UL << 53) - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	down_write(&mm->mmap_sem);
	list_add(&gmap->list, &mm->context.gmap_list);
	up_write(&mm->mmap_sem);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);

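/*
 * Minimal usage sketch (hypothetical values, assuming a caller such as
 * a hypervisor that already holds a reference to the task's mm):
 *
 *	struct gmap *g = gmap_alloc(mm, 1UL << 33);	/. 8GB guest ./
 *	if (!g)
 *		return -ENOMEM;
 *	/. back guest real 0..1GB with a 1GB host region: ./
 *	rc = gmap_map_segment(g, host_start, 0, 1UL << 30);
 *
 * The 8GB limit selects a region-third table and an ASCE covering 4TB;
 * faults inside the range are then resolved on demand via gmap_fault().
 */
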
static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_asce(gmap->mm, gmap->asce);
	else
		__tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}

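/*
 * Why the detour through indices[16] (a reading of the code above, not
 * a comment from the original source): radix_tree_delete() may collapse
 * interior nodes, which would invalidate an iterator that is still
 * walking the tree. The loop therefore gathers up to 16 indices per
 * pass, deletes them outside the iteration, and restarts the walk from
 * the last deleted index until the tree is empty.
 */
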
/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb. */
	if (MACHINE_HAS_IDTE)
		__tlb_flush_asce(gmap->mm, gmap->asce);
	else
		__tlb_flush_global();

	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, ALLOC_ORDER);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);
	down_write(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	up_write(&gmap->mm->mmap_sem);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->mm->page_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->mm->page_table_lock);
	if (page)
		__free_pages(page, ALLOC_ORDER);
	return 0;
}

/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}

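/*
 * Worked example for the arithmetic above (hypothetical addresses, not
 * from the original source): a segment table is 2048 entries * 8 bytes
 * = 16KB. For an entry at 0x10238 inside a table based at 0x10000,
 * offset becomes (0x10238 / 8) & 0x7ff = 0x47, times PMD_SIZE (1MB).
 * Masking with ~(16KB - 1) recovers the table base, whose struct page
 * carries the guest address of the table in page->index (stored by
 * gmap_alloc_table), so the result is page->index + 0x4700000.
 */
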
/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_INVALID);
		*entry = _SEGMENT_ENTRY_INVALID;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len > TASK_MAX_SIZE || to + len > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);

/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);

/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
static void gmap_unlink(struct mm_struct *mm, unsigned long *table,
			unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	list_for_each_entry(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
}

/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int rc;

	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr >> 53) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & 0xffe0000000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr >> 42) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & 0xfffffc0000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr >> 31) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & 0xffffffff80000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr >> 20) & 0x7ff;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* large pmds cannot yet be handled */
	if (pmd_large(*pmd))
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_INVALID) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc)
			*table = pmd_val(*pmd);
	} else
		rc = 0;
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}

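/*
 * Address decomposition used above (a restatement of the shifts, not
 * original text): each table level indexes 11 bits of the guest
 * address, so a four-level walk uses (gaddr >> 42) & 0x7ff for the
 * region-second table, (gaddr >> 31) & 0x7ff for the region-third
 * table, and (gaddr >> 20) & 0x7ff for the segment table entry that
 * finally receives the host pmd value. The masks passed to
 * gmap_alloc_table (0xffe0000000000000UL etc.) are the guest addresses
 * of the first byte covered by the new table, stored in page->index
 * for the reverse lookup done by __gmap_segment_gaddr().
 */
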
/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;

	down_read(&gmap->mm->mmap_sem);
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags)) {
		rc = -EFAULT;
		goto out_up;
	}
	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);

static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm)
{
	if (!non_swap_entry(entry))
		dec_mm_counter(mm, MM_SWAPENTS);
	else if (is_migration_entry(entry)) {
		struct page *page = migration_entry_to_page(entry);

		if (PageAnon(page))
			dec_mm_counter(mm, MM_ANONPAGES);
		else
			dec_mm_counter(mm, MM_FILEPAGES);
	}
	free_swap_and_cache(entry);
}

/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr, ptev, pgstev;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	pgste_t pgste;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (!vmaddr)
		return;
	vmaddr |= gaddr & ~PMD_MASK;
	/* Get pointer to the page table entry */
	ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
	if (unlikely(!ptep))
		return;
	pte = *ptep;
	if (!pte_swap(pte))
		goto out_pte;
	/* Zap unused and logically-zero pages */
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	ptev = pte_val(pte);
	if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
	    ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID))) {
		gmap_zap_swap_entry(pte_to_swp_entry(pte), gmap->mm);
		pte_clear(gmap->mm, vmaddr, ptep);
	}
	pgste_set_unlock(ptep, pgste);
out_pte:
	pte_unmap_unlock(ptep, ptl);
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size, NULL);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_ipte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);

/**
 * gmap_unregister_ipte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_init(&nb->list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);

/**
 * gmap_ipte_notify - mark a range of ptes for invalidation notification
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 *
 * Returns 0 if for each page in the given range a gmap mapping exists and
 * the invalidation notification could be set. If the gmap mapping is missing
 * for one or more pages -EFAULT is returned. If no memory could be allocated
 * -ENOMEM is returned. This function establishes missing page table entries.
 */
int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
{
	unsigned long addr;
	spinlock_t *ptl;
	pte_t *ptep, entry;
	pgste_t pgste;
	int rc = 0;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK))
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	while (len) {
		/* Convert gmap address and connect the page tables */
		addr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(addr)) {
			rc = addr;
			break;
		}
		/* Get the page mapped */
		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
			rc = -EFAULT;
			break;
		}
		rc = __gmap_link(gmap, gaddr, addr);
		if (rc)
			break;
		/* Walk the process page table, lock and get pte pointer */
		ptep = get_locked_pte(gmap->mm, addr, &ptl);
		VM_BUG_ON(!ptep);
		/* Set notification bit in the pgste of the pte */
		entry = *ptep;
		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
			pgste = pgste_get_lock(ptep);
			pgste_val(pgste) |= PGSTE_IN_BIT;
			pgste_set_unlock(ptep, pgste);
			gaddr += PAGE_SIZE;
			len -= PAGE_SIZE;
		}
		pte_unmap_unlock(ptep, ptl);
	}
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_ipte_notify);

/**
 * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte)
{
	unsigned long offset, gaddr;
	unsigned long *table;
	struct gmap_notifier *nb;
	struct gmap *gmap;

	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	offset = offset * (4096 / sizeof(pte_t));
	spin_lock(&gmap_notifier_lock);
	list_for_each_entry(gmap, &mm->context.gmap_list, list) {
		table = radix_tree_lookup(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (!table)
			continue;
		gaddr = __gmap_segment_gaddr(table) + offset;
		list_for_each_entry(nb, &gmap_notifier_list, list)
			nb->notifier_call(gmap, gaddr);
	}
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_do_ipte_notify);

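/*
 * The offset arithmetic above, spelled out (a reading of the code, not
 * original text): a 2KB page table holds 256 ptes, so masking the pte
 * pointer with 255 * sizeof(pte_t) yields its byte offset within the
 * table; multiplying by 4096 / sizeof(pte_t) turns that byte offset
 * into pte_index * PAGE_SIZE. Added to the guest address of the
 * segment, this reconstructs the guest page address whose mapping was
 * just invalidated, which is then handed to every registered notifier.
 */
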
static inline int page_table_with_pgste(struct page *page)
{
	return atomic_read(&page->_mapcount) == 0;
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	atomic_set(&page->_mapcount, 0);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	__free_page(page);
}

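/*
 * How page->_mapcount doubles as allocation state here (a summary of
 * the conventions used throughout this file, not original text):
 * -1 marks a page that is free or ordinarily mapped, 0 marks a full
 * 4KB pgste page table (256 ptes plus 256 pgstes), and for fragmented
 * pages bits 0-1 track which 2KB halves are allocated while bits 4-5
 * mark halves that sit in an RCU batch awaiting their final free.
 */
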
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned long key, bool nq)
{
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;

	down_read(&mm->mmap_sem);
retry:
	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep)) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}
	if (!(pte_val(*ptep) & _PAGE_INVALID) &&
	    (pte_val(*ptep) & _PAGE_PROTECT)) {
		pte_unmap_unlock(ptep, ptl);
		if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		goto retry;
	}

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long address, bits, skey;

		address = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(address);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(address, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	up_read(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);

unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;
	uint64_t physaddr;
	unsigned long key = 0;

	down_read(&mm->mmap_sem);
	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep)) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}
	pgste = pgste_get_lock(ptep);

	if (pte_val(*ptep) & _PAGE_INVALID) {
		key |= (pgste_val(pgste) & PGSTE_ACC_BITS) >> 56;
		key |= (pgste_val(pgste) & PGSTE_FP_BIT) >> 56;
		key |= (pgste_val(pgste) & PGSTE_GR_BIT) >> 48;
		key |= (pgste_val(pgste) & PGSTE_GC_BIT) >> 48;
	} else {
		physaddr = pte_val(*ptep) & PAGE_MASK;
		key = page_get_storage_key(physaddr);

		/* Reflect guest's logical view, not physical */
		if (pgste_val(pgste) & PGSTE_GR_BIT)
			key |= _PAGE_REFERENCED;
		if (pgste_val(pgste) & PGSTE_GC_BIT)
			key |= _PAGE_CHANGED;
	}

	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	up_read(&mm->mmap_sem);
	return key;
}
EXPORT_SYMBOL(get_guest_storage_key);

static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

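/*
 * The table above surfaces as /proc/sys/vm/allocate_pgste. Presumably
 * a management tool or an explicit sysctl setting flips it before a
 * KVM guest is started, e.g.:
 *
 *	echo 1 > /proc/sys/vm/allocate_pgste
 *
 * so that every new mm allocates full 4KB page tables with pgste and a
 * later s390_enable_sie() can succeed; the min/max pair clamps the
 * value to 0 or 1.
 */
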
#else /* CONFIG_PGSTE */

static inline int page_table_with_pgste(struct page *page)
{
	return 0;
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
{
	return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_unlink(struct mm_struct *mm, unsigned long *table,
			       unsigned long vmaddr)
{
}

#endif /* CONFIG_PGSTE */

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *uninitialized_var(table);
	struct page *uninitialized_var(page);
	unsigned int mask, bit;

	if (mm_alloc_pgste(mm))
		return page_table_alloc_pgste(mm);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}

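/*
 * Fragment selection sketch (worked through from the code above, not
 * original text): suppose the first page on pgtable_list has _mapcount
 * 0x21, i.e. the second 2KB half is allocated (bit 0x2) and the first
 * half is pending an RCU free (bit 0x10). Folding with mask |= mask >> 4
 * gives 0x23, so both halves count as busy and a fresh page is
 * allocated. With _mapcount 0x02 instead, the scan
 * "for (bit = 1; mask & bit; bit <<= 1)" stops at bit 0x1 immediately
 * and hands out the first 2KB half of the existing page.
 */
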
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (page_table_with_pgste(page))
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (page_table_with_pgste(page)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}

static void __tlb_remove_table(void *_table)
{
	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
	void *table = (void *)((unsigned long) _table & ~mask);
	unsigned type = (unsigned long) _table & mask;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}

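/*
 * Tag encoding in the low pointer bits (derived from the two functions
 * above): page tables are at least 2KB aligned, so the low bits of the
 * address are free for a type tag. A value of FRAG_MASK (0x03) marks a
 * full pgste page, 0x10 or 0x20 (bit << 4) names the pending 2KB
 * fragment to release, and 0 means an order-2 CRST table that goes
 * straight back to free_pages().
 */
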
static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void thp_split_vma(struct vm_area_struct *vma)
{
	unsigned long addr;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
		follow_page(vma, addr, FOLL_SPLIT);
}

static inline void thp_split_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		thp_split_vma(vma);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
}
#else
static inline void thp_split_mm(struct mm_struct *mm)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Switch on pgstes for the userspace process (for kvm).
 */
int s390_enable_sie(void)
{
	struct mm_struct *mm = current->mm;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(mm))
		return 0;
	/* Fail if the page tables are 2K */
	if (!mm_alloc_pgste(mm))
		return -EINVAL;
	down_write(&mm->mmap_sem);
	mm->context.has_pgste = 1;
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	up_write(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	unsigned long ptev;
	pgste_t pgste;

	pgste = pgste_get_lock(pte);
	/*
	 * Remove all zero page mappings; once a policy forbidding zero
	 * page mappings is established, subsequent faults on those
	 * pages will get fresh anonymous pages.
	 */
	if (is_zero_pfn(pte_pfn(*pte))) {
		ptep_flush_direct(walk->mm, addr, pte);
		pte_val(*pte) = _PAGE_INVALID;
	}
	/* Clear storage key */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
			      PGSTE_GR_BIT | PGSTE_GC_BIT);
	ptev = pte_val(*pte);
	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
	pgste_set_unlock(pte, pgste);
	return 0;
}

int s390_enable_skey(void)
{
	struct mm_walk walk = { .pte_entry = __s390_enable_skey };
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc = 0;

	down_write(&mm->mmap_sem);
	if (mm_use_skey(mm))
		goto out_up;

	mm->context.use_skey = 1;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
				MADV_UNMERGEABLE, &vma->vm_flags)) {
			mm->context.use_skey = 0;
			rc = -ENOMEM;
			goto out_up;
		}
	}
	mm->def_flags &= ~VM_MERGEABLE;

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);

/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	pgste_t pgste;

	pgste = pgste_get_lock(pte);
	pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
	pgste_set_unlock(pte, pgste);
	return 0;
}

void s390_reset_cmma(struct mm_struct *mm)
{
	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

	down_write(&mm->mmap_sem);
	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
	up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);

/*
 * Test and reset if a guest page is dirty
 */
bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
{
	pte_t *pte;
	spinlock_t *ptl;
	bool dirty = false;

	pte = get_locked_pte(gmap->mm, address, &ptl);
	if (unlikely(!pte))
		return false;

	if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte))
		dirty = true;

	spin_unlock(ptl);
	return dirty;
}
EXPORT_SYMBOL_GPL(gmap_test_and_clear_dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
			   pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	/*
	 * No need to flush the TLB: on s390 the reference bits live in
	 * the storage key, never in the TLB.
	 */
	return pmdp_test_and_clear_young(vma, address, pmdp);
}

int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_same(*pmdp, entry))
		return 0;
	pmdp_invalidate(vma, address, pmdp);
	set_pmd_at(vma->vm_mm, address, pmdp, entry);
	return 1;
}

static void pmdp_splitting_flush_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
			      (unsigned long *) pmdp)) {
		/* need to serialize against gup-fast (IRQ disabled) */
		smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
	}
}

void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */