/*
 *  Copyright IBM Corp. 2007, 2011
 *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
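/*
 * TLB flush helpers: the *_flush_direct variants invalidate the TLB
 * entry immediately via IPTE/IDTE, the *_flush_lazy variants only mark
 * the entry invalid and defer the hardware flush if the mm is attached
 * to no other CPU. mm->context.attach_count keeps the number of
 * attached CPUs in the lower 16 bits; flushers add 0x10000 to claim
 * the upper half while they operate.
 */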
static inline pte_t ptep_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	int active, count;
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_ipte_local(addr, ptep);
	else
		__ptep_ipte(addr, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
	return old;
}
static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep)
{
	int active, count;
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		__ptep_ipte(addr, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
	return old;
}
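/*
 * With CONFIG_PGSTE each 4K page table page holds 256 ptes followed by
 * 256 page-status-table entries (pgstes), so the pgste of a pte lives
 * at ptep[PTRS_PER_PTE]. The PCL bit of the pgste serves as a
 * per-entry spinlock: pgste_get_lock() spins on compare-and-swap until
 * it wins the bit, pgste_set_unlock() stores the updated pgste back
 * with the bit cleared.
 */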
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}
static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
#endif
}
static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}
static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}
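/*
 * Storage key state moves in both directions: pgste_update_all()
 * harvests the hardware storage key (access key, fetch protection,
 * referenced and changed bits) of a still-valid pte into the pgste,
 * pgste_set_key() programs the hardware key from the pgste when an
 * invalid pte becomes valid again. This keeps guest key state alive
 * across host unmapping of the page.
 */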
static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_use_skey(mm) || pte_val(pte) & _PAGE_INVALID)
		return pgste;
	address = pte_val(pte) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}
static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}
static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
#endif
	*ptep = entry;
	return pgste;
}
static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
					unsigned long addr,
					pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	if (pgste_val(pgste) & PGSTE_IN_BIT) {
		pgste_val(pgste) &= ~PGSTE_IN_BIT;
		ptep_notify(mm, addr, ptep);
	}
#endif
	return pgste;
}
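/*
 * A pte exchange runs in three phases: ptep_xchg_start() takes the
 * pgste lock and delivers a pending invalidation notification, a
 * ptep_flush_* helper invalidates the old pte and flushes the TLB,
 * and ptep_xchg_commit() transfers key/usage state, installs the new
 * pte and drops the pgste lock.
 */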
static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pgste_t pgste = __pgste(0);

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
	}
	return pgste;
}
static inline void ptep_xchg_commit(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep,
				    pgste_t pgste, pte_t old, pte_t new)
{
	if (mm_has_pgste(mm)) {
		if (pte_val(old) & _PAGE_INVALID)
			pgste_set_key(ptep, pgste, new, mm);
		if (pte_val(new) & _PAGE_INVALID) {
			pgste = pgste_update_all(old, pgste, mm);
			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
			    _PGSTE_GPS_USAGE_UNUSED)
				pte_val(old) |= _PAGE_UNUSED;
		}
		pgste = pgste_set_pte(ptep, pgste, new);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = new;
	}
}
pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_direct(mm, addr, ptep);
	ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_direct);
pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_lazy(mm, addr, ptep);
	ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_lazy);
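/*
 * Illustrative sketch (not part of the original file): callers build
 * higher-level primitives on top of the xchg helpers by exchanging the
 * pte against an invalid one. The helper name below is hypothetical.
 */
static inline pte_t example_get_and_clear(struct mm_struct *mm,
					  unsigned long addr, pte_t *ptep)
{
	/* atomically replace the pte with an empty one, flushing lazily */
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}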
pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_lazy(mm, addr, ptep);
	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(old, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return old;
}
EXPORT_SYMBOL(ptep_modify_prot_start);
void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = pte;
	}
	preempt_enable();
}
EXPORT_SYMBOL(ptep_modify_prot_commit);
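/*
 * Illustrative sketch (not part of the original file): the modify_prot
 * pair brackets a read-modify-write of a pte; the pgste stays locked
 * between start and commit. The helper name is hypothetical.
 */
static inline void example_wrprotect(struct mm_struct *mm,
				     unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = ptep_modify_prot_start(mm, addr, ptep);
	pte = pte_wrprotect(pte);	/* make the page read-only */
	ptep_modify_prot_commit(mm, addr, ptep, pte);
}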
static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	int active, count;
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	if (!MACHINE_HAS_IDTE) {
		__pmdp_csp(pmdp);
		return old;
	}
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pmdp_idte_local(addr, pmdp);
	else
		__pmdp_idte(addr, pmdp);
	atomic_sub(0x10000, &mm->context.attach_count);
	return old;
}
static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	int active, count;
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
	} else if (MACHINE_HAS_IDTE)
		__pmdp_idte(addr, pmdp);
	else
		__pmdp_csp(pmdp);
	atomic_sub(0x10000, &mm->context.attach_count);
	return old;
}
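/*
 * Segment (pmd) entries are flushed with IDTE where the facility is
 * available; machines without IDTE fall back to __pmdp_csp(), i.e.
 * compare-and-swap-and-purge, in the direct and lazy helpers above.
 */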
pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_direct(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);
pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_lazy(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);
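/*
 * THP deposit/withdraw: the deposited page tables are kept on a list
 * that is threaded through the pgtable pages themselves, the first two
 * pte slots (16 bytes) doubling as the struct list_head. On withdraw
 * those two slots are reset to _PAGE_INVALID to erase the list
 * pointers again.
 */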
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	/* the mm_has_pgste() check is done in set_pte_at() */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
	pgste_set_key(ptep, pgste, entry, mm);
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;

	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) |= PGSTE_IN_BIT;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}
static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
	if (!non_swap_entry(entry))
		dec_mm_counter(mm, MM_SWAPENTS);
	else if (is_migration_entry(entry)) {
		struct page *page = migration_entry_to_page(entry);

		dec_mm_counter(mm, mm_counter(page));
	}
	free_swap_and_cache(entry);
}
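/*
 * ptep_zap_unused() backs guest collaborative memory management
 * (CMMA): a swap pte whose pgste marks the page as unused or logically
 * zero can be dropped outright, releasing the swap slot, since the
 * guest has declared the content disposable.
 */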
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset)
{
	unsigned long pgstev;
	pgste_t pgste;
	pte_t pte;

	/* Zap unused and logically-zero pages */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	pte = *ptep;
	if (!reset && pte_swap(pte) &&
	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
	     (pgstev & _PGSTE_GPS_ZERO))) {
		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
	if (reset)
		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long ptev;
	pgste_t pgste;

	/* Clear storage key */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
			      PGSTE_GR_BIT | PGSTE_GC_BIT);
	ptev = pte_val(*ptep);
	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}
/*
 * Test and reset if a guest page is dirty
 */
bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;
	pte_t pte;
	bool dirty;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return false;

	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
		__ptep_ipte(addr, ptep);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);

	pte_unmap_unlock(ptep, ptl);
	return dirty;
}
EXPORT_SYMBOL_GPL(test_and_clear_guest_dirty);
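/*
 * set/get_guest_storage_key maintain the guest's view of the storage
 * key: for a mapped page the real hardware key is updated or read and
 * merged with the guest R/C bits kept in the pgste, for an unmapped
 * page only the pgste copy is touched.
 */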
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq)
{
	unsigned long keyul;
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;

	down_read(&mm->mmap_sem);
	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep)) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	keyul = (unsigned long) key;
	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long address, bits, skey;

		address = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(address);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(address, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	up_read(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);
unsigned char get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
{
	unsigned char key;
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;

	down_read(&mm->mmap_sem);
	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep)) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}
	pgste = pgste_get_lock(ptep);

	if (pte_val(*ptep) & _PAGE_INVALID) {
		key  = (pgste_val(pgste) & PGSTE_ACC_BITS) >> 56;
		key |= (pgste_val(pgste) & PGSTE_FP_BIT) >> 56;
		key |= (pgste_val(pgste) & PGSTE_GR_BIT) >> 48;
		key |= (pgste_val(pgste) & PGSTE_GC_BIT) >> 48;
	} else {
		key = page_get_storage_key(pte_val(*ptep) & PAGE_MASK);

		/* Reflect guest's logical view, not physical */
		if (pgste_val(pgste) & PGSTE_GR_BIT)
			key |= _PAGE_REFERENCED;
		if (pgste_val(pgste) & PGSTE_GC_BIT)
			key |= _PAGE_CHANGED;
	}

	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	up_read(&mm->mmap_sem);
	return key;
}
EXPORT_SYMBOL(get_guest_storage_key);
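/*
 * Illustrative sketch (not part of the original file): how a KVM-style
 * caller might back a guest SSKE instruction with the helper above.
 * Resolving the guest absolute address to a host virtual address (hva)
 * is omitted; function and parameter names are hypothetical.
 */
static int example_handle_sske(struct mm_struct *mm, unsigned long hva,
			       unsigned char key)
{
	/* quiescing variant: nq = false */
	return set_guest_storage_key(mm, hva, key, false);
}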