arch/powerpc/mm/pgtable-book3s64.c

/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include "mmu_decl.h"
#include <trace/events/thp.h>

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in
 * the page fault path when we don't hit any of the major fault cases,
 * i.e., a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic
 * code will have handled those two for us; we additionally deal with
 * missing execute permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp, pmd_t entry, int dirty)
{
        int changed;
#ifdef CONFIG_DEBUG_VM
        WARN_ON(!pmd_trans_huge(*pmdp));
        assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif
        changed = !pmd_same(*(pmdp), entry);
        if (changed) {
                __ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
                flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }
        return changed;
}
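
/*
 * A minimal caller sketch (illustrative only, loosely modelled on the
 * generic THP access-fault path; "orig_pmd", "haddr" and "write" are
 * assumed locals, not definitions from this file):
 *
 *      pmd_t entry = pmd_mkyoung(orig_pmd);
 *      if (write)
 *              entry = pmd_mkdirty(entry);
 *      if (pmdp_set_access_flags(vma, haddr, pmdp, entry, write))
 *              update_mmu_cache_pmd(vma, haddr, pmdp);
 */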
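
/*
 * Test and clear the accessed (young) bit on a huge pmd, delegating to
 * __pmdp_test_and_clear_young(). Returns non-zero if the bit was set.
 */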
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long address, pmd_t *pmdp)
{
        return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * Set a new huge pmd. We should not be called to update an existing
 * pmd entry; that should go via pmd_hugepage_update().
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
        WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
        assert_spin_locked(&mm->page_table_lock);
        WARN_ON(!pmd_trans_huge(pmd));
#endif
        trace_hugepage_set_pmd(addr, pmd_val(pmd));
        return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
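
/*
 * A minimal usage sketch (illustrative only; "page" and "haddr" are
 * assumed to come from a freshly faulted THP allocation):
 *
 *      pmd_t entry = mk_pmd(page, vma->vm_page_prot);
 *      entry = pmd_mkhuge(entry);
 *      set_pmd_at(vma->vm_mm, haddr, pmdp, entry);
 */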

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                     pmd_t *pmdp)
{
        pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        /*
         * This ensures that generic code that relies on IRQ disabling
         * to prevent a parallel THP split works as expected.
         */
        kick_all_cpus_sync();
}
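
/* Fold the protection bits from pgprot into the raw pmd value. */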
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
        return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}
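
/*
 * Build a huge-page pmd for the given page frame number: mask the pfn
 * into the RPN field and apply the protection bits.
 */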
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
        unsigned long pmdv;

        pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
        return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
        return pfn_pmd(page_to_pfn(page), pgprot);
}

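/*
 * Change the protection bits of a pmd: keep only the bits covered by
 * _HPAGE_CHG_MASK (pfn, dirty, accessed, ...) and apply newprot.
 */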
pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        unsigned long pmdv;

        pmdv = pmd_val(pmd);
        pmdv &= _HPAGE_CHG_MASK;
        return pmd_set_protbits(__pmd(pmdv), newprot);
}
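
/*
 * Typical use, loosely sketched from the generic THP mprotect path
 * (illustrative, not a verbatim copy of mm/huge_memory.c):
 *
 *      entry = pmdp_huge_get_and_clear_notify(mm, addr, pmdp);
 *      entry = pmd_modify(entry, newprot);
 *      set_pmd_at(mm, addr, pmdp, entry);
 */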

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a huge PMD entry in the Linux page
 * tables. We use it to preload an HPTE into the hash table corresponding
 * to the updated Linux huge PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
                          pmd_t *pmd)
{
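        /* Currently a no-op; no HPTE preload is done here. */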
        return;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */