arch/powerpc/mm/hugetlbpage-book3e.c
// SPDX-License-Identifier: GPL-2.0
/*
 * PPC Huge TLB Page Support for Book3E MMU
 *
 * Copyright (C) 2009 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/mmu.h>

#ifdef CONFIG_PPC_FSL_BOOK3E
#ifdef CONFIG_PPC64
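/*
 * On 64-bit, the next TLB1 entry to use is tracked in the per-core
 * tlb_core_data: hand out esel values round-robin, wrapping from
 * esel_max back to esel_first.
 */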
static inline int tlb1_next(void)
{
	struct paca_struct *paca = get_paca();
	struct tlb_core_data *tcd;
	int this, next;

	tcd = paca->tcd_ptr;
	this = tcd->esel_next;

	next = this + 1;
	if (next >= tcd->esel_max)
		next = tcd->esel_first;

	tcd->esel_next = next;
	return this;
}
#else
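/*
 * On 32-bit, a per-cpu counter round-robins through the TLB1/CAM
 * entries, wrapping back to tlbcam_index (the first entry not used
 * by the boot-time CAM mappings) when it reaches the last one.
 */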
static inline int tlb1_next(void)
{
	int index, ncams;

	ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	index = this_cpu_read(next_tlbcam_idx);

	/* Just round-robin the entries and wrap when we hit the end */
	if (unlikely(index == ncams - 1))
		__this_cpu_write(next_tlbcam_idx, tlbcam_index);
	else
		__this_cpu_inc(next_tlbcam_idx);

	return index;
}
#endif /* !PPC64 */
#endif /* FSL */

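/* Translate a Linux MMU page size index into the hardware TSIZE encoding */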
static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}

#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_PPC64)
#include <asm/paca.h>

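/*
 * When hardware threads share a TLB, updates to it are serialized with
 * a per-core byte lock kept in the tlb_core_data.
 */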
static inline void book3e_tlb_lock(void)
{
	struct paca_struct *paca = get_paca();
	unsigned long tmp;
	int token = smp_processor_id() + 1;

	/*
	 * Besides being unnecessary in the absence of SMT, this
	 * check prevents trying to do lbarx/stbcx. on e5500 which
	 * doesn't implement either feature.
	 */
	if (!cpu_has_feature(CPU_FTR_SMT))
		return;

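	/*
	 * Take the lock byte with a lbarx/stbcx. reservation loop,
	 * spinning with ordinary loads while another thread holds it.
	 */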
	asm volatile("1: lbarx %0, 0, %1;"
		     "cmpwi %0, 0;"
		     "bne 2f;"
		     "stbcx. %2, 0, %1;"
		     "bne 1b;"
		     "b 3f;"
		     "2: lbzx %0, 0, %1;"
		     "cmpwi %0, 0;"
		     "bne 2b;"
		     "b 1b;"
		     "3:"
		     : "=&r" (tmp)
		     : "r" (&paca->tcd_ptr->lock), "r" (token)
		     : "memory");
}

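/*
 * Release the per-core TLB lock taken in book3e_tlb_lock(); the isync()
 * ensures the preceding TLB update has taken effect before the lock is
 * dropped.
 */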
static inline void book3e_tlb_unlock(void)
{
	struct paca_struct *paca = get_paca();

	if (!cpu_has_feature(CPU_FTR_SMT))
		return;

	isync();
	paca->tcd_ptr->lock = 0;
}
#else
static inline void book3e_tlb_lock(void)
{
}

static inline void book3e_tlb_unlock(void)
{
}
#endif

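/*
 * Check whether the TLB already has an entry for this EA and PID by
 * searching it with tlbsx.  On MMUs with TLB reservations the record
 * form reports the result in CR0; otherwise the MAS1 valid bit (read
 * via SPR 0x271) tells us whether an entry was found.
 */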
static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
{
	int found = 0;

	mtspr(SPRN_MAS6, pid << 16);
	if (mmu_has_feature(MMU_FTR_USE_TLBRSRV)) {
		asm volatile(
			"li %0,0\n"
			"tlbsx. 0,%1\n"
			"bne 1f\n"
			"li %0,1\n"
			"1:\n"
			: "=&r"(found) : "r"(ea));
	} else {
		asm volatile(
			"tlbsx 0,%1\n"
			"mfspr %0,0x271\n"
			"srwi %0,%0,31\n"
			: "=&r"(found) : "r"(ea));
	}

	return found;
}

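/*
 * Preload the TLB with an entry for a huge page mapping: build the entry
 * in the MAS registers and write it with tlbwe, under the per-core TLB
 * lock and with interrupts disabled, unless an entry is already present.
 */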
void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte)
{
	unsigned long mas1, mas2;
	u64 mas7_3;
	unsigned long psize, tsize, shift;
	unsigned long flags;
	struct mm_struct *mm;

#ifdef CONFIG_PPC_FSL_BOOK3E
	int index;
#endif

	if (unlikely(is_kernel_addr(ea)))
		return;

	mm = vma->vm_mm;

	psize = vma_mmu_pagesize(vma);
	shift = __ilog2(psize);
	tsize = shift - 10;
	/*
	 * We can't be interrupted while we're setting up the MAS
	 * registers or after we've confirmed that no TLB entry exists.
	 */
	local_irq_save(flags);

	book3e_tlb_lock();

	if (unlikely(book3e_tlb_exists(ea, mm->context.id))) {
		book3e_tlb_unlock();
		local_irq_restore(flags);
		return;
	}

#ifdef CONFIG_PPC_FSL_BOOK3E
	/* We have to use the CAM(TLB1) on FSL parts for hugepages */
	index = tlb1_next();
	mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1));
#endif

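	/*
	 * Build the entry: MAS1 holds the valid bit, PID and page size,
	 * MAS2 the effective address and WIMGE attributes, and MAS7_MAS3
	 * the physical address and permission bits (with write permission
	 * cleared while the page is still clean).
	 */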
	mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize);
	mas2 = ea & ~((1UL << shift) - 1);
	mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
	mas7_3 = (u64)pte_pfn(pte) << PAGE_SHIFT;
	mas7_3 |= (pte_val(pte) >> PTE_BAP_SHIFT) & MAS3_BAP_MASK;
	if (!pte_dirty(pte))
		mas7_3 &= ~(MAS3_SW|MAS3_UW);

	mtspr(SPRN_MAS1, mas1);
	mtspr(SPRN_MAS2, mas2);

	if (mmu_has_feature(MMU_FTR_USE_PAIRED_MAS)) {
		mtspr(SPRN_MAS7_MAS3, mas7_3);
	} else {
		if (mmu_has_feature(MMU_FTR_BIG_PHYS))
			mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
		mtspr(SPRN_MAS3, lower_32_bits(mas7_3));
	}

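	/* Write the entry described by the MAS registers into the TLB */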
	asm volatile ("tlbwe");

	book3e_tlb_unlock();
	local_irq_restore(flags);
}

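/*
 * Invalidate the TLB entry covering a huge page; tsize encodes the page
 * size as log2(size in KB), matching what book3e_hugetlb_preload() used.
 */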
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct hstate *hstate = hstate_file(vma->vm_file);
	unsigned long tsize = huge_page_shift(hstate) - 10;

	__flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0);
}