Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* arch/sparc64/mm/tlb.c |
2 | * | |
3 | * Copyright (C) 2004 David S. Miller <davem@redhat.com> | |
4 | */ | |
5 | ||
6 | #include <linux/kernel.h> | |
1da177e4 LT |
7 | #include <linux/percpu.h> |
8 | #include <linux/mm.h> | |
9 | #include <linux/swap.h> | |
c9f2946f | 10 | #include <linux/preempt.h> |
1da177e4 LT |
11 | |
12 | #include <asm/pgtable.h> | |
13 | #include <asm/pgalloc.h> | |
14 | #include <asm/tlbflush.h> | |
15 | #include <asm/cacheflush.h> | |
16 | #include <asm/mmu_context.h> | |
17 | #include <asm/tlb.h> | |
18 | ||
19 | /* Heavily inspired by the ppc64 code. */ | |
20 | ||
90f08e39 | 21 | static DEFINE_PER_CPU(struct tlb_batch, tlb_batch); |
1da177e4 LT |
22 | |
/* Flush every virtual address queued in this CPU's TLB batch.
 *
 * get_cpu_var() pins us to the current CPU (disabling preemption) so the
 * batch cannot be switched or appended to underneath us; put_cpu_var()
 * releases it on every exit path via the 'out' label.
 */
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	/* Nothing queued — just drop the per-cpu reference. */
	if (!tb->tlb_nr)
		goto out;

	/* TSB entries must be invalidated before the TLB itself. */
	flush_tsb_user(tb);

	/* Only mms that hold a valid hardware context can have live TLB
	 * entries; otherwise the TSB flush above is sufficient.
	 */
	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			/* Single address: a targeted cross-CPU page flush
			 * is cheaper than the batch path.
			 */
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	/* Batch is drained; reset the count for the next user. */
	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}
52 | ||
f36391d2 DM |
53 | void arch_enter_lazy_mmu_mode(void) |
54 | { | |
494fc421 | 55 | struct tlb_batch *tb = this_cpu_ptr(&tlb_batch); |
f36391d2 DM |
56 | |
57 | tb->active = 1; | |
58 | } | |
59 | ||
60 | void arch_leave_lazy_mmu_mode(void) | |
61 | { | |
494fc421 | 62 | struct tlb_batch *tb = this_cpu_ptr(&tlb_batch); |
f36391d2 DM |
63 | |
64 | if (tb->tlb_nr) | |
65 | flush_tlb_pending(); | |
66 | tb->active = 0; | |
67 | } | |
68 | ||
/* Queue one user virtual address for a deferred TLB flush, or flush it
 * immediately when batching is not active.
 *
 * @mm:    address space the mapping belongs to
 * @vaddr: user virtual address (page-aligned below)
 * @exec:  true if the mapping was executable — encoded into bit 0 of the
 *         queued address so the flush side can invalidate the I-TLB too
 * @huge:  true for a hugetlb PTE; a batch may hold only one page size
 */
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec, bool huge)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;	/* low bit tags executable mappings */

	nr = tb->tlb_nr;

	/* The batch is per-mm; switching mms forces a drain first. */
	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	/* Not in lazy MMU mode: flush TSB and TLB for this page right now
	 * instead of queueing.
	 */
	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr, huge);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	/* First entry establishes the batch's mm and page-size class. */
	if (nr == 0) {
		tb->mm = mm;
		tb->huge = huge;
	}

	/* Mixing huge and normal pages in one batch is not allowed;
	 * drain and restart with the new size class.
	 */
	if (tb->huge != huge) {
		flush_tlb_pending();
		tb->huge = huge;
		nr = 0;
	}

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	/* Full batch — flush eagerly rather than overflow vaddrs[]. */
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}
111 | ||
/* Hook called when a user PTE is torn down: handle D-cache aliasing for
 * dirty file pages, then queue the address for TLB flushing.
 *
 * @fullmm: non-zero when the whole mm is being torn down, in which case
 *          per-page TLB flushing is skipped (context flush happens later).
 */
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	bool huge = is_hugetlb_pte(orig);

	/* Pre-hypervisor chips have virtually-indexed D-caches that can
	 * alias; dirty pages whose kernel and user mappings differ in the
	 * alias bit must be flushed from the D-cache.
	 */
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? Anonymous pages are handled elsewhere. */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		/* Bit 13 is the cache alias bit (8K page, 16K-way index);
		 * mismatch means the two mappings hit different cache lines.
		 */
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig), huge);
}
144 | ||
145 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | |
/* Walk the PTE page backing a (non-huge) PMD and queue a TLB flush for
 * every valid PTE in the HPAGE_SIZE region starting at @vaddr.
 *
 * Called from set_pmd_at() when a PMD that pointed at a normal PTE table
 * is replaced.
 */
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID) {
			bool exec = pte_exec(*pte);

			tlb_batch_add_one(mm, vaddr, exec, false);
		}
		pte++;
		vaddr += PAGE_SIZE;
	}
	/* NOTE(review): pte points one past the last entry here; harmless
	 * as long as pte_unmap() is a no-op on this arch — confirm.
	 */
	pte_unmap(pte);
}
1da177e4 | 165 | |
9e695d2e DM |
/* Install @pmd at @pmdp, maintaining the mm's huge-PTE accounting and
 * flushing TLB entries for whatever mapping @pmdp previously held.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;

	/* Kernel mappings need no user-context bookkeeping or flushing. */
	if (mm == &init_mm)
		return;

	/* Track transitions into/out of the huge state so the TSB code
	 * knows whether this mm uses hugepages.
	 */
	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		if (pmd_val(pmd) & _PAGE_PMD_HUGE)
			mm->context.huge_pte_count++;
		else
			mm->context.huge_pte_count--;

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	/* The old mapping (if any) must be flushed out of the TLB. */
	if (!pmd_none(orig)) {
		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			pte_t orig_pte = __pte(pmd_val(orig));
			bool exec = pte_exec(orig_pte);

			/* One hugepage is backed by two REAL_HPAGE_SIZE
			 * hardware mappings — flush both halves.
			 */
			tlb_batch_add_one(mm, addr, exec, true);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
					  true);
		} else {
			/* Old PMD pointed at a PTE table: flush each
			 * valid PTE individually.
			 */
			tlb_batch_pmd_scan(mm, addr, orig);
		}
	}
}
1da177e4 | 206 | |
51e5ef1b DM |
207 | void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, |
208 | pmd_t *pmdp) | |
209 | { | |
210 | pmd_t entry = *pmdp; | |
211 | ||
212 | pmd_val(entry) &= ~_PAGE_VALID; | |
213 | ||
214 | set_pmd_at(vma->vm_mm, address, pmdp, entry); | |
215 | flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); | |
216 | } | |
217 | ||
6b0b50b0 AK |
218 | void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, |
219 | pgtable_t pgtable) | |
9e695d2e DM |
220 | { |
221 | struct list_head *lh = (struct list_head *) pgtable; | |
1da177e4 | 222 | |
9e695d2e | 223 | assert_spin_locked(&mm->page_table_lock); |
90f08e39 | 224 | |
9e695d2e | 225 | /* FIFO */ |
c389a250 | 226 | if (!pmd_huge_pte(mm, pmdp)) |
9e695d2e DM |
227 | INIT_LIST_HEAD(lh); |
228 | else | |
c389a250 KS |
229 | list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp)); |
230 | pmd_huge_pte(mm, pmdp) = pgtable; | |
9e695d2e DM |
231 | } |
232 | ||
6b0b50b0 | 233 | pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) |
9e695d2e DM |
234 | { |
235 | struct list_head *lh; | |
236 | pgtable_t pgtable; | |
237 | ||
238 | assert_spin_locked(&mm->page_table_lock); | |
239 | ||
240 | /* FIFO */ | |
c389a250 | 241 | pgtable = pmd_huge_pte(mm, pmdp); |
9e695d2e DM |
242 | lh = (struct list_head *) pgtable; |
243 | if (list_empty(lh)) | |
c389a250 | 244 | pmd_huge_pte(mm, pmdp) = NULL; |
9e695d2e | 245 | else { |
c389a250 | 246 | pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next; |
9e695d2e DM |
247 | list_del(lh); |
248 | } | |
249 | pte_val(pgtable[0]) = 0; | |
250 | pte_val(pgtable[1]) = 0; | |
251 | ||
252 | return pgtable; | |
1da177e4 | 253 | } |
9e695d2e | 254 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |