/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 * Derived from arch/ppc64/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

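/*
 * Each CPU keeps a batch of pending hash table / TLB invalidations:
 * hpte_need_flush() fills it and __flush_tlb_pending() drains it.
 */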
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid, vaddr;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* We mask the address for the base page size. Huge pages will
	 * have applied their own masking already
	 */
	addr &= PAGE_MASK;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in 4K environment like
	 * for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA not reused) at the time
	 * of this call
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else
		psize = pte_pagesize_index(mm, addr, pte);

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	vaddr = hpt_va(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case; it might be worth testing the mm cpu mask and
	 * deciding to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vaddr, rpte, psize, ssize, 0);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (e.g. copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid-stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch.
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
	batch->pte[i] = rpte;
	batch->vaddr[i] = vaddr;
	batch->index = ++i;
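	/* The batch is full: flush it right away */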
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
}

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	const struct cpumask *tmp;
	int i, local = 0;

	i = batch->index;
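	/* If this mm has only ever run on the current CPU, the flush can
	 * be done locally instead of broadcasting the invalidation to
	 * every CPU.
	 */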
	tmp = cpumask_of(smp_processor_id());
	if (cpumask_equal(mm_cpumask(batch->mm), tmp))
		local = 1;
	if (i == 1)
		flush_hash_page(batch->vaddr[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}

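/*
 * Called when an mmu_gather operation is being completed, before the
 * gathered pages are actually freed.
 */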
void tlb_flush(struct mmu_gather *tlb)
{
	struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch);

	/* If there's a TLB batch pending, then we must flush it because the
	 * pages are going to be freed and we really don't want to have a CPU
	 * access a freed page because it has a stale TLB entry.
	 */
	if (tlbbatch->index)
		__flush_tlb_pending(tlbbatch);

	/* Push out batch of freed page tables */
	pte_free_finish();
}

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm		: mm_struct of the target address space (generally init_mm)
 * @start	: starting address
 * @end		: ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
 * and is implemented for small size rather than speed.
 */
#ifdef CONFIG_HOTPLUG

void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance-oriented
	 * way to do things but is fine for our needs here.
	 */
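	/* Disabling interrupts keeps the per-CPU batch from being used
	 * concurrently, and lazy MMU mode marks the batch active so that
	 * hpte_need_flush() queues the invalidations instead of flushing
	 * each page individually.
	 */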
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte(mm->pgd, start);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
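		/* Only PTEs marked _PAGE_HASHPTE ever had a hash entry
		 * inserted, so the others can be skipped.
		 */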
		if (!(pte & _PAGE_HASHPTE))
			continue;
		hpte_need_flush(mm, start, ptep, pte, 0);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}

#endif /* CONFIG_HOTPLUG */