/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 * Derived from arch/ppc64/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *   Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *   and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *     Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
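/*
 * As used below, a batch collects pending hash invalidations for one mm,
 * one base page size (psize) and one segment size (ssize) at a time: up
 * to PPC64_TLB_BATCH_NR (vaddr, real PTE) pairs, with ->index counting
 * the queued entries and ->active marking an open lazy MMU section.
 */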

/* This is declared as we are using the more or less generic
 * arch/powerpc/include/asm/tlb.h file -- tgall
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * A Linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid, vaddr;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* We mask the address for the base page size. Huge pages will
	 * have applied their own masking already
	 */
	addr &= PAGE_MASK;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in a 4K environment such
	 * as for SPEs, we obtain the page size from the slice, which must
	 * therefore still exist (and thus the VMA must not have been
	 * reused) at the time of this call.
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else
		psize = pte_pagesize_index(mm, addr, pte);

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	vaddr = hpt_va(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case; it might be worth testing the mm cpu mask
	 * and deciding to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vaddr, rpte, psize, ssize, 0);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
	batch->pte[i] = rpte;
	batch->vaddr[i] = vaddr;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
}
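
/*
 * For reference, a sketch of the usual call path, assuming the
 * pte_update() helper of this era (asm/pgtable-ppc64.h); treat it as an
 * illustration of the contract rather than an exact copy:
 *
 *	old = pte_update(mm, addr, ptep, clr, huge);
 *	if (old & _PAGE_HASHPTE)
 *		hpte_need_flush(mm, addr, ptep, old, huge);
 */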

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	const struct cpumask *tmp;
	int i, local = 0;

	i = batch->index;
	tmp = cpumask_of(smp_processor_id());
	if (cpumask_equal(mm_cpumask(batch->mm), tmp))
		local = 1;
	if (i == 1)
		flush_hash_page(batch->vaddr[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}
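
/*
 * For reference, a sketch of how the lazy MMU hooks open and drain a
 * batch, roughly following this era's asm/tlbflush.h (an illustration,
 * not the definitive implementation):
 *
 *	static inline void arch_leave_lazy_mmu_mode(void)
 *	{
 *		struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 *
 *		if (batch->index)
 *			__flush_tlb_pending(batch);
 *		batch->active = 0;
 *	}
 */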

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB), while
 *                            keeping the Linux PTEs intact.
 *
 * @mm		: mm_struct of the target address space (generally init_mm)
 * @start	: starting address
 * @end		: ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
 * and is implemented for small size rather than speed.
 */
#ifdef CONFIG_HOTPLUG

void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte(mm->pgd, start);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (!(pte & _PAGE_HASHPTE))
			continue;
		hpte_need_flush(mm, start, ptep, pte, 0);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
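
/*
 * Illustrative use, with hypothetical vstart/size for the IO window of a
 * bridge being removed (a sketch, not taken from the hotplug code):
 *
 *	__flush_hash_table_range(&init_mm, vstart, vstart + size);
 */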

#endif /* CONFIG_HOTPLUG */