powerpc/mm: Add support for handling > 512TB address in SLB miss
arch/powerpc/mm/tlb_hash64.c
/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 * Derived from arch/ppc64/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>
#include <asm/pte-walk.h>

#include <trace/events/thp.h>

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	unsigned long vpn;
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i, offset;

	i = batch->index;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in a 4K environment,
	 * e.g. for SPEs, we obtain the page size from the slice, which
	 * thus must still exist (and thus the VMA must not have been
	 * reused) at the time of this call.
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);
		/* Mask the address for the correct page size */
		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
		if (unlikely(psize == MMU_PAGE_16G))
			offset = PTRS_PER_PUD;
		else
			offset = PTRS_PER_PMD;
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shut up gcc */
#endif
	} else {
		psize = pte_pagesize_index(mm, addr, pte);
		/* Mask the address for the standard page size. If we
		 * have a 64k page kernel, but the hardware does not
		 * support 64k pages, this might be different from the
		 * hardware page size encoded in the slice table. */
		addr &= PAGE_MASK;
		offset = PTRS_PER_PTE;
	}

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_user_vsid(&mm->context, addr, ssize);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	WARN_ON(vsid == 0);
	vpn = hpt_vpn(addr, vsid, ssize);
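	/* On 64K-page hash configs the hash slot numbers for this PTE live
	 * in the second half of the page-table page; "offset" (chosen above
	 * per page-table level) tells __real_pte() where to find them. On
	 * 4K configs __real_pte() ignores ptep/offset entirely. */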
	rpte = __real_pte(__pte(pte), ptep, offset);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return.
	 */
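	/* batch->active is armed by arch_enter_lazy_mmu_mode() and the
	 * pending entries are flushed when the lazy MMU section ends
	 * (see tlbflush-hash.h; noted here for orientation). */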
	if (!batch->active) {
		flush_hash_page(vpn, rpte, psize, ssize, mm_is_thread_local(mm));
		put_cpu_var(ppc64_tlb_batch);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (e.g. copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch.
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
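	/* Queue this entry; the whole batch is flushed once it fills up. */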
	batch->pte[i] = rpte;
	batch->vpn[i] = vpn;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}
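
/*
 * A sketch of the typical caller flow, for orientation (hash__pte_update()
 * in book3s/64/hash.h is one real caller; exact details vary by kernel
 * version):
 *
 *	old = pte_update(mm, addr, ptep, clr, set, huge);
 *	if (old & H_PAGE_HASHPTE)
 *		hpte_need_flush(mm, addr, ptep, old, huge);
 *
 * i.e. the linux PTE is modified first, and the stale hash entry is then
 * either flushed immediately or queued on the per-cpu batch above.
 */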

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	int i, local;

	i = batch->index;
	local = mm_is_thread_local(batch->mm);
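	/* A single entry can be flushed on its own; anything more goes
	 * through the batched range flush. */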
	if (i == 1)
		flush_hash_page(batch->vpn[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}

void hash__tlb_flush(struct mmu_gather *tlb)
{
	struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);

	/* If there's a TLB batch pending, then we must flush it because the
	 * pages are going to be freed and we really don't want to have a CPU
	 * access a freed page because it has a stale TLB entry.
	 */
	if (tlbbatch->index)
		__flush_tlb_pending(tlbbatch);

	put_cpu_var(ppc64_tlb_batch);
}

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm		: mm_struct of the target address space (generally init_mm)
 * @start	: starting address
 * @end		: ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it is implemented for small size rather
 * than speed.
 */
void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	bool is_thp;
	int hugepage_shift;
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
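	/* Walk the range page by page. find_current_mm_pte() returns the
	 * linux PTE (if any) and reports whether it maps a THP and with
	 * which huge page shift. */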
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_current_mm_pte(mm->pgd, start, &is_thp,
						  &hugepage_shift);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (is_thp)
			trace_hugepage_invalidate(start, pte);
		if (!(pte & H_PAGE_HASHPTE))
			continue;
		if (unlikely(is_thp))
			hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
		else
			hpte_need_flush(mm, start, ptep, pte, hugepage_shift);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
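
/*
 * Flush the hash entries for every linux PTE under one PMD: addr is
 * rounded down to PMD_SIZE and all PTRS_PER_PTE entries are scanned,
 * but only those with H_PAGE_HASHPTE set touch the hash table. Used
 * e.g. by the THP collapse path to invalidate subpage HPTEs.
 */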
void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;
	pte_t *start_pte;
	unsigned long flags;

	addr = _ALIGN_DOWN(addr, PMD_SIZE);
	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	start_pte = pte_offset_map(pmd, addr);
	for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
		unsigned long pteval = pte_val(*pte);
		if (pteval & H_PAGE_HASHPTE)
			hpte_need_flush(mm, addr, pte, pteval, 0);
		addr += PAGE_SIZE;
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}