/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 * Derived from arch/ppc64/mm/init.c:
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* This is declared as we are using the more or less generic
 * include/asm-powerpc/tlb.h file -- tgall
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
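
/* Page table pages freed during a TLB gather are queued in per-cpu
 * batches and released via RCU, so that any CPU still performing a
 * lockless page table walk (e.g. servicing a hash fault) is
 * guaranteed to have finished before the page is actually freed.
 */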
struct pte_freelist_batch
{
	struct rcu_head	rcu;
	unsigned int	index;
	pgtable_free_t	tables[0];
};

DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
unsigned long pte_freelist_forced_free;
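
/* A batch occupies exactly one page: the header above, followed by
 * as many pgtable_free_t slots as fit in the rest of the page.
 */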
#define PTE_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
	  / sizeof(pgtable_free_t))

#ifdef CONFIG_SMP
static void pte_free_smp_sync(void *arg)
{
	/* Do nothing, just ensure we sync with all CPUs */
}
#endif

/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
	pte_freelist_forced_free++;

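	/* Lockless page table walkers run with interrupts disabled, so
	 * once every other CPU has answered this synchronous IPI, no
	 * walk that could touch this page can still be in flight and
	 * it is safe to free it immediately.
	 */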
	smp_call_function(pte_free_smp_sync, NULL, 0, 1);

	pgtable_free(pgf);
}

static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		pgtable_free(batch->tables[i]);

	free_page((unsigned long)batch);
}
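
/* Hand a full (or final) batch to RCU; pte_free_rcu_callback will
 * free its contents once a grace period has elapsed.
 */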
static void pte_free_submit(struct pte_freelist_batch *batch)
{
	INIT_RCU_HEAD(&batch->rcu);
	call_rcu(&batch->rcu, pte_free_rcu_callback);
}
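
/* Queue a page table page freed under a TLB gather for RCU freeing,
 * or free it immediately when no other CPU can be using the mm.
 */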
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

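	/* If we are the only user of the mm, or the mm has only ever
	 * been active on this CPU, no other CPU can be walking these
	 * page tables: free at once.
	 */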
	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pgtable_free(pgf);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}

/*
 * A Linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid, vaddr;
	unsigned int psize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* We mask the address for the base page size. Huge pages will
	 * have applied their own masking already
	 */
	addr &= PAGE_MASK;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in a 4K environment,
	 * like for SPEs, we obtain the page size from the slice, which
	 * thus must still exist (and thus the VMA not reused) at the
	 * time of this call
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = mmu_huge_psize;
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else
		psize = pte_pagesize_index(mm, addr, pte);

	/* Build the full virtual address: the VSID selects a 256MB
	 * segment, the low 28 bits of the EA are the offset within it
	 */
	if (!is_kernel_addr(addr)) {
		vsid = get_vsid(mm->context.id, addr);
		WARN_ON(vsid == 0);
	} else
		vsid = get_kernel_vsid(addr);
	vaddr = (vsid << 28) | (addr & 0x0fffffff);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case, might be worth testing the mm cpu mask though
	 * and deciding to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vaddr, rpte, psize, 0);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (e.g. copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
	}
	batch->pte[i] = rpte;
	batch->vaddr[i] = vaddr;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
}

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	cpumask_t tmp;
	int i, local = 0;

	i = batch->index;
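	/* If this mm has only ever been used on the local CPU, a local
	 * (tlbiel-style) invalidation is sufficient; otherwise the
	 * flush must be broadcast to all CPUs.
	 */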
	tmp = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
		local = 1;
	if (i == 1)
		flush_hash_page(batch->vaddr[0], batch->pte[0],
				batch->psize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}
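
/* Called at the end of a TLB gather: hand any partially-filled
 * batch on this CPU to RCU.
 */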
void pte_free_finish(void)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;
	pte_free_submit(*batchp);
	*batchp = NULL;
}