/* arch/powerpc/mm/44x_mmu.c */
/*
 * Modifications by Matt Porter (mporter@mvista.com) to support
 * PPC44x Book E processors.
 *
 * This file contains the routines for initializing the MMU
 * on the 4xx series of chips.
 *  -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
25
14cf11af 26#include <linux/init.h>
cd3db0c4
BH
27#include <linux/memblock.h>
28
14cf11af 29#include <asm/mmu.h>
57d7909e 30#include <asm/page.h>
1bc54c03 31#include <asm/cacheflush.h>
6c16816b 32#include <asm/code-patching.h>
14cf11af
PM
33
34#include "mmu_decl.h"
35
14cf11af
PM
/* Used by the 44x TLB replacement exception handler.
 * Just needed it declared someplace.
 */
unsigned int tlb_44x_index; /* = 0 */
/* High-water mark for pinned TLB entries: ppc44x_pin_tlb() allocates
 * slots downward from here, and the TLB miss handlers are patched
 * (see ppc44x_update_tlb_hwater()) to compare against this value. */
unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;
/* Flag only declared here; set/read elsewhere — presumably signals a
 * deferred icache flush is needed. TODO(review): confirm against users. */
int icache_44x_need_flush;

/* Bitmap of bolted 47x TLB slots in use; populated from MMUBE0/MMUBE1
 * by ppc47x_update_boltmap() using 8-bit slot indices (0..255). */
unsigned long tlb_47x_boltmap[1024/8];
44
061d19f2 45static void ppc44x_update_tlb_hwater(void)
1bc54c03 46{
1bc54c03
BH
47 /* The TLB miss handlers hard codes the watermark in a cmpli
48 * instruction to improve performances rather than loading it
49 * from the global variable. Thus, we patch the instructions
50 * in the 2 TLB miss handlers when updating the value
51 */
6c16816b
CL
52 modify_instruction_site(&patch__tlb_44x_hwater_D, 0xffff, tlb_44x_hwater);
53 modify_instruction_site(&patch__tlb_44x_hwater_I, 0xffff, tlb_44x_hwater);
1bc54c03
BH
54}
55
/*
 * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 44x type MMU
 *
 * @virt: virtual address of the 256MB region to map
 * @phys: physical address it maps to
 *
 * Takes the next free slot from the top of the TLB (decrementing the
 * watermark so the miss handlers never replace it), then writes the
 * three TLB words for that slot.
 */
static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
{
	/* Allocate a slot from the top; the new (lower) watermark is
	 * immediately re-patched into the miss handlers. */
	unsigned int entry = tlb_44x_hwater--;

	ppc44x_update_tlb_hwater();

	/* Clear MMUCR before the tlbwe sequence */
	mtspr(SPRN_MMUCR, 0);

	/* Write the entry's three words: PAGEID (EPN|valid|256M),
	 * XLAT (RPN), and ATTRIB (permissions/attributes). */
	__asm__ __volatile__(
		"tlbwe	%2,%3,%4\n"
		"tlbwe	%1,%3,%5\n"
		"tlbwe	%0,%3,%6\n"
	:
	: "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
	  "r" (phys),
	  "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M),
	  "r" (entry),
	  "i" (PPC44x_TLB_PAGEID),
	  "i" (PPC44x_TLB_XLAT),
	  "i" (PPC44x_TLB_ATTRIB));
}
80
e7f75ad0
DK
81static int __init ppc47x_find_free_bolted(void)
82{
83 unsigned int mmube0 = mfspr(SPRN_MMUBE0);
84 unsigned int mmube1 = mfspr(SPRN_MMUBE1);
85
86 if (!(mmube0 & MMUBE0_VBE0))
87 return 0;
88 if (!(mmube0 & MMUBE0_VBE1))
89 return 1;
90 if (!(mmube0 & MMUBE0_VBE2))
91 return 2;
92 if (!(mmube1 & MMUBE1_VBE3))
93 return 3;
94 if (!(mmube1 & MMUBE1_VBE4))
95 return 4;
96 if (!(mmube1 & MMUBE1_VBE5))
97 return 5;
98 return -1;
99}
100
101static void __init ppc47x_update_boltmap(void)
102{
103 unsigned int mmube0 = mfspr(SPRN_MMUBE0);
104 unsigned int mmube1 = mfspr(SPRN_MMUBE1);
105
106 if (mmube0 & MMUBE0_VBE0)
107 __set_bit((mmube0 >> MMUBE0_IBE0_SHIFT) & 0xff,
108 tlb_47x_boltmap);
109 if (mmube0 & MMUBE0_VBE1)
110 __set_bit((mmube0 >> MMUBE0_IBE1_SHIFT) & 0xff,
111 tlb_47x_boltmap);
112 if (mmube0 & MMUBE0_VBE2)
113 __set_bit((mmube0 >> MMUBE0_IBE2_SHIFT) & 0xff,
114 tlb_47x_boltmap);
115 if (mmube1 & MMUBE1_VBE3)
116 __set_bit((mmube1 >> MMUBE1_IBE3_SHIFT) & 0xff,
117 tlb_47x_boltmap);
118 if (mmube1 & MMUBE1_VBE4)
119 __set_bit((mmube1 >> MMUBE1_IBE4_SHIFT) & 0xff,
120 tlb_47x_boltmap);
121 if (mmube1 & MMUBE1_VBE5)
122 __set_bit((mmube1 >> MMUBE1_IBE5_SHIFT) & 0xff,
123 tlb_47x_boltmap);
124}
125
/*
 * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU
 *
 * @virt: virtual address of the 256MB region to map
 * @phys: physical address it maps to
 *
 * Claims a free bolted slot (BUG if none remain) and writes the
 * entry's three TLB words for that slot.
 */
static void ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
{
	unsigned int rA;
	int bolted;

	/* Base rA is HW way select, way 0, bolted bit set */
	rA = 0x88000000;

	/* Look for a bolted entry slot */
	bolted = ppc47x_find_free_bolted();
	BUG_ON(bolted < 0);

	/* Insert bolted slot number */
	rA |= bolted << 24;

	pr_debug("256M TLB entry for 0x%08x->0x%08x in bolt slot %d\n",
		 virt, phys, bolted);

	mtspr(SPRN_MMUCR, 0);

	/* Write TLB words 0 (EPN|valid|256M), 1 (RPN) and 2 (permissions;
	 * PPC47x_TLB2_M additionally set on SMP — presumably the memory
	 * coherence attribute, TODO(review): confirm against the 476 manual). */
	__asm__ __volatile__(
		"tlbwe	%2,%3,0\n"
		"tlbwe	%1,%3,1\n"
		"tlbwe	%0,%3,2\n"
		:
		: "r" (PPC47x_TLB2_SW | PPC47x_TLB2_SR |
		       PPC47x_TLB2_SX
#ifdef CONFIG_SMP
		       | PPC47x_TLB2_M
#endif
		       ),
		  "r" (phys),
		  "r" (virt | PPC47x_TLB0_VALID | PPC47x_TLB0_256M),
		  "r" (rA));
}
164
14cf11af
PM
/*
 * Early MMU hardware setup: synchronize the immediate patched into the
 * TLB miss handlers with the current tlb_44x_hwater value, then flush
 * the instruction cache so the patched instructions take effect.
 */
void __init MMU_init_hw(void)
{
	/* This is not useful on 47x but won't hurt either */
	ppc44x_update_tlb_hwater();

	flush_instruction_cache();
}
172
14e609d6 173unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
14cf11af 174{
57d7909e 175 unsigned long addr;
9661534d 176 unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);
14cf11af 177
57d7909e
DG
178 /* Pin in enough TLBs to cover any lowmem not covered by the
179 * initial 256M mapping established in head_44x.S */
9661534d 180 for (addr = memstart + PPC_PIN_SIZE; addr < lowmem_end_addr;
e7f75ad0
DK
181 addr += PPC_PIN_SIZE) {
182 if (mmu_has_feature(MMU_FTR_TYPE_47x))
183 ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
184 else
185 ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
186 }
187 if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
188 ppc47x_update_boltmap();
14cf11af 189
e7f75ad0
DK
190#ifdef DEBUG
191 {
192 int i;
193
194 printk(KERN_DEBUG "bolted entries: ");
195 for (i = 0; i < 255; i++) {
196 if (test_bit(i, tlb_47x_boltmap))
197 printk("%d ", i);
198 }
199 printk("\n");
200 }
201#endif /* DEBUG */
202 }
14cf11af
PM
203 return total_lowmem;
204}
e7f75ad0 205
cd3db0c4
BH
206void setup_initial_memory_limit(phys_addr_t first_memblock_base,
207 phys_addr_t first_memblock_size)
208{
9661534d
DK
209 u64 size;
210
0f890c8d 211#ifndef CONFIG_NONSTATIC_KERNEL
cd3db0c4
BH
212 /* We don't currently support the first MEMBLOCK not mapping 0
213 * physical on those processors
214 */
215 BUG_ON(first_memblock_base != 0);
9661534d 216#endif
cd3db0c4
BH
217
218 /* 44x has a 256M TLB entry pinned at boot */
9661534d
DK
219 size = (min_t(u64, first_memblock_size, PPC_PIN_SIZE));
220 memblock_set_current_limit(first_memblock_base + size);
cd3db0c4
BH
221}
222
e7f75ad0 223#ifdef CONFIG_SMP
f7e2a152 224void __init mmu_init_secondary(int cpu)
e7f75ad0
DK
225{
226 unsigned long addr;
9661534d 227 unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);
e7f75ad0
DK
228
229 /* Pin in enough TLBs to cover any lowmem not covered by the
230 * initial 256M mapping established in head_44x.S
231 *
232 * WARNING: This is called with only the first 256M of the
233 * linear mapping in the TLB and we can't take faults yet
234 * so beware of what this code uses. It runs off a temporary
235 * stack. current (r2) isn't initialized, smp_processor_id()
236 * will not work, current thread info isn't accessible, ...
237 */
9661534d 238 for (addr = memstart + PPC_PIN_SIZE; addr < lowmem_end_addr;
e7f75ad0
DK
239 addr += PPC_PIN_SIZE) {
240 if (mmu_has_feature(MMU_FTR_TYPE_47x))
241 ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
242 else
243 ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
244 }
245}
246#endif /* CONFIG_SMP */