/*
 * This file contains the routines for initializing the MMU
 * on the 8xx series of chips.
 *  -- christophe
 *
 * Derived from arch/powerpc/mm/40x_mmu.c:
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/memblock.h>
#include <linux/mmu_context.h>
#include <asm/fixmap.h>
#include <asm/code-patching.h>

#include "mmu_decl.h"

#define IMMR_SIZE	(FIX_IMMR_SIZE << PAGE_SHIFT)

extern int __map_without_ltlbs;

static unsigned long block_mapped_ram;

/*
 * Return the PA for this VA if it is in an area mapped with LTLBs;
 * otherwise, return 0.
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (__map_without_ltlbs)
		return 0;
	if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
		return p + va - VIRT_IMMR_BASE;
	if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
		return __pa(va);
	return 0;
}

/*
 * Return the VA for a given PA mapped with LTLBs, or 0 if not mapped.
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (__map_without_ltlbs)
		return 0;
	if (pa >= p && pa < p + IMMR_SIZE)
		return VIRT_IMMR_BASE + pa - p;
	if (pa < block_mapped_ram)
		return (unsigned long)__va(pa);
	return 0;
}

#define LARGE_PAGE_SIZE_8M	(1 << 23)

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
	/* Pin up to the first three 8 MB pages after IMMR in the DTLB table */
	if (IS_ENABLED(CONFIG_PIN_TLB_DATA)) {
		unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000;
		unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY;
		int i = IS_ENABLED(CONFIG_PIN_TLB_IMMR) ? 29 : 28;
		unsigned long addr = 0;
		unsigned long mem = total_lowmem;

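		/*
		 * Each pass programs one pinned DTLB entry: MD_CTR selects
		 * the entry index, MD_EPN sets the effective address and
		 * valid bit, MD_TWC selects the 8M page size, and MD_RPN
		 * sets the physical address and protection flags.
		 */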
		for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) {
			mtspr(SPRN_MD_CTR, ctr | (i << 8));
			mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID);
			mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID);
			mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT);
			addr += LARGE_PAGE_SIZE_8M;
			mem -= LARGE_PAGE_SIZE_8M;
		}
	}
}

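/*
 * Map the IMMR area with PAGE_SIZE, cache-inhibited and guarded
 * (PAGE_KERNEL_NCG) pages, as needed for memory-mapped registers.
 */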
static void __init mmu_mapin_immr(void)
{
	unsigned long p = PHYS_IMMR_BASE;
	unsigned long v = VIRT_IMMR_BASE;
	int offset;

	for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
		map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
}

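/*
 * Patch the 16-bit immediate at @site so that the TLB miss handlers
 * compare against the VA of the new block-mapped limit.
 */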
static void mmu_patch_cmp_limit(s32 *site, unsigned long mapped)
{
	modify_instruction_site(site, 0xffff, (unsigned long)__va(mapped) >> 16);
}

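/*
 * Rewrite the immediate of the addis instruction at @site with the
 * high 16 bits of @simm, leaving the rest of the instruction intact.
 */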
static void mmu_patch_addis(s32 *site, long simm)
{
	unsigned int instr = *(unsigned int *)patch_site_addr(site);

	instr &= 0xffff0000;
	instr |= ((unsigned long)simm) >> 16;
	patch_instruction_site(site, instr);
}

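/*
 * Return the amount of RAM covered by 8M block mappings, rounding @top
 * down to an 8M boundary and patching the TLB miss handlers' limits to
 * match.  With __map_without_ltlbs, nothing is block-mapped and only
 * the IMMR area is mapped, with small pages.
 */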
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long mapped;

	if (__map_without_ltlbs) {
		mapped = 0;
		mmu_mapin_immr();
		if (!IS_ENABLED(CONFIG_PIN_TLB_IMMR))
			patch_instruction_site(&patch__dtlbmiss_immr_jmp, PPC_INST_NOP);
		if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
			mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, 0);
	} else {
		mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
		if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
			mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top,
					    _ALIGN(__pa(_einittext), 8 << 20));
	}

	mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped);
	mmu_patch_cmp_limit(&patch__fixupdar_linmem_top, mapped);

	/*
	 * If the size of RAM is not an exact power of two, we may not have
	 * covered RAM in its entirety with 8 MiB pages.  Consequently,
	 * restrict the top end of RAM currently allocatable so that memblock
	 * allocations (of PTEs for "tail" coverage with normal-sized pages,
	 * or anything else) do not fall outside the block-mapped range.
	 */
	if (mapped)
		memblock_set_current_limit(mapped);

	block_mapped_ram = mapped;

	return mapped;
}

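/*
 * Lower the ITLB block-mapping limit from _einittext down to _etext,
 * so that init text is no longer mapped executable once initmem is
 * about to be freed.
 */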
void mmu_mark_initmem_nx(void)
{
	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) && CONFIG_ETEXT_SHIFT < 23)
		mmu_patch_addis(&patch__itlbmiss_linmem_top8,
				-((long)_etext & ~(LARGE_PAGE_SIZE_8M - 1)));
	if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
		mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, __pa(_etext));
}

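/*
 * Patch the DTLB miss handler's read-only limit up to _sinittext, so
 * that addresses below it (kernel text and rodata) are mapped without
 * write permission.
 */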
#ifdef CONFIG_STRICT_KERNEL_RWX
void mmu_mark_rodata_ro(void)
{
	if (CONFIG_DATA_SHIFT < 23)
		mmu_patch_addis(&patch__dtlbmiss_romem_top8,
				-__pa(((unsigned long)_sinittext) &
				      ~(LARGE_PAGE_SIZE_8M - 1)));
	mmu_patch_addis(&patch__dtlbmiss_romem_top, -__pa(_sinittext));
}
#endif

void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping
	 * physical address 0 on these processors.
	 */
	BUG_ON(first_memblock_base != 0);

	/* The 8xx can only access 32 MB at this point during boot */
	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x02000000));
}

/*
 * Set up to use a given MMU context.
 * id is the context number, pgd is the PGD pointer.
 *
 * We place the physical address of the new task's page directory into
 * the MMU base register, and set the ASID compare register to the new
 * "context".
 */
void set_context(unsigned long id, pgd_t *pgd)
{
	s16 offset = (s16)(__pa(swapper_pg_dir));

	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	if (IS_ENABLED(CONFIG_BDI_SWITCH))
		abatron_pteptrs[1] = pgd;

	/* Register M_TWB will contain the base address of the level 1 table
	 * minus the lower part of the kernel PGDIR base address, so that all
	 * accesses to the level 1 table are done relative to the lower part
	 * of the kernel PGDIR base address.
	 */
	mtspr(SPRN_M_TWB, __pa(pgd) - offset);

	/* Update context; the hardware CASID is the context number minus 1 */
	mtspr(SPRN_M_CASID, id - 1);

	/* sync */
	mb();
}

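/*
 * Invalidate the whole instruction cache by writing IDC_INVALL to the
 * IC_CST control register, with isync barriers around the update.
 */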
void flush_instruction_cache(void)
{
	isync();
	mtspr(SPRN_IC_CST, IDC_INVALL);
	isync();
}

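/*
 * Kernel Userspace Execution Prevention: program the instruction
 * access-protection groups (MI_AP) so that kernel-mode execution of
 * user pages is prevented.
 */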
#ifdef CONFIG_PPC_KUEP
void __init setup_kuep(bool disabled)
{
	if (disabled)
		return;

	pr_info("Activating Kernel Userspace Execution Prevention\n");

	mtspr(SPRN_MI_AP, MI_APG_KUEP);
}
#endif

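/*
 * Kernel Userspace Access Protection: program the data
 * access-protection groups (MD_AP) so that kernel-mode accesses to
 * user pages fault unless explicitly permitted.
 */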
#ifdef CONFIG_PPC_KUAP
void __init setup_kuap(bool disabled)
{
	pr_info("Activating Kernel Userspace Access Protection\n");

	if (disabled)
		pr_warn("KUAP cannot be disabled yet on 8xx when compiled in\n");

	mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
#endif