/*
 * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/dma-map-ops.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h> /* mem_init */
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/export.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Use for MMU and noMMU because of PCI generic code */
int mem_init_done;

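/* End of the kernel image, as provided by the linker script symbol _end */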
char *klimit = _end;

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.
 */
unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_size;
EXPORT_SYMBOL(memory_size);
unsigned long lowmem_size;

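/* pfn limits are set up in setup_memory() below; exported for modules */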
EXPORT_SYMBOL(min_low_pfn);
EXPORT_SYMBOL(max_low_pfn);

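/* Set up the persistent kmap (PKMAP) area used to map highmem pages */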
#ifdef CONFIG_HIGHMEM
static void __init highmem_init(void)
{
	pr_debug("%x\n", (u32)PKMAP_BASE);
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
}
#endif /* CONFIG_HIGHMEM */

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
static void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	int idx;

	/* Setup fixmaps */
	for (idx = 0; idx < __end_of_fixed_addresses; idx++)
		clear_fixmap(idx);

	/* Clear every zone */
	memset(zones_size, 0, sizeof(zones_size));

#ifdef CONFIG_HIGHMEM
	highmem_init();

	zones_size[ZONE_DMA] = max_low_pfn;
	zones_size[ZONE_HIGHMEM] = max_pfn;
#else
	zones_size[ZONE_DMA] = max_pfn;
#endif

	/* We don't have holes in the memory map */
	free_area_init(zones_size);
}

void __init setup_memory(void)
{
	/*
	 * Kernel:
	 * start: base phys address of kernel - page align
	 * end: base phys address of kernel - page align
	 *
	 * min_low_pfn - the first page (mm/bootmem.c - node_boot_start)
	 * max_low_pfn
	 */

	/* memory start is from the kernel end (aligned) to higher addr */
	min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */
	max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
	max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;

	pr_info("%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
	pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
	pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);

	paging_init();
}

void __init mem_init(void)
{
	mem_init_done = 1;
}

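/* Only pages below the lowmem limit count as RAM */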
int page_is_ram(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

/*
 * Check for command-line options that affect what MMU_init will do.
 */
static void __init mm_cmdline_setup(void)
{
	unsigned long maxmem = 0;
	char *p = cmd_line;

	/* Look for mem= option on command line */
	p = strstr(cmd_line, "mem=");
	if (p) {
		p += 4;
		maxmem = memparse(p, &p);
		if (maxmem && memory_size > maxmem) {
			memory_size = maxmem;
			memblock.memory.regions[0].size = memory_size;
		}
	}
}

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
static void __init mmu_init_hw(void)
{
	/*
	 * The Zone Protection Register (ZPR) defines how protection will
	 * be applied to every page which is a member of a given zone. At
	 * present, we utilize only two of the zones.
	 * The zone index bits (of ZSEL) in the PTE are used for software
	 * indicators, except the LSB.  For user access, zone 1 is used,
	 * for kernel access, zone 0 is used.  We set all but zone 1
	 * to zero, allowing only kernel access as indicated in the PTE.
	 * For zone 1, we set a 01 binary (a value of 10 will not work)
	 * to allow user access as indicated in the PTE.  This also allows
	 * kernel access as indicated in the PTE.
	 */
	__asm__ __volatile__ ("ori r11, r0, 0x10000000;" \
			"mts rzpr, r11;"
			: : : "r11");
}

/*
 * MMU_init sets up the basic memory mappings for the kernel,
 * including both RAM and possibly some I/O regions,
 * and sets up the page tables and the MMU hardware ready to go.
 */

/* called from head.S */
asmlinkage void __init mmu_init(void)
{
	unsigned int kstart, ksize;

	if ((u32) memblock.memory.regions[0].size < 0x400000) {
		pr_emerg("Memory must be greater than 4MB\n");
		machine_restart(NULL);
	}

	if ((u32) memblock.memory.regions[0].size < kernel_tlb) {
		pr_emerg("Kernel size is greater than memory node\n");
		machine_restart(NULL);
	}

	/* Find main memory where the kernel is */
	memory_start = (u32) memblock.memory.regions[0].base;
	lowmem_size = memory_size = (u32) memblock.memory.regions[0].size;

	if (lowmem_size > CONFIG_LOWMEM_SIZE) {
		lowmem_size = CONFIG_LOWMEM_SIZE;
#ifndef CONFIG_HIGHMEM
		memory_size = lowmem_size;
#endif
	}

	mm_cmdline_setup(); /* FIXME parse args from command line - not used */

	/*
	 * Map out the kernel text/data/bss from the available physical
	 * memory.
	 */
	kstart = __pa(CONFIG_KERNEL_START); /* kernel start */
	/* kernel size */
	ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
	memblock_reserve(kstart, ksize);

#if defined(CONFIG_BLK_DEV_INITRD)
	/* Remove the init RAM disk from the available memory. */
	if (initrd_start) {
		unsigned long size;
		size = initrd_end - initrd_start;
		memblock_reserve(__virt_to_phys(initrd_start), size);
	}
#endif /* CONFIG_BLK_DEV_INITRD */

	/* Initialize the MMU hardware */
	mmu_init_hw();

	/* Map in all of RAM starting at CONFIG_KERNEL_START */
	mapin_ram();

	/* Extend vmalloc and ioremap area as big as possible */
#ifdef CONFIG_HIGHMEM
	ioremap_base = ioremap_bot = PKMAP_BASE;
#else
	ioremap_base = ioremap_bot = FIXADDR_START;
#endif

	/* Initialize the context management stuff */
	mmu_context_init();

	/* Shortly after that, the entire linear mapping will be available */
	/* This also ensures that the unflattened device tree is allocated
	 * within the 768MB limit */
	memblock_set_current_limit(memory_start + lowmem_size - 1);

	parse_early_param();

	early_init_fdt_scan_reserved_mem();

	/* CMA initialization */
	dma_contiguous_reserve(memory_start + lowmem_size - 1);

	memblock_dump_all();
}

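/*
 * Page protections for each combination of the VM_READ/VM_WRITE/VM_EXEC/
 * VM_SHARED flags; DECLARE_VM_GET_PAGE_PROT below generates the generic
 * vm_get_page_prot() from this table.
 */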
static const pgprot_t protection_map[16] = {
	[VM_NONE] = PAGE_NONE,
	[VM_READ] = PAGE_READONLY_X,
	[VM_WRITE] = PAGE_COPY,
	[VM_WRITE | VM_READ] = PAGE_COPY_X,
	[VM_EXEC] = PAGE_READONLY,
	[VM_EXEC | VM_READ] = PAGE_READONLY_X,
	[VM_EXEC | VM_WRITE] = PAGE_COPY,
	[VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X,
	[VM_SHARED] = PAGE_NONE,
	[VM_SHARED | VM_READ] = PAGE_READONLY_X,
	[VM_SHARED | VM_WRITE] = PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_X,
	[VM_SHARED | VM_EXEC] = PAGE_READONLY,
	[VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X,
	[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X
};
DECLARE_VM_GET_PAGE_PROT