/*
 * linux/arch/parisc/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright 1999 SuSE GmbH
 *  changed by Philipp Rumpf
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2004 Randolph Chung (tausq@debian.org)
 * Copyright 2006-2007 Helge Deller (deller@gmx.de)
 *
 */


#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>		/* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>	/* for node_online_map */
#include <linux/pagemap.h>	/* for release_pages and page_cache_release */

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>

extern int data_start;

#if PT_NLEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
 * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
 * guarantee that global objects will be laid out in memory in the same order
 * as the order of declaration, so put these in different sections and use
 * the linker script to order them. */
pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
#endif

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));

#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource pdcdata_resource = {
	.name = "PDC data (Page Zero)",
	.start = 0,
	.end = 0x9ff,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;

#ifdef CONFIG_64BIT
#define MAX_MEM (~0UL)
#else /* !CONFIG_64BIT */
#define MAX_MEM (3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */
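
/*
 * Worked example: on a 32-bit kernel the cap above is
 * 3584U * 1024U * 1024U = 0xE0000000 bytes, i.e. 3.5 GB, which leaves
 * 512 MB of the 4 GB kernel address space for other purposes such as
 * the vmalloc area (see the comment in setup_bootmem() below).
 */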

static unsigned long mem_limit __read_mostly = MAX_MEM;

static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;

	/* We need this before __setup() functions are called */

	limit = MAX_MEM;
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}
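
/*
 * For illustration, a minimal standalone sketch of the "mem=" scan above.
 * example_memparse() is a hypothetical, simplified stand-in for the kernel's
 * memparse() that only handles the K/M/G suffixes (the real helper accepts
 * more); compile it on its own, not as part of this file:
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *
 *	static unsigned long example_memparse(const char *s, char **end)
 *	{
 *		unsigned long val = strtoul(s, end, 0);
 *
 *		switch (**end) {
 *		case 'G': case 'g':
 *			val <<= 10;	// fall through
 *		case 'M': case 'm':
 *			val <<= 10;	// fall through
 *		case 'K': case 'k':
 *			val <<= 10;
 *			(*end)++;
 *		}
 *		return val;
 *	}
 *
 *	int main(void)
 *	{
 *		char cmdline[] = "console=ttyS0 mem=512M quiet";
 *		unsigned long limit = ~0UL;
 *		char *cp, *end;
 *
 *		// same scan as mem_limit_func(): skip words until "mem="
 *		for (cp = cmdline; *cp; ) {
 *			if (memcmp(cp, "mem=", 4) == 0) {
 *				cp += 4;
 *				limit = example_memparse(cp, &end);
 *				if (end != cp)
 *					break;
 *				cp = end;
 *			} else {
 *				while (*cp != ' ' && *cp)
 *					++cp;
 *				while (*cp == ' ')
 *					++cp;
 *			}
 *		}
 *		printf("limit = %lu\n", limit);	// 536870912
 *		return 0;
 *	}
 */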

#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
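
/*
 * Worked example: with the default 4 kB pages (PAGE_SHIFT == 12),
 * MAX_GAP is 0x40000000 >> 12 = 0x40000 pages, so ranges separated by
 * more than 1 GB of hole are thrown away below unless
 * CONFIG_DISCONTIGMEM is enabled.
 */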

static void __init setup_bootmem(void)
{
	unsigned long bootmap_size;
	unsigned long mem_max;
	unsigned long bootmap_pages;
	unsigned long bootmap_start_pfn;
	unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */

	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			unsigned long tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {

				break;
			}
			tmp = pmem_ranges[j-1].start_pfn;
			pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
			pmem_ranges[j].start_pfn = tmp;
			tmp = pmem_ranges[j-1].pages;
			pmem_ranges[j-1].pages = pmem_ranges[j].pages;
			pmem_ranges[j].pages = tmp;
		}
	}
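
	/*
	 * Worked example of the sort, assuming firmware reported three
	 * ranges with start_pfn values 0x800, 0x0, 0x400: the i == 1 pass
	 * sinks 0x0 in front of 0x800, the i == 2 pass sinks 0x400 one
	 * slot, giving 0x0, 0x400, 0x800. Both fields of each entry travel
	 * together, one field per swap.
	 */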

#ifndef CONFIG_DISCONTIGMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */

	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_DISCONTIGMEM\n",
			       pmem_ranges[i].start_pfn -
			       (pmem_ranges[i-1].start_pfn +
			        pmem_ranges[i-1].pages));
			break;
		}
	}
#endif

	if (npmem_ranges > 1) {

		/* Print the memory ranges */

		printk(KERN_INFO "Memory Ranges:\n");

		for (i = 0; i < npmem_ranges; i++) {
			unsigned long start;
			unsigned long size;

			size = (pmem_ranges[i].pages << PAGE_SHIFT);
			start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
			printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
				i, start, start + (size - 1), size >> 20);
		}
	}

	sysram_resource_count = npmem_ranges;
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		res->name = "System RAM";
		res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
		res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();	/* check for "mem=" argument */

	mem_max = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			break;
		}
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_DISCONTIGMEM
	/* Merge the ranges, keeping track of the holes */

	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {

			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif
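
	/*
	 * Worked example of the merge above, assuming two ranges after
	 * sorting: { start_pfn 0x0, pages 0x100 } and
	 * { start_pfn 0x180, pages 0x80 }. The pass records one hole
	 * { start_pfn 0x100, pages 0x80 }, then the ranges collapse into
	 * the single range { start_pfn 0x0, pages 0x200 }; the hole is
	 * handed to reserve_bootmem_node() further down so those pages
	 * are never given out.
	 */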

	bootmap_pages = 0;
	for (i = 0; i < npmem_ranges; i++)
		bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);

	bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;

#ifdef CONFIG_DISCONTIGMEM
	for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
		memset(NODE_DATA(i), 0, sizeof(pg_data_t));
		NODE_DATA(i)->bdata = &bootmem_node_data[i];
	}
	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

	for (i = 0; i < npmem_ranges; i++) {
		node_set_state(i, N_NORMAL_MEMORY);
		node_set_online(i);
	}
#endif

	/*
	 * Initialize and free the full range of memory in each range.
	 * Note that the only writing these routines do are to the bootmap,
	 * and we've made sure to locate the bootmap properly so that they
	 * won't be writing over anything important.
	 */

	bootmap_pfn = bootmap_start_pfn;
	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		bootmap_size = init_bootmem_node(NODE_DATA(i),
						bootmap_pfn,
						start_pfn,
						(start_pfn + npages) );
		free_bootmem_node(NODE_DATA(i),
				(start_pfn << PAGE_SHIFT),
				(npages << PAGE_SHIFT) );
		bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	/* IOMMU is always used to access "high mem" on those boxes
	 * that can support enough mem that a PCI device couldn't
	 * directly DMA to any physical addresses.
	 * ISA DMA support will need to revisit this.
	 */
	max_low_pfn = max_pfn;

	/* bootmap sizing messed up? */
	BUG_ON((bootmap_pfn - bootmap_start_pfn) != bootmap_pages);

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	reserve_bootmem_node(NODE_DATA(0), 0UL,
			(unsigned long)(PAGE0->mem_free +
				PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text),
			(unsigned long)(_end - _text), BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
			((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
			BOOTMEM_DEFAULT);

#ifndef CONFIG_DISCONTIGMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		reserve_bootmem_node(NODE_DATA(0),
				(pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT),
				BOOTMEM_DEFAULT);
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start),
					initrd_reserve, BOOTMEM_DEFAULT);
		}
	}
#endif

	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(_end) - 1;
	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(&data_start) - 1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);
}

static void __init map_pages(unsigned long start_vaddr,
			     unsigned long start_paddr, unsigned long size,
			     pgprot_t pgprot, int force)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long vaddr;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long fv_addr;
	unsigned long gw_addr;
	extern const unsigned long fault_vector_20;
	extern void * const linux_gateway_page;

	ro_start = __pa((unsigned long)_text);
	ro_end = __pa((unsigned long)&data_start);
	fv_addr = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
	gw_addr = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	vaddr = start_vaddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *)pgd_address(*pg_dir);

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER);
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_populate(NULL, pg_dir, __va(pmd));
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *)pmd_address(*pmd);
			if (!pg_table) {
				pg_table = (pte_t *)
					alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_populate_kernel(NULL, pmd, __va(pg_table));

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;

				/*
				 * Map the fault vector writable so we can
				 * write the HPMC checksum.
				 */
				if (force)
					pte = __mk_pte(address, pgprot);
				else if (core_kernel_text(vaddr) &&
					 address != fv_addr)
					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
				else
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
				if (address >= ro_start && address < ro_end
							&& address != fv_addr
							&& address != gw_addr)
					pte = __mk_pte(address, PAGE_KERNEL_RO);
				else
#endif
					pte = __mk_pte(address, pgprot);

				if (address >= end_paddr) {
					if (force)
						break;
					else
						pte_val(pte) = 0;
				}

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}
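
/*
 * Worked example of the index math used above, assuming a 3-level
 * layout with 4 kB pages (PAGE_SHIFT == 12), PMD_SHIFT == 22 and
 * 1024-entry tables; the real constants vary with the configuration.
 * For start_vaddr == 0x10400000:
 *
 *	start_pmd = (0x10400000 >> 22) & (1024 - 1) = 0x41
 *	start_pte = (0x10400000 >> 12) & (1024 - 1) = 0x0
 *
 * so mapping begins at pmd slot 0x41 of the first pgd entry and pte
 * slot 0 of that page table, and both indices reset to 0 once the
 * first partially-filled table has been walked to its end.
 */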

void free_initmem(void)
{
	unsigned long init_begin = (unsigned long)__init_begin;
	unsigned long init_end = (unsigned long)__init_end;

	/* The init text pages are marked R-X.  We have to
	 * flush the icache and mark them RW-
	 *
	 * This is tricky, because map_pages is in the init section.
	 * Do a dummy remap of the data section first (the data
	 * section is already PAGE_KERNEL) to pull in the TLB entries
	 * for map_kernel */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL_RWX, 1);
	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
	 * map_pages */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL, 1);

	/* force the kernel to see the new TLB entries */
	__flush_tlb_range(0, init_begin, init_end);
	/* Attempt to catch anyone trying to execute code here
	 * by filling the page with BRK insns.
	 */
	memset((void *)init_begin, 0x00, init_end - init_begin);
	/* finally dump all the instructions which were cached, since the
	 * pages are no longer executable */
	flush_icache_range(init_begin, init_end);

	free_initmem_default(-1);

	/* set up a new led state on systems shipped with an LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
}


#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	/* rodata memory was already mapped with KERNEL_RO access rights by
	   pagetable_init() and map_pages(). No need to do additional stuff here */
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - __start_rodata) >> 10);
}
#endif


/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))
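
/*
 * Worked example: SET_MAP_OFFSET() rounds its argument up to the next
 * 32 kB boundary and always moves past it, so a hole of at least one
 * byte and at most 32 kB is left between the two areas:
 *
 *	SET_MAP_OFFSET(0x1000) == (void *)0x8000
 *	SET_MAP_OFFSET(0x8000) == (void *)0x10000  (already aligned: skip a full 32 kB)
 *	SET_MAP_OFFSET(0x8001) == (void *)0x10000
 */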

void *parisc_vmalloc_start __read_mostly;
EXPORT_SYMBOL(parisc_vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __read_mostly;
#endif

void __init mem_init(void)
{
	/* Do sanity checks on page table constants */
	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
			> BITS_PER_LONG);

	high_memory = __va((max_pfn << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM
	max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
	free_all_bootmem();
#else
	{
		int i;

		for (i = 0; i < npmem_ranges; i++)
			free_all_bootmem_node(NODE_DATA(i));
	}
#endif

#ifdef CONFIG_PA11
	if (hppa_dma_ops == &pcxl_dma_ops) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
						+ PCXL_DMA_MAP_SIZE);
	} else {
		pcxl_dma_start = 0;
		parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
	}
#else
	parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
	printk("virtual kernel memory layout:\n"
	       "    vmalloc : 0x%p - 0x%p   (%4ld MB)\n"
	       "    memory  : 0x%p - 0x%p   (%4ld MB)\n"
	       "      .init : 0x%p - 0x%p   (%4ld kB)\n"
	       "      .data : 0x%p - 0x%p   (%4ld kB)\n"
	       "      .text : 0x%p - 0x%p   (%4ld kB)\n",

	       (void*)VMALLOC_START, (void*)VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,

	       __va(0), high_memory,
	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

	       __init_begin, __init_end,
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,

	       _etext, _edata,
	       ((unsigned long)_edata - (unsigned long)_etext) >> 10,

	       _text, _etext,
	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}

unsigned long *empty_zero_page __read_mostly;
EXPORT_SYMBOL(empty_zero_page);

void show_mem(unsigned int filter)
{
	int i, free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas(filter);
	if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
		return;
#ifndef CONFIG_DISCONTIGMEM
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!page_count(&mem_map[i]))
			free++;
		else
			shared += page_count(&mem_map[i]) - 1;
	}
#else
	for (i = 0; i < npmem_ranges; i++) {
		int j;

		for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
			struct page *p;
			unsigned long flags;

			pgdat_resize_lock(NODE_DATA(i), &flags);
			p = nid_page_nr(i, j) - node_start_pfn(i);

			total++;
			if (PageReserved(p))
				reserved++;
			else if (PageSwapCache(p))
				cached++;
			else if (!page_count(p))
				free++;
			else
				shared += page_count(p) - 1;
			pgdat_resize_unlock(NODE_DATA(i), &flags);
		}
	}
#endif
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);


#ifdef CONFIG_DISCONTIGMEM
	{
		struct zonelist *zl;
		int i, j;

		for (i = 0; i < npmem_ranges; i++) {
			zl = node_zonelist(i, 0);
			for (j = 0; j < MAX_NR_ZONES; j++) {
				struct zoneref *z;
				struct zone *zone;

				printk("Zone list for zone %d on node %d: ", j, i);
				for_each_zone_zonelist(zone, z, zl, j)
					printk("[%d/%s] ", zone_to_nid(zone),
					       zone->name);
				printk("\n");
			}
		}
	}
#endif
}

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
		size = pmem_ranges[range].pages << PAGE_SHIFT;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			  size, PAGE_KERNEL, 0);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			  initrd_end - initrd_start, PAGE_KERNEL, 0);
	}
#endif

	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);
}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		  PAGE_SIZE, PAGE_GATEWAY, 1);
}

#ifdef CONFIG_HPUX
void
map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long address;
	unsigned long hpux_gw_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const hpux_gateway_page;

	hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup HP-UX Gateway page.
	 *
	 * The HP-UX gateway page resides in the user address space,
	 * so it needs to be aliased into each process.
	 */

	pg_dir = pgd_offset(mm, hpux_gw_page_addr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = __pa(&hpux_gateway_page);
#if PTRS_PER_PMD == 1
	pmd = (pmd_t *)__pa(pg_dir);
#else
	pmd = (pmd_t *) pgd_address(*pg_dir);

	/*
	 * pmd is physical at this point
	 */

	if (!pmd) {
		pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
		pmd = (pmd_t *) __pa(pmd);
	}

	__pgd_val_set(*pg_dir, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pmd);
#endif
	/* now change pmd to kernel virtual addresses */

	pmd = (pmd_t *)__va(pmd) + start_pmd;

	/*
	 * pg_table is physical at this point
	 */

	pg_table = (pte_t *) pmd_address(*pmd);
	if (!pg_table)
		pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));

	__pmd_val_set(*pmd, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pg_table);

	/* now change pg_table to kernel virtual addresses */

	pg_table = (pte_t *) __va(pg_table) + start_pte;
	set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
}
EXPORT_SYMBOL(map_hpux_gateway_page);
#endif

void __init paging_init(void)
{
	int i;

	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long zones_size[MAX_NR_ZONES] = { 0, };

		zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;

#ifdef CONFIG_DISCONTIGMEM
		/* Need to initialize the pfnnid_map before we can initialize
		   the zone */
		{
			int j;
			for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
			     j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
			     j++) {
				pfnnid_map[j] = i;
			}
		}
#endif

		free_area_init_node(i, zones_size,
				    pmem_ranges[i].start_pfn, NULL);
	}
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection IDs, so that is the limiting factor.
 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))

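/*
 * Worked example of the sizing above: on a PA20 kernel NR_SPACE_IDS is
 * 262144 (2^18), so the bitmap is 262144 / 8 = 32 kB either way, i.e.
 * 4096 longs assuming 64-bit longs, one bit per space ID, and recycling
 * kicks in once 131072 IDs are sitting dirty.
 */
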
static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);

unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		BUG_ON(free_space_ids == 0);
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}
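
/*
 * A standalone sketch of the bitmap bookkeeping in alloc_sid()/free_sid(),
 * with hypothetical stand-ins for SHIFT_PER_LONG and BITS_PER_LONG
 * (64-bit longs assumed); compile it separately to play with it. The
 * allocated bit index only becomes a usable space ID once scaled by
 * SPACEID_SHIFT in the return statement above.
 *
 *	#include <stdio.h>
 *
 *	#define EXAMPLE_BITS_PER_LONG  64
 *	#define EXAMPLE_SHIFT_PER_LONG 6	// log2(64)
 *
 *	static unsigned long bitmap[4];		// room for 256 ids
 *
 *	static void example_set_bit(unsigned long index)
 *	{
 *		bitmap[index >> EXAMPLE_SHIFT_PER_LONG] |=
 *			1UL << (index & (EXAMPLE_BITS_PER_LONG - 1));
 *	}
 *
 *	int main(void)
 *	{
 *		example_set_bit(70);		// word 1, bit 6
 *		printf("%lx\n", bitmap[1]);	// prints 40
 *		return 0;
 *	}
 */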

void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}


#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}

	return;
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif
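
/*
 * Why the XOR above works: a dirty bit is only ever set for an ID that
 * is currently marked allocated in space_id[], so space_id[i] ^ dirty[i]
 * clears exactly the dirty bits and leaves everything else alone.
 * For one word:
 *
 *	space_id[i] = 0b1011  (ids 0, 1 and 3 allocated)
 *	dirty[i]    = 0b0010  (id 1 freed, awaiting a TLB flush)
 *	XOR         = 0b1001  (id 1 is available for reuse again)
 */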

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
	int do_recycle;

	__inc_irq_stat(irq_tlb_count);
	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		BUG_ON(recycle_inuse); /* FIXME: Use a semaphore/wait queue here */
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu(flush_tlb_all_local, NULL, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	__inc_irq_stat(irq_tlb_count);
	spin_lock(&sid_lock);
	flush_tlb_all_local(NULL);
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif