Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #include <linux/sched.h> |
2 | #include <linux/kernel.h> | |
3 | #include <linux/errno.h> | |
4 | #include <linux/mm.h> | |
27eb0b28 | 5 | #include <linux/nmi.h> |
1da177e4 LT |
6 | #include <linux/swap.h> |
7 | #include <linux/smp.h> | |
8 | #include <linux/highmem.h> | |
1da177e4 LT |
9 | #include <linux/pagemap.h> |
10 | #include <linux/spinlock.h> | |
11 | ||
1da177e4 LT |
12 | #include <asm/pgtable.h> |
13 | #include <asm/pgalloc.h> | |
14 | #include <asm/fixmap.h> | |
15 | #include <asm/e820.h> | |
16 | #include <asm/tlb.h> | |
17 | #include <asm/tlbflush.h> | |
56f0e74c | 18 | #include <asm/io.h> |
1da177e4 | 19 | |
/*
 * Size reserved for the vmalloc area: 128 MiB by default (128 << 20).
 * Overridable at boot via the "vmalloc=" parameter (see parse_vmalloc()
 * below, which adds VMALLOC_OFFSET for the guard hole).
 */
unsigned int __VMALLOC_RESERVE = 128 << 20;
1da177e4 LT |
22 | /* |
23 | * Associate a virtual page frame with a given physical page frame | |
24 | * and protection flags for that frame. | |
25 | */ | |
d494a961 | 26 | void set_pte_vaddr(unsigned long vaddr, pte_t pteval) |
1da177e4 LT |
27 | { |
28 | pgd_t *pgd; | |
29 | pud_t *pud; | |
30 | pmd_t *pmd; | |
31 | pte_t *pte; | |
32 | ||
33 | pgd = swapper_pg_dir + pgd_index(vaddr); | |
34 | if (pgd_none(*pgd)) { | |
35 | BUG(); | |
36 | return; | |
37 | } | |
38 | pud = pud_offset(pgd, vaddr); | |
39 | if (pud_none(*pud)) { | |
40 | BUG(); | |
41 | return; | |
42 | } | |
43 | pmd = pmd_offset(pud, vaddr); | |
44 | if (pmd_none(*pmd)) { | |
45 | BUG(); | |
46 | return; | |
47 | } | |
48 | pte = pte_offset_kernel(pmd, vaddr); | |
dcb32d99 | 49 | if (!pte_none(pteval)) |
b40c7579 | 50 | set_pte_at(&init_mm, vaddr, pte, pteval); |
b0bfece4 JB |
51 | else |
52 | pte_clear(&init_mm, vaddr, pte); | |
1da177e4 LT |
53 | |
54 | /* | |
55 | * It's enough to flush this one mapping. | |
56 | * (PGE mappings get flushed as well) | |
1da177e4 LT |
57 | */ |
58 | __flush_tlb_one(vaddr); | |
59 | } | |
60 | ||
/*
 * Top of the fixmap address range; exported so modules referencing
 * fixmap addresses link correctly.  Presumably movable via
 * reserve_top_address() (see parse_reservetop()) — confirm against
 * the fixmap headers.
 */
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);
bef1568d YL |
64 | /* |
65 | * vmalloc=size forces the vmalloc area to be exactly 'size' | |
66 | * bytes. This can be used to increase (or decrease) the | |
67 | * vmalloc area - the default is 128m. | |
68 | */ | |
69 | static int __init parse_vmalloc(char *arg) | |
70 | { | |
71 | if (!arg) | |
72 | return -EINVAL; | |
73 | ||
e621bd18 DY |
74 | /* Add VMALLOC_OFFSET to the parsed value due to vm area guard hole*/ |
75 | __VMALLOC_RESERVE = memparse(arg, &arg) + VMALLOC_OFFSET; | |
bef1568d YL |
76 | return 0; |
77 | } | |
78 | early_param("vmalloc", parse_vmalloc); | |
79 | ||
80 | /* | |
81 | * reservetop=size reserves a hole at the top of the kernel address space which | |
82 | * a hypervisor can load into later. Needed for dynamically loaded hypervisors, | |
83 | * so relocating the fixmap can be done before paging initialization. | |
84 | */ | |
85 | static int __init parse_reservetop(char *arg) | |
86 | { | |
87 | unsigned long address; | |
88 | ||
89 | if (!arg) | |
90 | return -EINVAL; | |
91 | ||
92 | address = memparse(arg, &arg); | |
93 | reserve_top_address(address); | |
5b7c73e0 | 94 | early_ioremap_init(); |
bef1568d YL |
95 | return 0; |
96 | } | |
97 | early_param("reservetop", parse_reservetop); |