/*
 * linux/arch/i386/mm/pgtable.c
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

void show_mem(void)
{
	int total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;
	struct page_state ps;
	unsigned long flags;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
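	/*
	 * The resize lock keeps memory hotplug from changing
	 * node_spanned_pages while a node is being scanned.
	 */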
	for_each_online_pgdat(pgdat) {
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

	get_page_state(&ps);
	printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
	printk(KERN_INFO "%lu pages writeback\n", ps.nr_writeback);
	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
	printk(KERN_INFO "%lu pages slab\n", global_page_state(NR_SLAB));
	printk(KERN_INFO "%lu pages pagetables\n",
		global_page_state(NR_PAGETABLE));
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

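	/*
	 * The kernel pagetables covering the fixmap range are built at
	 * boot, so an empty top-level entry here can only be a bug.
	 */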
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <pfn,flags> stored as-is, to permit clearing entries */
	set_pte(pte, pfn_pte(pfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
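	/* with PAE, PTRS_PER_PTE is 512: the base must be 2MB-aligned */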
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));
	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

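	/*
	 * With CONFIG_HIGHPTE, pte pages may live in highmem; they are
	 * mapped via kmap_atomic() before their entries are touched.
	 */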
#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	return pte;
}

void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
{
	memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * The locking scheme was chosen on the basis of manfred's
 * recommendations and having no core impact whatsoever.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;
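
/*
 * The list is threaded through the pgds' struct pages: page->index holds
 * the pointer to the next element, and page_private() points back at the
 * previous element's next field, so pgd_list_del() unlinks in O(1).
 */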
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	page->index = (unsigned long)pgd_list;
	if (pgd_list)
		set_page_private(pgd_list, (unsigned long)&page->index);
	pgd_list = page;
	set_page_private(page, (unsigned long)&pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);
	next = (struct page *)page->index;
	pprev = (struct page **)page_private(page);
	*pprev = next;
	if (next)
		set_page_private(next, (unsigned long)pprev);
}
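
/*
 * PTRS_PER_PMD == 1 is the two-level (non-PAE) layout; only that case
 * zeroes the user entries here and links the pgd onto pgd_list, under
 * pgd_lock, so pageattr.c can reach it later.
 */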
void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
	unsigned long flags;

	if (PTRS_PER_PMD == 1) {
		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
		spin_lock_irqsave(&pgd_lock, flags);
	}

	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
			swapper_pg_dir + USER_PTRS_PER_PGD,
			KERNEL_PGD_PTRS);
	if (PTRS_PER_PMD > 1)
		return;

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	int i;
	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

	if (PTRS_PER_PMD == 1 || !pgd)
		return pgd;

	for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
		if (!pmd)
			goto out_oom;
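		/*
		 * The low bit is _PAGE_PRESENT: a PAE top-level entry
		 * takes just that flag plus the pmd's physical address.
		 */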
		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
	}
	return pgd;

out_oom:
	for (i--; i >= 0; i--)
		kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
	kmem_cache_free(pgd_cache, pgd);
	return NULL;
}

void pgd_free(pgd_t *pgd)
{
	int i;

	/* in the PAE case user pgd entries are overwritten before usage */
	if (PTRS_PER_PMD > 1)
		for (i = 0; i < USER_PTRS_PER_PGD; ++i)
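			/* subtract 1 to strip _PAGE_PRESENT and recover
			 * the pmd's physical address for __va() */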
			kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
	/* in the non-PAE case, free_pgtables() clears user pgd entries */
	kmem_cache_free(pgd_cache, pgd);
}