/* MN10300 Page table management
 *
 * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Modified by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/quicklist.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_ERR "set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_ERR "set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR "set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));
	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	local_flush_tlb_one(vaddr);
}

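/*
 * Usage sketch (illustrative addition, not part of the original file, and
 * kept out of the build by #if 0): installing a single large-page kernel
 * mapping.  The address, pfn and protection value are hypothetical; vaddr
 * must be PMD_SIZE-aligned and pfn aligned to PTRS_PER_PTE small pages, or
 * the checks above will reject the request.
 */
#if 0
static void __init example_map_large_page(void)
{
	unsigned long vaddr = 0xd0000000UL;	/* hypothetical, PMD_SIZE-aligned */
	unsigned long pfn = 0x80000UL;		/* hypothetical, PTRS_PER_PTE-aligned */

	set_pmd_pfn(vaddr, pfn, PAGE_KERNEL);	/* pgprot choice is an assumption */
}
#endif
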
/* Allocate and zero one page to back kernel PTEs. */
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (pte)
		clear_page(pte);
	return pte;
}

/* Allocate a page of user PTEs, from highmem if configured. */
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
#endif
	if (!pte)
		return NULL;
	clear_highpage(pte);
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

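/*
 * Usage sketch (illustrative addition, not part of the original file, and
 * kept out of the build by #if 0): each allocator above pairs with its
 * counterpart from <asm/pgalloc.h> -- pte_free_kernel() for the kernel
 * variant and pte_free(), which undoes pgtable_page_ctor(), for the user
 * variant.
 */
#if 0
static void example_pte_cycle(struct mm_struct *mm, unsigned long addr)
{
	pte_t *kpte = pte_alloc_one_kernel(mm, addr);
	struct page *upte = pte_alloc_one(mm, addr);

	if (kpte)
		pte_free_kernel(mm, kpte);
	if (upte)
		pte_free(mm, upte);
}
#endif
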
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * If the locking proves to be non-performant, a ticketing scheme with
 * checks at dup_mmap(), exec(), and other mmlist addition points
 * could be used. The locking scheme was chosen on the basis of
 * manfred's recommendations and having no core impact whatsoever.
 * -- nyc
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;

/*
 * Push a pgd's page onto the head of pgd_list.  The list is threaded
 * through page->index (next pointer) and page_private() (address of the
 * previous element's next pointer), giving O(1) insertion and removal.
 * Caller must hold pgd_lock.
 */
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	page->index = (unsigned long) pgd_list;
	if (pgd_list)
		set_page_private(pgd_list, (unsigned long) &page->index);
	pgd_list = page;
	set_page_private(page, (unsigned long) &pgd_list);
}

/* Unlink a pgd's page from pgd_list.  Caller must hold pgd_lock. */
static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);
	next = (struct page *) page->index;
	pprev = (struct page **) page_private(page);
	*pprev = next;
	if (next)
		set_page_private(next, (unsigned long) pprev);
}

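/*
 * Traversal sketch (illustrative addition, not part of the original file,
 * and kept out of the build by #if 0): a consumer such as pageattr.c
 * takes pgd_lock and follows the page->index links to visit every pgd
 * that may need a kernel mapping fixed up.
 */
#if 0
static void example_walk_pgds(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);
	for (page = pgd_list; page; page = (struct page *) page->index) {
		pgd_t *pgd = (pgd_t *) page_address(page);
		/* inspect or adjust the kernel half of *pgd here */
		(void) pgd;
	}
	spin_unlock_irqrestore(&pgd_lock, flags);
}
#endif
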
/*
 * Initialise a newly allocated pgd: copy in the kernel mappings from
 * swapper_pg_dir and, in the two-level (PTRS_PER_PMD == 1) case, clear
 * the user entries and publish the pgd on pgd_list under pgd_lock.
 */
void pgd_ctor(void *pgd)
{
	unsigned long flags;

	if (PTRS_PER_PMD == 1)
		spin_lock_irqsave(&pgd_lock, flags);

	memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
	       swapper_pg_dir + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	if (PTRS_PER_PMD > 1)
		return;

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
	memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

/* Allocate a pgd from per-CPU quicklist 0; pgd_ctor() is run on pages
 * that come fresh from the page allocator. */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
}

/* Return a pgd to quicklist 0 for reuse; pgd_dtor() is applied later,
 * when check_pgt_cache() trims cached pages back to the page allocator. */
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	quicklist_free(0, pgd_dtor, pgd);
}

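/*
 * Lifecycle sketch (illustrative addition, not part of the original file,
 * and kept out of the build by #if 0): a pgd obtained here already has
 * the kernel mappings copied in by pgd_ctor(); pgd_free() only parks it
 * on the quicklist, so the page can be recycled cheaply by the next user.
 */
#if 0
static pgd_t *example_pgd_cycle(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_alloc(mm);

	if (pgd)
		pgd_free(mm, pgd);	/* cached, not yet given back to the buddy allocator */
	return NULL;
}
#endif
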
/* Nothing to set up here: pgd pages are managed by the quicklist above. */
void __init pgtable_cache_init(void)
{
}

/*
 * Trim per-CPU quicklist 0 back towards a floor of 25 cached pages,
 * releasing at most 16 pages per call and running pgd_dtor on each page
 * handed back to the page allocator.
 */
void check_pgt_cache(void)
{
	quicklist_trim(0, pgd_dtor, 25, 16);
}