#ifndef __ASM_SH64_PGTABLE_H
#define __ASM_SH64_PGTABLE_H

#include <asm-generic/4level-fixup.h>

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/pgtable.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 * This file contains the functions and defines necessary to modify and use
 * the SuperH page table tree.
 */

#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/page.h>
#include <linux/threads.h>

struct vm_area_struct;

extern void paging_init(void);

/* We provide our own get_unmapped_area to avoid cache synonym issues. */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * Basically we have the same two-level (which is the logical three-level
 * Linux page table layout, folded) page tables as the i386.
 */

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))

#endif /* !__ASSEMBLY__ */

/*
 * NEFF and NPHYS related defines.
 * FIXME: These need to be model-dependent. For now this is OK: SH5-101 and
 * SH5-103 both implement 32 bits effective and 32 bits physical. But future
 * implementations may extend beyond this.
 */
#define NEFF		32
#define NEFF_SIGN	(1LL << (NEFF - 1))
#define NEFF_MASK	(-1LL << NEFF)

#define NPHYS		32
#define NPHYS_SIGN	(1LL << (NPHYS - 1))
#define NPHYS_MASK	(-1LL << NPHYS)
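
/*
 * Illustrative worked example (an editorial sketch, not part of the original
 * header): with NPHYS == 32, a physical address with bit 31 set must be
 * sign-extended when stored in a 64-bit PTE, which is exactly what set_pte()
 * below does:
 *
 *	x = 0x0000000080001000ULL;
 *	x & NPHYS_SIGN		-> nonzero (bit 31 is set)
 *	x | NPHYS_MASK		-> 0xffffffff80001000ULL
 *
 * An address with bit 31 clear (e.g. 0x40001000) is stored unchanged.
 */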

/* Typically a 2-level scheme is sufficient for up to 32 bits of virtual
   address space; beyond that, 3-level would be appropriate. */
#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
/* For 4k pages, this contains 512 entries, i.e. 9 bits worth of address. */
#define PTRS_PER_PTE	((1<<PAGE_SHIFT)/sizeof(unsigned long long))
#define PTE_MAGNITUDE	3	/* sizeof(unsigned long long) magnitude */
#define PTE_SHIFT	PAGE_SHIFT
#define PTE_BITS	(PAGE_SHIFT - PTE_MAGNITUDE)

/* top level: PGD. */
#define PGDIR_SHIFT	(PTE_SHIFT + PTE_BITS)
#define PGD_BITS	(NEFF - PGDIR_SHIFT)
#define PTRS_PER_PGD	(1<<PGD_BITS)

/* middle level: PMD. This doesn't do anything for the 2-level case. */
#define PTRS_PER_PMD	(1)

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PMD_SHIFT	PGDIR_SHIFT
#define PMD_SIZE	PGDIR_SIZE
#define PMD_MASK	PGDIR_MASK

#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
/*
 * three-level asymmetric paging structure: PGD is top level.
 * The asymmetry comes from 32-bit pointers and 64-bit PTEs.
 */
/* bottom level: PTE. It's 9 bits = 512 pointers */
#define PTRS_PER_PTE	((1<<PAGE_SHIFT)/sizeof(unsigned long long))
#define PTE_MAGNITUDE	3	/* sizeof(unsigned long long) magnitude */
#define PTE_SHIFT	PAGE_SHIFT
#define PTE_BITS	(PAGE_SHIFT - PTE_MAGNITUDE)

/* middle level: PMD. It's 10 bits = 1024 pointers */
#define PTRS_PER_PMD	((1<<PAGE_SHIFT)/sizeof(unsigned long long *))
#define PMD_MAGNITUDE	2	/* sizeof(unsigned long long *) magnitude */
#define PMD_SHIFT	(PTE_SHIFT + PTE_BITS)
#define PMD_BITS	(PAGE_SHIFT - PMD_MAGNITUDE)

/* top level: PGD. It's 1 bit = 2 pointers */
#define PGDIR_SHIFT	(PMD_SHIFT + PMD_BITS)
#define PGD_BITS	(NEFF - PGDIR_SHIFT)
#define PTRS_PER_PGD	(1<<PGD_BITS)

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#else
#error "No defined number of page table levels"
#endif
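
/*
 * Illustrative worked example (editorial sketch, assuming 4 KB pages so that
 * PAGE_SHIFT == 12, and NEFF == 32; not part of the original header):
 *
 * 2-level: PTE_BITS = 12 - 3 = 9 (512 PTEs per page), PGDIR_SHIFT = 12 + 9
 *	    = 21, PGD_BITS = 32 - 21 = 11, so PTRS_PER_PGD = 2048 and each
 *	    PGD entry maps PGDIR_SIZE = 2 MB.
 *
 * 3-level: PMD_BITS = 12 - 2 = 10 (1024 pointers per page), PGDIR_SHIFT =
 *	    21 + 10 = 31, PGD_BITS = 32 - 31 = 1, so PTRS_PER_PGD = 2 and
 *	    each PGD entry maps PGDIR_SIZE = 2 GB.
 */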

/*
 * Error outputs.
 */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Table setting routines. Used within arch/mm only.
 */
#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval)
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
{
	unsigned long long x = ((unsigned long long) pteval.pte);
	unsigned long long *xp = (unsigned long long *) pteptr;
	/*
	 * Sign-extend based on NPHYS.
	 */
	*(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

static __inline__ void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long) ptep;
}

/*
 * PGD defines. Top level.
 */

/* To find an entry in a generic PGD. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define __pgd_offset(address)	pgd_index(address)
#define pgd_offset(mm, address)	((mm)->pgd+pgd_index(address))

/* To find an entry in a kernel PGD. */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/*
 * PGD level access routines.
 *
 * Note 1:
 * There's no need to use physical addresses since the tree walk is
 * performed entirely in software, up until the PTE translation.
 *
 * Note 2:
 * A PGD entry can be uninitialized (_PGD_UNUSED), generically bad,
 * clear (_PGD_EMPTY), or present. When present, the lower 3 nibbles
 * contain _KERNPG_TABLE and, being a kernel virtual pointer, bit 31
 * must also be 1. Assuming an arbitrary clear value with bit 31 set
 * to 0 and the lower 3 nibbles set to 0xFFF (_PGD_EMPTY), any other
 * value is a bad PGD that must be reported via printk().
 */
#define _PGD_EMPTY	0x0

#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
static inline int pgd_none(pgd_t pgd)	{ return 0; }
static inline int pgd_bad(pgd_t pgd)	{ return 0; }
#define pgd_present(pgd)	((pgd_val(pgd) & _PAGE_PRESENT) ? 1 : 0)
#define pgd_clear(xx)		do { } while (0)

#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
#define pgd_present(pgd_entry)	(1)
#define pgd_none(pgd_entry)	(pgd_val((pgd_entry)) == _PGD_EMPTY)
/* TODO: Think later about what a useful definition of 'bad' would be now. */
#define pgd_bad(pgd_entry)	(0)
#define pgd_clear(pgd_entry_p)	(set_pgd((pgd_entry_p), __pgd(_PGD_EMPTY)))

#endif

#define pgd_page_vaddr(pgd_entry)	((unsigned long) (pgd_val(pgd_entry) & PAGE_MASK))
#define pgd_page(pgd)			(virt_to_page(pgd_val(pgd)))

/*
 * PMD defines. Middle level.
 */

/* PGD to PMD dereferencing */
#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) dir;
}
#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
#define __pmd_offset(address) \
	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pmd_offset(dir, addr) \
	((pmd_t *) ((pgd_val(*(dir))) & PAGE_MASK) + __pmd_offset((addr)))
#endif

/*
 * PMD level access routines. Same notes as above.
 */
#define _PMD_EMPTY	0x0
/* A PMD is either empty or present; it is never paged out. */
#define pmd_present(pmd_entry)	(pmd_val(pmd_entry) & _PAGE_PRESENT)
#define pmd_clear(pmd_entry_p)	(set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))
#define pmd_none(pmd_entry)	(pmd_val((pmd_entry)) == _PMD_EMPTY)
#define pmd_bad(pmd_entry)	((pmd_val(pmd_entry) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_page_vaddr(pmd_entry) \
	((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))

#define pmd_page(pmd) \
	(virt_to_page(pmd_val(pmd)))

/* PMD to PTE dereferencing */
#define pte_index(address) \
	((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(dir, addr) \
	((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))

#define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel(dir, addr)
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)
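
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * looking up the PTE for a kernel virtual address walks the tree with the
 * helpers above, along the lines of:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * with the pgd_none()/pgd_bad() and pmd_none()/pmd_bad() checks between the
 * steps omitted here for brevity.
 */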

/* Round it up! */
#define USER_PTRS_PER_PGD	((TASK_SIZE+PGDIR_SIZE-1)/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#ifndef __ASSEMBLY__
#define VMALLOC_END	0xff000000
#define VMALLOC_START	0xf0000000
#define VMALLOC_VMADDR(x) ((unsigned long)(x))

#define IOBASE_VADDR	0xff000000
#define IOBASE_END	0xffffffff

/*
 * PTEL coherent flags.
 * See Chapter 17, ST50 CPU Core Volume 1, Architecture.
 */
/* The bits that are required in the SH-5 TLB are placed in the h/w-defined
   positions, to avoid expensive bit shuffling on every refill. The remaining
   bits are used for s/w purposes and masked out on each refill.

   Note: the PTE slots are used to hold data of type swp_entry_t when a page
   is swapped out. Only the _PAGE_PRESENT flag is significant when the page
   is swapped out, and it must be placed so that it doesn't overlap either
   the type or offset fields of swp_entry_t. For x86, offset is at [31:8]
   and type at [6:1], with _PAGE_PRESENT at bit 0 for both pte_t and
   swp_entry_t. This scheme doesn't map to SH-5 because bit [0] controls
   cacheability. So bit [2] is used for _PAGE_PRESENT and the type field of
   swp_entry_t is split into 2 pieces. That is handled by __swp_entry() and
   __swp_type() below. */
#define _PAGE_WT	0x001  /* CB0: if cacheable, 1->write-thru, 0->write-back */
#define _PAGE_DEVICE	0x001  /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
#define _PAGE_CACHABLE	0x002  /* CB1: uncachable/cachable */
#define _PAGE_PRESENT	0x004  /* software: page is present */
#define _PAGE_FILE	0x004  /* software: only when !present */
#define _PAGE_SIZE0	0x008  /* SZ0-bit : size of page */
#define _PAGE_SIZE1	0x010  /* SZ1-bit : size of page */
#define _PAGE_SHARED	0x020  /* software: reflects PTEH's SH */
#define _PAGE_READ	0x040  /* PR0-bit : read access allowed */
#define _PAGE_EXECUTE	0x080  /* PR1-bit : execute access allowed */
#define _PAGE_WRITE	0x100  /* PR2-bit : write access allowed */
#define _PAGE_USER	0x200  /* PR3-bit : user space access allowed */
#define _PAGE_DIRTY	0x400  /* software: page accessed in write */
#define _PAGE_ACCESSED	0x800  /* software: page referenced */

/* Mask which drops the software flags */
#define _PAGE_FLAGS_HARDWARE_MASK	0xfffffffffffff3dbLL
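
/*
 * Sanity check (editorial note, not part of the original header): the
 * software-only bits are _PAGE_PRESENT | _PAGE_SHARED | _PAGE_DIRTY |
 * _PAGE_ACCESSED = 0xc24, and ~0xc24 over 64 bits is 0xfffffffffffff3db,
 * which matches the mask above.
 */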

/*
 * HugeTLB support
 */
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define _PAGE_SZHUGE	(_PAGE_SIZE0)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define _PAGE_SZHUGE	(_PAGE_SIZE1)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define _PAGE_SZHUGE	(_PAGE_SIZE0 | _PAGE_SIZE1)
#endif

/*
 * Default flags for a kernel page.
 * This is fundamentally also SHARED because the main use of this define
 * (other than for PGD/PMD entries) is for the VMALLOC pool, which is
 * contextless.
 *
 * _PAGE_EXECUTE is required for modules.
 */
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			 _PAGE_EXECUTE | \
			 _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_DIRTY | \
			 _PAGE_SHARED)

/* Default flags for a user page */
#define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)

#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE	__pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_USER | \
				 _PAGE_SHARED)
/* We need to include _PAGE_EXECUTE in PAGE_COPY because it is the default
 * protection mode for the stack. */
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHABLE | \
				 _PAGE_ACCESSED | _PAGE_USER | _PAGE_EXECUTE)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHABLE | \
				 _PAGE_ACCESSED | _PAGE_USER)
#define PAGE_KERNEL	__pgprot(_KERNPG_TABLE)

/*
 * In ST50 we have full permissions (Read/Write/Execute/Shared).
 * Just match 'em all. These are for mmap(), therefore all at least
 * User/Cachable/Present/Accessed. No point in making Fault on Write.
 */
#define __MMAP_COMMON	(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED)
       /* sxwr */
#define __P000	__pgprot(__MMAP_COMMON)
#define __P001	__pgprot(__MMAP_COMMON | _PAGE_READ)
#define __P010	__pgprot(__MMAP_COMMON)
#define __P011	__pgprot(__MMAP_COMMON | _PAGE_READ)
#define __P100	__pgprot(__MMAP_COMMON | _PAGE_EXECUTE)
#define __P101	__pgprot(__MMAP_COMMON | _PAGE_EXECUTE | _PAGE_READ)
#define __P110	__pgprot(__MMAP_COMMON | _PAGE_EXECUTE)
#define __P111	__pgprot(__MMAP_COMMON | _PAGE_EXECUTE | _PAGE_READ)

#define __S000	__pgprot(__MMAP_COMMON | _PAGE_SHARED)
#define __S001	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_READ)
#define __S010	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_WRITE)
#define __S011	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_READ | _PAGE_WRITE)
#define __S100	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE)
#define __S101	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_READ)
#define __S110	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_WRITE)
#define __S111	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_READ | _PAGE_WRITE)
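
/*
 * Editorial note (not part of the original header): the sxwr suffix encodes
 * the shared/exec/write/read mmap() protection bits, e.g. __P011 is a
 * private read+write mapping and __S011 its shared counterpart. The __Pxxx
 * write variants deliberately omit _PAGE_WRITE, so the first write to a
 * private mapping faults and can be resolved copy-on-write, whereas
 * __S010/__S011/__S110/__S111 set _PAGE_WRITE directly.
 */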

/* Make it a device mapping for maximum safety (e.g. for mapping device
   registers into user-space via /dev/map). */
#define pgprot_noncached(x)	__pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
#define pgprot_writecombine(prot)	__pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)

/*
 * Handling allocation failures during page table setup.
 */
extern void __handle_bad_pmd_kernel(pmd_t *pmd);
#define __handle_bad_pmd(x)	__handle_bad_pmd_kernel(x)

/*
 * PTE level access routines.
 *
 * Note 1:
 * It's the tree walk leaf. This is the physical address to be stored.
 *
 * Note 2:
 * Regarding the choice of _PTE_EMPTY:
 *
 * We must choose a bit pattern that cannot be valid, whether or not the page
 * is present. bit[2]==1 => present, bit[2]==0 => swapped out. If swapped
 * out, bits [31:8], [6:3] and [1:0] are under swapper control, so only
 * bit[7] is left for us to select. If we force bit[7]==0 when swapped out,
 * we could use the combination bit[7,2]=2'b10 to indicate an empty PTE.
 * Alternatively, if we force bit[7]==1 when swapped out, we can use all
 * zeroes to indicate empty. This is convenient, because the page tables get
 * cleared to zero when they are allocated.
 */
#define _PTE_EMPTY	0x0
#define pte_present(x)		(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,xp)	(set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY)))
#define pte_none(x)		(pte_val(x) == _PTE_EMPTY)

/*
 * Some definitions to translate between mem_map, PTEs, and page
 * addresses:
 */

/*
 * Given a PTE, return the index of the mem_map[] entry corresponding
 * to the page frame the PTE refers to. Get the absolute physical address,
 * make it a relative physical address, and translate it to an index.
 */
#define pte_pagenr(x)	(((unsigned long) (pte_val(x)) - \
			 __MEMORY_START) >> PAGE_SHIFT)

/*
 * Given a PTE, return the "struct page *".
 */
#define pte_page(x)	(mem_map + pte_pagenr(x))

/*
 * Return the number of (down-rounded) MB corresponding to x pages.
 */
#define pages_to_mb(x)	((x) >> (20-PAGE_SHIFT))
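
/*
 * Illustrative worked example (editorial sketch; the 0x80000000 base and
 * 4 KB page size are assumptions for the arithmetic only, not values taken
 * from this header): with __MEMORY_START == 0x80000000 and PAGE_SHIFT == 12,
 * a PTE holding physical address 0x80042000 gives
 * pte_pagenr = (0x80042000 - 0x80000000) >> 12 = 0x42, i.e. mem_map[0x42].
 */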

/*
 * The following only have defined behavior if pte_present() is true.
 */
static inline int pte_read(pte_t pte)	{ return pte_val(pte) & _PAGE_READ; }
static inline int pte_exec(pte_t pte)	{ return pte_val(pte) & _PAGE_EXECUTE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }

static inline pte_t pte_rdprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_READ)); return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
static inline pte_t pte_exprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_EXECUTE)); return pte; }
static inline pte_t pte_mkclean(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }

static inline pte_t pte_mkread(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_READ)); return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
static inline pte_t pte_mkexec(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_EXECUTE)); return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkhuge(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }

/*
 * Conversion functions: convert a page and protection to a page entry.
 *
 * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
 */
#define mk_pte(page,pgprot)						\
({									\
	pte_t __pte;							\
									\
	set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) |	\
		__MEMORY_START | pgprot_val((pgprot))));		\
	__pte;								\
})

/*
 * This takes an (absolute) physical page address that is used
 * by the remapping functions.
 */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; })

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
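
/*
 * Editorial sketch (not part of the original header): pte_modify() keeps the
 * bits in _PAGE_CHG_MASK (the page frame plus accessed/dirty) and replaces
 * everything else, so re-protecting an existing mapping can look like:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 */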

typedef pte_t *pte_addr_t;

extern void update_mmu_cache(struct vm_area_struct *vma,
			     unsigned long address, pte_t pte);

/* Encode and decode a swap entry */
#define __swp_type(x)			(((x).val & 3) + (((x).val >> 1) & 0x3c))
#define __swp_offset(x)			((x).val >> 8)
#define __swp_entry(type, offset)	((swp_entry_t) { ((offset << 8) + ((type & 0x3c) << 1) + (type & 3)) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
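
/*
 * Illustrative worked example (editorial addition, not part of the original
 * header): with type = 0x17 and offset = 0x1234,
 *
 *	__swp_entry(0x17, 0x1234).val
 *		= (0x1234 << 8) + ((0x17 & 0x3c) << 1) + (0x17 & 3)
 *		= 0x123400 + 0x28 + 0x3 = 0x12342b
 *
 * which keeps bit [2] (_PAGE_PRESENT) clear, and decodes back through
 * __swp_type() -> 0x17 and __swp_offset() -> 0x1234. This is the split
 * type field described in the PTEL flags comment above.
 */
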
/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte))
#define pgoff_to_pte(off)	((pte_t) { (off) | _PAGE_FILE })

/* Needs to be defined here and not in linux/mm.h, as it is arch-dependent */
#define PageSkip(page)		(0)
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#endif /* !__ASSEMBLY__ */

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#define pte_pfn(x)		(((unsigned long)((x).pte)) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

#include <asm-generic/pgtable.h>

#endif /* __ASM_SH64_PGTABLE_H */