sh: uncached mapping helpers.
[linux-2.6-block.git] / arch/sh/include/asm/page.h
#ifndef __ASM_SH_PAGE_H
#define __ASM_SH_PAGE_H

/*
 * Copyright (C) 1999 Niibe Yutaka
 */

#include <linux/const.h>

/* PAGE_SHIFT determines the page size */
#if defined(CONFIG_PAGE_SIZE_4KB)
# define PAGE_SHIFT	12
#elif defined(CONFIG_PAGE_SIZE_8KB)
# define PAGE_SHIFT	13
#elif defined(CONFIG_PAGE_SIZE_16KB)
# define PAGE_SHIFT	14
#elif defined(CONFIG_PAGE_SIZE_64KB)
# define PAGE_SHIFT	16
#else
# error "Bogus kernel page size?"
#endif

#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))
#define PTE_MASK	PAGE_MASK

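/*
 * A worked example of the arithmetic above, assuming
 * CONFIG_PAGE_SIZE_4KB on a 32-bit part:
 *
 *	PAGE_SHIFT = 12
 *	PAGE_SIZE  = 1UL << 12     = 0x1000
 *	PAGE_MASK  = ~(0x1000 - 1) = 0xfffff000
 *
 * so (addr & PAGE_MASK) rounds an address down to its page base and
 * (addr & ~PAGE_MASK) is the offset within the page.
 */
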
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HPAGE_SHIFT	16
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
#define HPAGE_SHIFT	18
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define HPAGE_SHIFT	20
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HPAGE_SHIFT	22
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
#define HPAGE_SHIFT	26
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define HPAGE_SHIFT	29
#endif

#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SIZE		(1UL << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE-1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT-PAGE_SHIFT)
#endif

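/*
 * For example, with CONFIG_HUGETLB_PAGE_SIZE_1MB and 4KB base pages:
 *
 *	HPAGE_SHIFT        = 20
 *	HPAGE_SIZE         = 1UL << 20 = 0x100000
 *	HUGETLB_PAGE_ORDER = 20 - 12   = 8
 *
 * i.e. each huge page spans 2^8 = 256 base pages.
 */
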
#ifndef __ASSEMBLY__

extern unsigned long shm_align_mask;
extern unsigned long max_low_pfn, min_low_pfn;
extern unsigned long memory_start, memory_end;

#ifdef CONFIG_UNCACHED_MAPPING
extern unsigned long uncached_start, uncached_end;

extern int virt_addr_uncached(unsigned long kaddr);
extern void uncached_init(void);
#else
#define virt_addr_uncached(kaddr)	(0)
#define uncached_init()			do { } while (0)
#endif

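/*
 * uncached_init() establishes the uncached window at boot, after which
 * virt_addr_uncached() reports whether a kernel virtual address falls
 * inside [uncached_start, uncached_end). A minimal caller sketch
 * (needs_cache_flush is a hypothetical helper, not part of this API):
 *
 *	static inline int needs_cache_flush(unsigned long kaddr)
 *	{
 *		return !virt_addr_uncached(kaddr);
 *	}
 *
 * With CONFIG_UNCACHED_MAPPING disabled both helpers compile away, so
 * callers need no #ifdefs of their own.
 */
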
static inline unsigned long
pages_do_alias(unsigned long addr1, unsigned long addr2)
{
	return (addr1 ^ addr2) & shm_align_mask;
}

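/*
 * Two virtual addresses alias in a virtually-indexed cache when they
 * differ in the bits covered by shm_align_mask. For example, assuming
 * shm_align_mask = 0x3fff (the real value is derived from the cache
 * geometry at boot):
 *
 *	pages_do_alias(0x10001000, 0x10003000) = 0x2000  -> may alias
 *	pages_do_alias(0x10001000, 0x20001000) = 0       -> no alias
 */
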
#define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, void *from);

struct page;
struct vm_area_struct;

extern void copy_user_highpage(struct page *to, struct page *from,
			       unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
extern void clear_user_highpage(struct page *page, unsigned long vaddr);
#define clear_user_highpage	clear_user_highpage

/*
 * These are used to make use of C type-checking.
 */
#ifdef CONFIG_X2TLB
typedef struct { unsigned long pte_low, pte_high; } pte_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
typedef struct { unsigned long long pgd; } pgd_t;
#define pte_val(x) \
	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x) \
	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
#elif defined(CONFIG_SUPERH32)
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x)	((x).pte_low)
#define __pte(x)	((pte_t) { (x) } )
#else
typedef struct { unsigned long long pte_low; } pte_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x)	((x).pte_low)
#define __pte(x)	((pte_t) { (x) } )
#endif

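/*
 * In the X2TLB case the 64-bit PTE is carried as two 32-bit words
 * which pte_val()/__pte() split and splice back together. A worked
 * example with an assumed value:
 *
 *	pte_t pte = __pte(0x0000000180000025ULL);
 *
 * yields pte.pte_low = 0x80000025, pte.pte_high = 0x00000001, and
 * pte_val(pte) recovers 0x0000000180000025ULL.
 */
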
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

typedef struct page *pgtable_t;

#define pte_pgprot(x) __pgprot(pte_val(x) & PTE_FLAGS_MASK)

#endif /* !__ASSEMBLY__ */

/*
 * __MEMORY_START and __MEMORY_SIZE are the physical start address and
 * size of RAM.
 */
#define __MEMORY_START		CONFIG_MEMORY_START
#define __MEMORY_SIZE		CONFIG_MEMORY_SIZE

/*
 * PAGE_OFFSET is the virtual address of the start of kernel address
 * space.
 */
#define PAGE_OFFSET		CONFIG_PAGE_OFFSET

/*
 * Virtual to physical RAM address translation.
 *
 * In 29-bit mode, the physical offset of RAM from address 0 is visible
 * in the kernel virtual address space, and thus we don't have to take
 * it into account when translating. However, in 32-bit mode this offset
 * is not visible (it is part of the PMB mapping) and so needs to be
 * added or subtracted as required.
 */
#ifdef CONFIG_PMB
#define __pa(x)	((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
#define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
#else
#define __pa(x)	((unsigned long)(x)-PAGE_OFFSET)
#define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET))
#endif

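/*
 * A worked example, assuming PAGE_OFFSET = 0x80000000 and
 * __MEMORY_START = 0x08000000:
 *
 *	29-bit mode (!CONFIG_PMB):
 *		__pa(0x88001000) = 0x88001000 - 0x80000000 = 0x08001000
 *	32-bit mode (CONFIG_PMB):
 *		__pa(0x80001000) = 0x80001000 - 0x80000000 + 0x08000000
 *				 = 0x08001000
 *
 * __va() is the exact inverse in both cases.
 */
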
#ifdef CONFIG_UNCACHED_MAPPING
#define UNCAC_ADDR(addr)	((addr) - PAGE_OFFSET + uncached_start)
#define CAC_ADDR(addr)		((addr) - uncached_start + PAGE_OFFSET)
#else
#define UNCAC_ADDR(addr)	((addr))
#define CAC_ADDR(addr)		((addr))
#endif

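/*
 * UNCAC_ADDR() translates a cached kernel address to its alias in the
 * uncached window and CAC_ADDR() translates back; by construction
 *
 *	CAC_ADDR(UNCAC_ADDR(vaddr)) == vaddr
 *
 * for any vaddr in the identity-mapped kernel range, and both macros
 * collapse to no-ops when CONFIG_UNCACHED_MAPPING is not set.
 */
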
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)

/*
 * PFN = physical frame number (i.e. PFN 0 == physical address 0).
 * PFN_START is the PFN of the first page of RAM. By defining this we
 * don't have struct page entries for the portion of address space
 * between physical address 0 and the start of RAM.
 */
#define PFN_START		(__MEMORY_START >> PAGE_SHIFT)
#define ARCH_PFN_OFFSET		(PFN_START)
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn)		((pfn) >= min_low_pfn && (pfn) < max_low_pfn)
#endif
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

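/*
 * A worked example, assuming __MEMORY_START = 0x08000000, 4KB pages
 * and min_low_pfn = PFN_START:
 *
 *	PFN_START = 0x08000000 >> 12 = 0x8000
 *
 * so pfn_valid(0x7fff) is false, and with ARCH_PFN_OFFSET = 0x8000 the
 * flatmem mem_map[] begins at the first page of RAM rather than at
 * physical address 0.
 */
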
#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

/* vDSO support */
#ifdef CONFIG_VSYSCALL
#define __HAVE_ARCH_GATE_AREA
#endif

/*
 * Some drivers need to perform DMA into kmalloc'ed buffers
 * and so we have to increase the kmalloc minalign for this.
 */
#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES

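/*
 * For example, with a 32-byte L1 cache line every kmalloc() object is
 * at least 32-byte aligned and padded, so a buffer handed to DMA does
 * not share a cache line with unrelated data that could be written
 * back while the transfer is in flight (an illustration, not a
 * statement about any particular SH part).
 */
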
#ifdef CONFIG_SUPERH64
/*
 * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
 * happily generate {ld/st}.q pairs, requiring us to have 8-byte
 * alignment to avoid traps. The kmalloc alignment is guaranteed by
 * virtue of L1_CACHE_BYTES, requiring this to only be special cased
 * for slab caches.
 */
#define ARCH_SLAB_MINALIGN	8
#endif

#endif /* __ASM_SH_PAGE_H */