Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
1da177e4 | 2 | * S390 version |
a53c8fab | 3 | * Copyright IBM Corp. 1999, 2000 |
1da177e4 LT |
4 | * Author(s): Hartmut Penner (hp@de.ibm.com) |
5 | */ | |
6 | ||
7 | #ifndef _S390_PAGE_H | |
8 | #define _S390_PAGE_H | |
9 | ||
52480ee5 | 10 | #include <linux/const.h> |
1da177e4 LT |
11 | #include <asm/types.h> |
12 | ||
13 | /* PAGE_SHIFT determines the page size */ | |
14 | #define PAGE_SHIFT 12 | |
52480ee5 | 15 | #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) |
1da177e4 | 16 | #define PAGE_MASK (~(PAGE_SIZE-1)) |
0b642ede PO |
17 | #define PAGE_DEFAULT_ACC 0 |
18 | #define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4) | |
1da177e4 | 19 | |
53492b1d GS |
20 | #define HPAGE_SHIFT 20 |
21 | #define HPAGE_SIZE (1UL << HPAGE_SHIFT) | |
22 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) | |
23 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) | |
24 | ||
25 | #define ARCH_HAS_SETCLEAR_HUGE_PTE | |
26 | #define ARCH_HAS_HUGE_PTE_TYPE | |
27 | #define ARCH_HAS_PREPARE_HUGEPAGE | |
28 | #define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH | |
29 | ||
274f5946 | 30 | #include <asm/setup.h> |
1da177e4 LT |
31 | #ifndef __ASSEMBLY__ |
32 | ||
1da177e4 LT |
/*
 * Zero one 4K page.
 *
 * When the machine provides the PFMF facility the page is cleared with
 * a single PFMF (perform frame management function) instruction; the
 * 0x10000 operand presumably selects the clear-frame function — confirm
 * against the z/Architecture Principles of Operation.  The instruction
 * is hand-encoded via ".insn rre" so the header still assembles with
 * binutils that lack the mnemonic.
 *
 * Otherwise fall back to MVCL with a source length of zero, which pads
 * the 4096-byte destination with zero bytes.  MVCL requires even/odd
 * register pairs, hence the explicit register assignments.
 */
static inline void clear_page(void *page)
{
	if (MACHINE_HAS_PFMF) {
		asm volatile(
			"	.insn	rre,0xb9af0000,%0,%1"
			: : "d" (0x10000), "a" (page) : "memory", "cc");
	} else {
		register unsigned long reg1 asm ("1") = 0;	/* source address (unused, len 0) */
		register void *reg2 asm ("2") = page;		/* destination address */
		register unsigned long reg3 asm ("3") = 4096;	/* destination length */
		asm volatile(
			"	mvcl	2,0"
			: "+d" (reg2), "+d" (reg3) : "d" (reg1)
			: "memory", "cc");
	}
}
49 | ||
/*
 * Copy one 4K page from @from to @to.
 *
 * When the MVPG (move page) facility is available the whole page is
 * copied with a single instruction; register 0 must hold the function
 * code (0 here).  Without MVPG, fall back to MVC, which moves at most
 * 256 bytes per instruction, so the page is copied as sixteen
 * consecutive 256-byte chunks.
 */
static inline void copy_page(void *to, void *from)
{
	if (MACHINE_HAS_MVPG) {
		register unsigned long reg0 asm ("0") = 0;
		asm volatile(
			"	mvpg	%0,%1"
			: : "a" (to), "a" (from), "d" (reg0)
			: "memory", "cc");
	} else
		asm volatile(
			"	mvc	0(256,%0),0(%1)\n"
			"	mvc	256(256,%0),256(%1)\n"
			"	mvc	512(256,%0),512(%1)\n"
			"	mvc	768(256,%0),768(%1)\n"
			"	mvc	1024(256,%0),1024(%1)\n"
			"	mvc	1280(256,%0),1280(%1)\n"
			"	mvc	1536(256,%0),1536(%1)\n"
			"	mvc	1792(256,%0),1792(%1)\n"
			"	mvc	2048(256,%0),2048(%1)\n"
			"	mvc	2304(256,%0),2304(%1)\n"
			"	mvc	2560(256,%0),2560(%1)\n"
			"	mvc	2816(256,%0),2816(%1)\n"
			"	mvc	3072(256,%0),3072(%1)\n"
			"	mvc	3328(256,%0),3328(%1)\n"
			"	mvc	3584(256,%0),3584(%1)\n"
			"	mvc	3840(256,%0),3840(%1)\n"
			: : "a" (to), "a" (from) : "memory");
}
78 | ||
1da177e4 LT |
79 | #define clear_user_page(page, vaddr, pg) clear_page(page) |
80 | #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) | |
81 | ||
769848c0 MG |
82 | #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ |
83 | alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) | |
1da177e4 LT |
84 | #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE |
85 | ||
1da177e4 LT |
86 | /* |
87 | * These are used to make use of C type-checking.. | |
88 | */ | |
89 | ||
90 | typedef struct { unsigned long pgprot; } pgprot_t; | |
b2fa47e6 | 91 | typedef struct { unsigned long pgste; } pgste_t; |
1da177e4 | 92 | typedef struct { unsigned long pte; } pte_t; |
1da177e4 | 93 | typedef struct { unsigned long pmd; } pmd_t; |
190a1d72 | 94 | typedef struct { unsigned long pud; } pud_t; |
1da177e4 | 95 | typedef struct { unsigned long pgd; } pgd_t; |
146e4b3c | 96 | typedef pte_t *pgtable_t; |
1da177e4 | 97 | |
146e4b3c | 98 | #define pgprot_val(x) ((x).pgprot) |
b2fa47e6 | 99 | #define pgste_val(x) ((x).pgste) |
146e4b3c MS |
100 | #define pte_val(x) ((x).pte) |
101 | #define pmd_val(x) ((x).pmd) | |
190a1d72 | 102 | #define pud_val(x) ((x).pud) |
1da177e4 LT |
103 | #define pgd_val(x) ((x).pgd) |
104 | ||
b2fa47e6 | 105 | #define __pgste(x) ((pgste_t) { (x) } ) |
1da177e4 LT |
106 | #define __pte(x) ((pte_t) { (x) } ) |
107 | #define __pmd(x) ((pmd_t) { (x) } ) | |
b2fa47e6 | 108 | #define __pud(x) ((pud_t) { (x) } ) |
1da177e4 LT |
109 | #define __pgd(x) ((pgd_t) { (x) } ) |
110 | #define __pgprot(x) ((pgprot_t) { (x) } ) | |
111 | ||
2d42552d MS |
/*
 * Set the storage key of the 4K page at absolute address @addr.
 *
 * @addr:   absolute address of the page
 * @skey:   new storage key value (access-control bits, fetch-protection
 *          bit, reference and change bits)
 * @mapped: non-zero if the page may currently be mapped in an address
 *          space; if zero, the hand-encoded ".insn rrf,0xb22b0000"
 *          variant of SSKE is used instead of the plain mnemonic —
 *          presumably the non-quiescing form, which is only safe for
 *          unmapped pages (TODO confirm against the architecture docs).
 */
static inline void page_set_storage_key(unsigned long addr,
					unsigned char skey, int mapped)
{
	if (!mapped)
		asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
			     : : "d" (skey), "a" (addr));
	else
		asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}
121 | ||
/*
 * Read the storage key of the 4K page at absolute address @addr
 * using the ISKE (insert storage key extended) instruction.
 */
static inline unsigned char page_get_storage_key(unsigned long addr)
{
	unsigned char skey;

	asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
	return skey;
}
129 | ||
2d42552d MS |
/*
 * Reset the referenced bit in the storage key of the page at absolute
 * address @addr via RRBE (reset reference bit extended) and return the
 * bit's previous state (0 or 1).  RRBE reports the old reference bit in
 * the condition code; IPM copies the CC into bits 28-29 of @ipm, so
 * mask 0x20000000 extracts the reference bit.
 */
static inline int page_reset_referenced(unsigned long addr)
{
	unsigned int ipm;

	asm volatile(
		" rrbe 0,%1\n"
		" ipm %0\n"
		: "=d" (ipm) : "a" (addr) : "cc");
	return !!(ipm & 0x20000000);
}
140 | ||
141 | /* Bits in the storage key */ |
142 | #define _PAGE_CHANGED 0x02 /* HW changed bit */ | |
143 | #define _PAGE_REFERENCED 0x04 /* HW referenced bit */ | |
144 | #define _PAGE_FP_BIT 0x08 /* HW fetch protection bit */ | |
145 | #define _PAGE_ACC_BITS 0xf0 /* HW access control bits */ | |
146 | ||
147 | /* | |
148 | * Test and clear dirty bit in storage key. | |
149 | * We can't clear the changed bit atomically. This is a potential | |
150 | * race against modification of the referenced bit. This function | |
151 | * should therefore only be called if it is not mapped in any | |
152 | * address space. | |
153 | */ | |
154 | #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY | |
155 | static inline int page_test_and_clear_dirty(unsigned long pfn, int mapped) | |
156 | { | |
157 | unsigned char skey; | |
158 | ||
159 | skey = page_get_storage_key(pfn << PAGE_SHIFT); | |
160 | if (!(skey & _PAGE_CHANGED)) | |
161 | return 0; | |
162 | page_set_storage_key(pfn << PAGE_SHIFT, skey & ~_PAGE_CHANGED, mapped); | |
163 | return 1; | |
164 | } | |
165 | ||
166 | /* | |
167 | * Test and clear referenced bit in storage key. | |
168 | */ | |
169 | #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG | |
170 | static inline int page_test_and_clear_young(unsigned long pfn) | |
171 | { | |
172 | return page_reset_referenced(pfn << PAGE_SHIFT); | |
173 | } | |
174 | ||
45e576b1 MS |
175 | struct page; |
176 | void arch_free_page(struct page *page, int order); | |
177 | void arch_alloc_page(struct page *page, int order); | |
638ad34a | 178 | void arch_set_page_states(int make_stable); |
45e576b1 | 179 | |
ec6743bb HB |
/*
 * /dev/mem access policy: raw physical memory access is never
 * permitted on s390, so every pfn is rejected.
 */
static inline int devmem_is_allowed(unsigned long pfn)
{
	return 0;	/* unconditionally denied */
}
184 | ||
45e576b1 MS |
185 | #define HAVE_ARCH_FREE_PAGE |
186 | #define HAVE_ARCH_ALLOC_PAGE | |
187 | ||
1da177e4 LT |
188 | #endif /* !__ASSEMBLY__ */ |
189 | ||
1da177e4 LT |
190 | #define __PAGE_OFFSET 0x0UL |
191 | #define PAGE_OFFSET 0x0UL | |
192 | #define __pa(x) (unsigned long)(x) | |
193 | #define __va(x) (void *)(unsigned long)(x) | |
1da177e4 | 194 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
0b2b6e1d | 195 | #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) |
1da177e4 LT |
196 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) |
197 | ||
146e4b3c | 198 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \ |
1da177e4 LT |
199 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) |
200 | ||
aed63043 | 201 | #include <asm-generic/memory_model.h> |
5b17e1cd | 202 | #include <asm-generic/getorder.h> |
fd4fd5aa | 203 | |
b020632e MS |
204 | #define __HAVE_ARCH_GATE_AREA 1 |
205 | ||
1da177e4 | 206 | #endif /* _S390_PAGE_H */ |