Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 | 2 | /* |
1da177e4 | 3 | * S390 version |
a53c8fab | 4 | * Copyright IBM Corp. 1999, 2000 |
1da177e4 LT |
5 | * Author(s): Hartmut Penner (hp@de.ibm.com) |
6 | * Ulrich Weigand (weigand@de.ibm.com) | |
7 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | |
8 | * | |
9 | * Derived from "include/asm-i386/pgtable.h" | |
10 | */ | |
11 | ||
12 | #ifndef _ASM_S390_PGTABLE_H | |
13 | #define _ASM_S390_PGTABLE_H | |
14 | ||
9789db08 | 15 | #include <linux/sched.h> |
2dcea57a | 16 | #include <linux/mm_types.h> |
15a36036 | 17 | #include <linux/cpufeature.h> |
abf09bed | 18 | #include <linux/page-flags.h> |
527e30b4 | 19 | #include <linux/radix-tree.h> |
37cd944c | 20 | #include <linux/atomic.h> |
527618ab | 21 | #include <asm/ctlreg.h> |
1da177e4 | 22 | #include <asm/bug.h> |
b2fa47e6 | 23 | #include <asm/page.h> |
214d9bbc | 24 | #include <asm/uv.h> |
1da177e4 | 25 | |
0ccb32c9 | 26 | extern pgd_t swapper_pg_dir[]; |
bb1520d5 | 27 | extern pgd_t invalid_pg_dir[]; |
1da177e4 | 28 | extern void paging_init(void); |
527618ab | 29 | extern struct ctlreg s390_invalid_asce; |
1da177e4 | 30 | |
37cd944c HC |
31 | enum { |
32 | PG_DIRECT_MAP_4K = 0, | |
33 | PG_DIRECT_MAP_1M, | |
34 | PG_DIRECT_MAP_2G, | |
35 | PG_DIRECT_MAP_MAX | |
36 | }; | |
37 | ||
912a0d35 | 38 | extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX]; |
37cd944c HC |
39 | |
40 | static inline void update_page_count(int level, long count) | |
41 | { | |
42 | if (IS_ENABLED(CONFIG_PROC_FS)) | |
43 | atomic_long_add(count, &direct_pages_count[level]); | |
44 | } | |
45 | ||
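The counters above feed the DirectMap accounting exposed when CONFIG_PROC_FS is enabled. A minimal sketch of how a direct-mapping call site would use update_page_count(); the enum levels are the real indices, but the call sites themselves are hypothetical illustrations:

```c
/* Hypothetical call sites, for illustration: account mapping one
 * 1 MB segment in the kernel direct map, then account splitting it
 * into 256 4 KB pages. */
static void example_direct_map_accounting(void)
{
	update_page_count(PG_DIRECT_MAP_1M, 1);

	update_page_count(PG_DIRECT_MAP_1M, -1);
	update_page_count(PG_DIRECT_MAP_4K, 256);
}
```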
1da177e4 LT |
46 | /* |
47 | * The S390 doesn't have any external MMU info: the kernel page | |
48 | * tables contain all the necessary information. | |
49 | */ | |
4b3073e1 | 50 | #define update_mmu_cache(vma, address, ptep) do { } while (0) |
843f9310 | 51 | #define update_mmu_cache_range(vmf, vma, addr, ptep, nr) do { } while (0) |
b113da65 | 52 | #define update_mmu_cache_pmd(vma, address, ptep) do { } while (0) |
1da177e4 LT |
53 | |
54 | /* | |
238ec4ef | 55 | * ZERO_PAGE is a global shared page that is always zero; used |
1da177e4 LT |
56 | * for zero-mapped memory areas etc.
57 | */ | |
238ec4ef MS |
58 | |
59 | extern unsigned long empty_zero_page; | |
60 | extern unsigned long zero_page_mask; | |
61 | ||
62 | #define ZERO_PAGE(vaddr) \ | |
63 | (virt_to_page((void *)(empty_zero_page + \ | |
64 | (((unsigned long)(vaddr)) & zero_page_mask))))
816422ad | 65 | #define __HAVE_COLOR_ZERO_PAGE |
238ec4ef | 66 | |
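ZERO_PAGE() above selects one of several zero pages based on low bits of the user address, so zero-page reads at differently colored addresses do not all alias to the same cache lines. A self-contained model of the arithmetic; the base address and mask are assumed example values for what boot code sets up at runtime:

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	/* Assumed boot-time values, for illustration only: a base for
	 * the zero page area and a mask selecting one of 256 colors. */
	unsigned long empty_zero_page = 0x10000000UL;
	unsigned long zero_page_mask = 0xff000UL;
	unsigned long vaddr;

	for (vaddr = 0; vaddr < 4 * PAGE_SIZE; vaddr += PAGE_SIZE) {
		/* Same arithmetic as ZERO_PAGE(vaddr) above. */
		unsigned long zp = empty_zero_page + (vaddr & zero_page_mask);

		printf("vaddr=%#010lx -> zero page at %#010lx\n", vaddr, zp);
	}
	return 0;
}
```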
4f2e2903 | 67 | /* TODO: s390 cannot support io_remap_pfn_range... */ |
1da177e4 | 68 | |
1da177e4 | 69 | #define pte_ERROR(e) \ |
bb50655b | 70 | pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) |
1da177e4 | 71 | #define pmd_ERROR(e) \ |
bb50655b | 72 | pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e)) |
190a1d72 | 73 | #define pud_ERROR(e) \ |
bb50655b | 74 | pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e)) |
1aea9b3f | 75 | #define p4d_ERROR(e) \ |
bb50655b | 76 | pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e)) |
1da177e4 | 77 | #define pgd_ERROR(e) \ |
bb50655b | 78 | pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e)) |
1da177e4 | 79 | |
1da177e4 | 80 | /* |
a1c843b8 | 81 | * The vmalloc and module area will always be on the topmost area of the |
fc67c880 HC |
82 | * kernel mapping. 512GB are reserved for vmalloc by default. |
83 | * At the top of the vmalloc area a 2GB area is reserved where modules | |
84 | * will reside. That makes sure that inter-module branches always
85 | * happen without trampolines and in addition the placement within a | |
86 | * 2GB frame is branch prediction unit friendly. | |
8b62bc96 | 87 | */ |
912a0d35 VG |
88 | extern unsigned long VMALLOC_START; |
89 | extern unsigned long VMALLOC_END; | |
fc67c880 | 90 | #define VMALLOC_DEFAULT_SIZE ((512UL << 30) - MODULES_LEN) |
912a0d35 VG |
91 | extern struct page *vmemmap; |
92 | extern unsigned long vmemmap_size; | |
239a6425 | 93 | |
912a0d35 VG |
94 | extern unsigned long MODULES_VADDR; |
95 | extern unsigned long MODULES_END; | |
c972cc60 HC |
96 | #define MODULES_VADDR MODULES_VADDR |
97 | #define MODULES_END MODULES_END | |
98 | #define MODULES_LEN (1UL << 31) | |
c972cc60 | 99 | |
c933146a HC |
100 | static inline int is_module_addr(void *addr) |
101 | { | |
c933146a HC |
102 | BUILD_BUG_ON(MODULES_LEN > (1UL << 31)); |
103 | if (addr < (void *)MODULES_VADDR) | |
104 | return 0; | |
105 | if (addr > (void *)MODULES_END) | |
106 | return 0; | |
c933146a HC |
107 | return 1; |
108 | } | |
109 | ||
65ca73f9 IL |
110 | #ifdef CONFIG_KMSAN |
111 | #define KMSAN_VMALLOC_SIZE (VMALLOC_END - VMALLOC_START) | |
112 | #define KMSAN_VMALLOC_SHADOW_START VMALLOC_END | |
113 | #define KMSAN_VMALLOC_SHADOW_END (KMSAN_VMALLOC_SHADOW_START + KMSAN_VMALLOC_SIZE) | |
114 | #define KMSAN_VMALLOC_ORIGIN_START KMSAN_VMALLOC_SHADOW_END | |
115 | #define KMSAN_VMALLOC_ORIGIN_END (KMSAN_VMALLOC_ORIGIN_START + KMSAN_VMALLOC_SIZE) | |
116 | #define KMSAN_MODULES_SHADOW_START KMSAN_VMALLOC_ORIGIN_END | |
117 | #define KMSAN_MODULES_SHADOW_END (KMSAN_MODULES_SHADOW_START + MODULES_LEN) | |
118 | #define KMSAN_MODULES_ORIGIN_START KMSAN_MODULES_SHADOW_END | |
119 | #define KMSAN_MODULES_ORIGIN_END (KMSAN_MODULES_ORIGIN_START + MODULES_LEN) | |
120 | #endif | |
121 | ||
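The KMSAN defines above simply stack a shadow and an origin area for vmalloc, then for modules, directly on top of VMALLOC_END. A small model of the resulting layout, using assumed example values for the vmalloc bounds (at runtime these are variables set during boot):

```c
#include <stdio.h>

int main(void)
{
	/* Assumed example bounds, for illustration. */
	unsigned long vmalloc_start = 0x1000000000UL;
	unsigned long vmalloc_end = 0x9000000000UL;
	unsigned long modules_len = 1UL << 31;
	unsigned long size = vmalloc_end - vmalloc_start;

	/* Each area starts where the previous one ends. */
	unsigned long vmalloc_shadow = vmalloc_end;
	unsigned long vmalloc_origin = vmalloc_shadow + size;
	unsigned long modules_shadow = vmalloc_origin + size;
	unsigned long modules_origin = modules_shadow + modules_len;

	printf("vmalloc shadow start: %#lx\n", vmalloc_shadow);
	printf("vmalloc origin start: %#lx\n", vmalloc_origin);
	printf("modules shadow start: %#lx\n", modules_shadow);
	printf("modules origin start: %#lx\n", modules_origin);
	return 0;
}
```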
c98d2eca AG |
122 | #ifdef CONFIG_RANDOMIZE_BASE |
123 | #define KASLR_LEN (1UL << 31) | |
124 | #else | |
125 | #define KASLR_LEN 0UL | |
126 | #endif | |
127 | ||
f8107a8b HC |
128 | void setup_protection_map(void); |
129 | ||
1da177e4 | 130 | /* |
1da177e4 | 131 | * A 64-bit page table entry of S390 has the following format:
6a985c61 | 132 | * | PFRA |0IPC| OS | |
1da177e4 LT |
133 | * 0000000000111111111122222222223333333333444444444455555555556666 |
134 | * 0123456789012345678901234567890123456789012345678901234567890123 | |
135 | * | |
136 | * I Page-Invalid Bit: Page is not available for address-translation | |
137 | * P Page-Protection Bit: Store access not possible for page | |
6a985c61 | 138 | * C Change-bit override: HW is not required to set change bit |
1da177e4 LT |
139 | * |
140 | * A 64-bit segment table entry of S390 has the following format: |
141 | * | P-table origin | TT | |
142 | * 0000000000111111111122222222223333333333444444444455555555556666 | |
143 | * 0123456789012345678901234567890123456789012345678901234567890123 | |
144 | * | |
145 | * I Segment-Invalid Bit: Segment is not available for address-translation | |
146 | * C Common-Segment Bit: Segment is not private (PoP 3-30) | |
147 | * P Page-Protection Bit: Store access not possible for page | |
148 | * TT Type 00 | |
149 | * | |
150 | * A 64-bit region table entry of S390 has the following format: | |
151 | * | S-table origin | TF TTTL | |
152 | * 0000000000111111111122222222223333333333444444444455555555556666 | |
153 | * 0123456789012345678901234567890123456789012345678901234567890123 | |
154 | * | |
155 | * I Segment-Invalid Bit: Segment is not available for address-translation | |
156 | * TT Type 01 | |
157 | * TF | |
190a1d72 | 158 | * TL Table length |
1da177e4 LT |
159 | * |
160 | * The 64-bit region table origin of S390 has the following format: | |
161 | * | region table origin | DTTL | |
162 | * 0000000000111111111122222222223333333333444444444455555555556666 | |
163 | * 0123456789012345678901234567890123456789012345678901234567890123 | |
164 | * | |
165 | * X Space-Switch event: | |
166 | * G Segment-Invalid Bit: | |
167 | * P Private-Space Bit: | |
168 | * S Storage-Alteration: | |
169 | * R Real space | |
170 | * TL Table-Length: | |
171 | * | |
172 | * A storage key has the following format: | |
173 | * | ACC |F|R|C|0| | |
174 | * 0 3 4 5 6 7 | |
175 | * ACC: access key | |
176 | * F : fetch protection bit | |
177 | * R : referenced bit | |
178 | * C : changed bit | |
179 | */ | |
180 | ||
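The storage key layout in the comment above (bit 0 is the leftmost bit of the byte) can be decoded with plain shifts. A small self-contained sketch; the key value is an arbitrary example:

```c
#include <stdio.h>

int main(void)
{
	unsigned char skey = 0x96;	/* example storage key value */

	printf("ACC=%u F=%u R=%u C=%u\n",
	       (skey >> 4) & 0xf,	/* bits 0-3: access key */
	       (skey >> 3) & 1,		/* bit 4: fetch protection */
	       (skey >> 2) & 1,		/* bit 5: referenced */
	       (skey >> 1) & 1);	/* bit 6: changed */
	return 0;
}
```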
181 | /* Hardware bits in the page table entry */ | |
57d7f939 | 182 | #define _PAGE_NOEXEC 0x100 /* HW no-execute bit */ |
e5098611 | 183 | #define _PAGE_PROTECT 0x200 /* HW read-only bit */ |
83377484 | 184 | #define _PAGE_INVALID 0x400 /* HW invalid bit */ |
e5098611 | 185 | #define _PAGE_LARGE 0x800 /* Bit to mark a large pte */ |
3610cce8 MS |
186 | |
187 | /* Software bits in the page table entry */ | |
e5098611 | 188 | #define _PAGE_PRESENT 0x001 /* SW pte present bit */ |
e5098611 MS |
189 | #define _PAGE_YOUNG 0x004 /* SW pte young bit */ |
190 | #define _PAGE_DIRTY 0x008 /* SW pte dirty bit */ | |
0944fe3f MS |
191 | #define _PAGE_READ 0x010 /* SW pte read bit */ |
192 | #define _PAGE_WRITE 0x020 /* SW pte write bit */ | |
193 | #define _PAGE_SPECIAL 0x040 /* SW associated with special page */ | |
b31288fa | 194 | #define _PAGE_UNUSED 0x080 /* SW bit for pgste usage state */ |
1da177e4 | 195 | |
5614dd92 MS |
196 | #ifdef CONFIG_MEM_SOFT_DIRTY |
197 | #define _PAGE_SOFT_DIRTY 0x002 /* SW pte soft dirty bit */ | |
198 | #else | |
199 | #define _PAGE_SOFT_DIRTY 0x000 | |
200 | #endif | |
201 | ||
0807b856 GS |
202 | #define _PAGE_SW_BITS 0xffUL /* All SW bits */ |
203 | ||
92cd58bd DH |
204 | #define _PAGE_SWP_EXCLUSIVE _PAGE_LARGE /* SW pte exclusive swap bit */ |
205 | ||
138c9021 | 206 | /* Set of bits not changed in pte_modify */ |
6a5c1482 | 207 | #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \ |
5614dd92 | 208 | _PAGE_YOUNG | _PAGE_SOFT_DIRTY) |
53492b1d | 209 | |
0807b856 GS |
210 | /* |
211 | * Mask of bits that must not be changed with RDP. Allow only _PAGE_PROTECT | |
212 | * HW bit and all SW bits. | |
213 | */ | |
214 | #define _PAGE_RDP_MASK ~(_PAGE_PROTECT | _PAGE_SW_BITS) | |
215 | ||
83377484 | 216 | /* |
6e76d4b2 KS |
217 | * handle_pte_fault uses pte_present and pte_none to find out the pte type |
218 | * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to | |
219 | * distinguish present from not-present ptes. It is changed only with the page | |
220 | * table lock held. | |
83377484 | 221 | * |
e5098611 | 222 | * The following table gives the different possible bit combinations for |
a1c843b8 MS |
223 | * the pte hardware and software bits in the last 12 bits of a pte |
224 | * (. unassigned bit, x don't care, t swap type): | |
83377484 | 225 | * |
0944fe3f MS |
226 | * 842100000000 |
227 | * 000084210000 | |
228 | * 000000008421 | |
a1c843b8 MS |
229 | * .IR.uswrdy.p |
230 | * empty .10.00000000 | |
231 | * swap .11..ttttt.0 | |
232 | * prot-none, clean, old .11.xx0000.1 | |
233 | * prot-none, clean, young .11.xx0001.1 | |
bc29b7ac GS |
234 | * prot-none, dirty, old .11.xx0010.1 |
235 | * prot-none, dirty, young .11.xx0011.1 | |
a1c843b8 MS |
236 | * read-only, clean, old .11.xx0100.1 |
237 | * read-only, clean, young .01.xx0101.1 | |
238 | * read-only, dirty, old .11.xx0110.1 | |
239 | * read-only, dirty, young .01.xx0111.1 | |
240 | * read-write, clean, old .11.xx1100.1 | |
241 | * read-write, clean, young .01.xx1101.1 | |
242 | * read-write, dirty, old .10.xx1110.1 | |
243 | * read-write, dirty, young .00.xx1111.1 | |
244 | * HW-bits: R read-only, I invalid | |
245 | * SW-bits: p present, y young, d dirty, r read, w write, s special, | |
246 | * u unused, l large | |
e5098611 | 247 | * |
a1c843b8 MS |
248 | * pte_none is true for the bit pattern .10.00000000, pte == 0x400 |
249 | * pte_swap is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200 |
250 | * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001 | |
83377484 MS |
251 | */ |
252 | ||
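The three predicates quoted at the end of the comment map directly to mask tests. A self-contained model using the bit values defined earlier in this header; the my_* names are local stand-ins for the real pte_none()/pte_swap()/pte_present() defined further down:

```c
#include <assert.h>

#define _PAGE_PRESENT 0x001UL
#define _PAGE_PROTECT 0x200UL
#define _PAGE_INVALID 0x400UL

static int my_pte_none(unsigned long pte)
{
	return pte == _PAGE_INVALID;		/* .10.00000000 */
}

static int my_pte_swap(unsigned long pte)
{
	/* .11..ttttt.0: invalid and protect set, present clear */
	return (pte & (_PAGE_PROTECT | _PAGE_PRESENT)) == _PAGE_PROTECT;
}

static int my_pte_present(unsigned long pte)
{
	return (pte & _PAGE_PRESENT) != 0;	/* .xx.xxxxxx.1 */
}

int main(void)
{
	assert(my_pte_none(0x400UL));			/* empty pte */
	assert(my_pte_swap(0x12345000UL | 0x6f8UL));	/* swap entry */
	assert(my_pte_present(0x011UL));		/* read, present */
	return 0;
}
```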
3610cce8 | 253 | /* Bits in the segment/region table address-space-control-element */ |
8457d775 | 254 | #define _ASCE_ORIGIN ~0xfffUL /* region/segment table origin */ |
3610cce8 MS |
255 | #define _ASCE_PRIVATE_SPACE 0x100 /* private space control */ |
256 | #define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */ | |
257 | #define _ASCE_SPACE_SWITCH 0x40 /* space switch event */ | |
258 | #define _ASCE_REAL_SPACE 0x20 /* real space control */ | |
259 | #define _ASCE_TYPE_MASK 0x0c /* asce table type mask */ | |
260 | #define _ASCE_TYPE_REGION1 0x0c /* region first table type */ | |
261 | #define _ASCE_TYPE_REGION2 0x08 /* region second table type */ | |
262 | #define _ASCE_TYPE_REGION3 0x04 /* region third table type */ | |
263 | #define _ASCE_TYPE_SEGMENT 0x00 /* segment table type */ | |
264 | #define _ASCE_TABLE_LENGTH 0x03 /* region table length */ | |
265 | ||
266 | /* Bits in the region table entry */ | |
267 | #define _REGION_ENTRY_ORIGIN ~0xfffUL /* region/segment table origin */ |
e5098611 | 268 | #define _REGION_ENTRY_PROTECT 0x200 /* region protection bit */ |
57d7f939 | 269 | #define _REGION_ENTRY_NOEXEC 0x100 /* region no-execute bit */ |
4be130a0 | 270 | #define _REGION_ENTRY_OFFSET 0xc0 /* region table offset */ |
e5098611 | 271 | #define _REGION_ENTRY_INVALID 0x20 /* invalid region table entry */ |
c9f62152 | 272 | #define _REGION_ENTRY_TYPE_MASK 0x0c /* region table type mask */ |
3610cce8 MS |
273 | #define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */ |
274 | #define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */ | |
275 | #define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */ | |
276 | #define _REGION_ENTRY_LENGTH 0x03 /* region third length */ | |
277 | ||
278 | #define _REGION1_ENTRY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH) | |
e5098611 | 279 | #define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID) |
3610cce8 | 280 | #define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH) |
e5098611 | 281 | #define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID) |
03e6db16 GS |
282 | #define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH | \ |
283 | _REGION3_ENTRY_PRESENT) | |
e5098611 | 284 | #define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID) |
3610cce8 | 285 | |
712c5d5f CI |
286 | #define _REGION3_ENTRY_HARDWARE_BITS 0xfffffffffffff6ffUL |
287 | #define _REGION3_ENTRY_HARDWARE_BITS_LARGE 0xffffffff8001073cUL | |
9e20b4da | 288 | #define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */ |
2dffdcba HC |
289 | #define _REGION3_ENTRY_DIRTY 0x2000 /* SW region dirty bit */ |
290 | #define _REGION3_ENTRY_YOUNG 0x1000 /* SW region young bit */ | |
f934f6be | 291 | #define _REGION3_ENTRY_COMM 0x0010 /* Common-Region, marks swap entry */ |
2dffdcba | 292 | #define _REGION3_ENTRY_LARGE 0x0400 /* RTTE-format control, large page */ |
ae1b9fb2 GS |
293 | #define _REGION3_ENTRY_WRITE 0x8000 /* SW region write bit */ |
294 | #define _REGION3_ENTRY_READ 0x4000 /* SW region read bit */ | |
2dffdcba HC |
295 | |
296 | #ifdef CONFIG_MEM_SOFT_DIRTY | |
ae1b9fb2 | 297 | #define _REGION3_ENTRY_SOFT_DIRTY 0x0002 /* SW region soft dirty bit */ |
2dffdcba HC |
298 | #else |
299 | #define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */ | |
300 | #endif | |
301 | ||
1aea9b3f | 302 | #define _REGION_ENTRY_BITS 0xfffffffffffff22fUL |
d08de8e2 | 303 | |
03e6db16 GS |
304 | /* |
305 | * SW region present bit. For non-leaf region-third-table entries, bits 62-63 | |
306 | * indicate the TABLE LENGTH and both must be set to 1. But such entries | |
307 | * would always be considered as present, so it is safe to use bit 63 as | |
308 | * PRESENT bit for PUD. | |
309 | */ | |
310 | #define _REGION3_ENTRY_PRESENT 0x0001 | |
311 | ||
1da177e4 | 312 | /* Bits in the segment table entry */ |
712c5d5f CI |
313 | #define _SEGMENT_ENTRY_BITS 0xfffffffffffffe3fUL |
314 | #define _SEGMENT_ENTRY_HARDWARE_BITS 0xfffffffffffffe3cUL | |
315 | #define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE 0xfffffffffff1073cUL | |
ea81531d | 316 | #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */ |
8457d775 HC |
317 | #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL /* page table origin */ |
318 | #define _SEGMENT_ENTRY_PROTECT 0x200 /* segment protection bit */ | |
319 | #define _SEGMENT_ENTRY_NOEXEC 0x100 /* segment no-execute bit */ | |
e5098611 | 320 | #define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */ |
c9f62152 | 321 | #define _SEGMENT_ENTRY_TYPE_MASK 0x0c /* segment table type mask */ |
1da177e4 | 322 | |
03e6db16 | 323 | #define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PRESENT) |
e5098611 | 324 | #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID) |
3610cce8 | 325 | |
152125b7 MS |
326 | #define _SEGMENT_ENTRY_DIRTY 0x2000 /* SW segment dirty bit */ |
327 | #define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */ | |
ae1b9fb2 | 328 | |
f934f6be | 329 | #define _SEGMENT_ENTRY_COMM 0x0010 /* Common-Segment, marks swap entry */ |
152125b7 | 330 | #define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */ |
ae1b9fb2 GS |
331 | #define _SEGMENT_ENTRY_WRITE 0x8000 /* SW segment write bit */ |
332 | #define _SEGMENT_ENTRY_READ 0x4000 /* SW segment read bit */ | |
0944fe3f | 333 | |
5614dd92 | 334 | #ifdef CONFIG_MEM_SOFT_DIRTY |
ae1b9fb2 | 335 | #define _SEGMENT_ENTRY_SOFT_DIRTY 0x0002 /* SW segment soft dirty bit */ |
5614dd92 MS |
336 | #else |
337 | #define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */ | |
338 | #endif | |
339 | ||
03e6db16 GS |
340 | #define _SEGMENT_ENTRY_PRESENT 0x0001 /* SW segment present bit */ |
341 | ||
f934f6be GS |
342 | /* Common bits in region and segment table entries, for swap entries */ |
343 | #define _RST_ENTRY_COMM 0x0010 /* Common-Region/Segment, marks swap entry */ | |
344 | #define _RST_ENTRY_INVALID 0x0020 /* invalid region/segment table entry */ | |
345 | ||
c67da7c7 HC |
346 | #define _CRST_ENTRIES 2048 /* number of region/segment table entries */ |
347 | #define _PAGE_ENTRIES 256 /* number of page table entries */ | |
348 | ||
349 | #define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8) | |
350 | #define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8) | |
351 | ||
352 | #define _REGION1_SHIFT 53 | |
353 | #define _REGION2_SHIFT 42 | |
354 | #define _REGION3_SHIFT 31 | |
355 | #define _SEGMENT_SHIFT 20 | |
356 | ||
357 | #define _REGION1_INDEX (0x7ffUL << _REGION1_SHIFT) | |
358 | #define _REGION2_INDEX (0x7ffUL << _REGION2_SHIFT) | |
359 | #define _REGION3_INDEX (0x7ffUL << _REGION3_SHIFT) | |
360 | #define _SEGMENT_INDEX (0x7ffUL << _SEGMENT_SHIFT) | |
6febe0ef | 361 | #define _PAGE_INDEX (0xffUL << PAGE_SHIFT) |
c67da7c7 HC |
362 | |
363 | #define _REGION1_SIZE (1UL << _REGION1_SHIFT) | |
364 | #define _REGION2_SIZE (1UL << _REGION2_SHIFT) | |
365 | #define _REGION3_SIZE (1UL << _REGION3_SHIFT) | |
366 | #define _SEGMENT_SIZE (1UL << _SEGMENT_SHIFT) | |
367 | ||
368 | #define _REGION1_MASK (~(_REGION1_SIZE - 1)) | |
369 | #define _REGION2_MASK (~(_REGION2_SIZE - 1)) | |
370 | #define _REGION3_MASK (~(_REGION3_SIZE - 1)) | |
371 | #define _SEGMENT_MASK (~(_SEGMENT_SIZE - 1)) | |
372 | ||
373 | #define PMD_SHIFT _SEGMENT_SHIFT | |
374 | #define PUD_SHIFT _REGION3_SHIFT | |
375 | #define P4D_SHIFT _REGION2_SHIFT | |
376 | #define PGDIR_SHIFT _REGION1_SHIFT | |
377 | ||
378 | #define PMD_SIZE _SEGMENT_SIZE | |
379 | #define PUD_SIZE _REGION3_SIZE | |
380 | #define P4D_SIZE _REGION2_SIZE | |
381 | #define PGDIR_SIZE _REGION1_SIZE | |
382 | ||
383 | #define PMD_MASK _SEGMENT_MASK | |
384 | #define PUD_MASK _REGION3_MASK | |
385 | #define P4D_MASK _REGION2_MASK | |
386 | #define PGDIR_MASK _REGION1_MASK | |
387 | ||
388 | #define PTRS_PER_PTE _PAGE_ENTRIES | |
389 | #define PTRS_PER_PMD _CRST_ENTRIES | |
390 | #define PTRS_PER_PUD _CRST_ENTRIES | |
391 | #define PTRS_PER_P4D _CRST_ENTRIES | |
392 | #define PTRS_PER_PGD _CRST_ENTRIES | |
393 | ||
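Given the shifts and index masks above, the table index of a virtual address at each level is one shift and one mask (the region/segment tables have 2048 entries, the page table 256). A runnable sketch; the address is an arbitrary example:

```c
#include <stdio.h>

#define _REGION1_SHIFT 53
#define _REGION2_SHIFT 42
#define _REGION3_SHIFT 31
#define _SEGMENT_SHIFT 20
#define PAGE_SHIFT 12

int main(void)
{
	unsigned long addr = 0x0000024567891000UL;	/* example address */

	printf("pgd index: %lu\n", (addr >> _REGION1_SHIFT) & 0x7ff);
	printf("p4d index: %lu\n", (addr >> _REGION2_SHIFT) & 0x7ff);
	printf("pud index: %lu\n", (addr >> _REGION3_SHIFT) & 0x7ff);
	printf("pmd index: %lu\n", (addr >> _SEGMENT_SHIFT) & 0x7ff);
	printf("pte index: %lu\n", (addr >> PAGE_SHIFT) & 0xff);
	return 0;
}
```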
0944fe3f | 394 | /* |
2dffdcba HC |
395 | * Segment table and region3 table entry encoding |
396 | * (R = read-only, I = invalid, y = young bit): | |
bc29b7ac | 397 | * dy..R...I...wr |
152125b7 MS |
398 | * prot-none, clean, old 00..1...1...00 |
399 | * prot-none, clean, young 01..1...1...00 | |
400 | * prot-none, dirty, old 10..1...1...00 | |
401 | * prot-none, dirty, young 11..1...1...00 | |
bc29b7ac GS |
402 | * read-only, clean, old 00..1...1...01 |
403 | * read-only, clean, young 01..1...0...01 | |
404 | * read-only, dirty, old 10..1...1...01 | |
405 | * read-only, dirty, young 11..1...0...01 | |
152125b7 MS |
406 | * read-write, clean, old 00..1...1...11 |
407 | * read-write, clean, young 01..1...0...11 | |
408 | * read-write, dirty, old 10..0...1...11 | |
409 | * read-write, dirty, young 11..0...0...11 | |
0944fe3f MS |
410 | * The segment table origin is used to distinguish empty (origin==0) from |
411 | * read-write, old segment table entries (origin!=0) | |
a1c843b8 MS |
412 | * HW-bits: R read-only, I invalid |
413 | * SW-bits: y young, d dirty, r read, w write | |
0944fe3f | 414 | */ |
e5098611 | 415 | |
6c61cfe9 | 416 | /* Page status table bits for virtualization */ |
0d0dafc1 MS |
417 | #define PGSTE_ACC_BITS 0xf000000000000000UL |
418 | #define PGSTE_FP_BIT 0x0800000000000000UL | |
419 | #define PGSTE_PCL_BIT 0x0080000000000000UL | |
420 | #define PGSTE_HR_BIT 0x0040000000000000UL | |
421 | #define PGSTE_HC_BIT 0x0020000000000000UL | |
422 | #define PGSTE_GR_BIT 0x0004000000000000UL | |
423 | #define PGSTE_GC_BIT 0x0002000000000000UL | |
84b73876 | 424 | #define PGSTE_ST2_MASK 0x0000ffff00000000UL |
1f438993 CI |
425 | #define PGSTE_UC_BIT 0x0000000000008000UL /* user dirty (migration) */ |
426 | #define PGSTE_IN_BIT 0x0000000000004000UL /* IPTE notify bit */ | |
427 | #define PGSTE_VSIE_BIT 0x0000000000002000UL /* ref'd in a shadow table */ | |
6c61cfe9 | 428 | |
b31288fa | 429 | /* Guest Page State used for virtualization */ |
2d42f947 | 430 | #define _PGSTE_GPS_ZERO 0x0000000080000000UL |
cd774b90 | 431 | #define _PGSTE_GPS_NODAT 0x0000000040000000UL |
2d42f947 CI |
432 | #define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL |
433 | #define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL | |
434 | #define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL | |
435 | #define _PGSTE_GPS_USAGE_POT_VOLATILE 0x0000000002000000UL | |
436 | #define _PGSTE_GPS_USAGE_VOLATILE _PGSTE_GPS_USAGE_MASK | |
b31288fa | 437 | |
1da177e4 | 438 | /* |
3610cce8 MS |
439 | * A user page table pointer has the space-switch-event bit, the |
440 | * private-space-control bit and the storage-alteration-event-control | |
441 | * bit set. A kernel page table pointer doesn't need them. | |
1da177e4 | 442 | */ |
3610cce8 MS |
443 | #define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \ |
444 | _ASCE_ALT_EVENT) | |
1da177e4 | 445 | |
1da177e4 | 446 | /* |
9282ed92 | 447 | * Page protection definitions. |
1da177e4 | 448 | */ |
f8107a8b HC |
449 | #define __PAGE_NONE (_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT) |
450 | #define __PAGE_RO (_PAGE_PRESENT | _PAGE_READ | \ | |
57d7f939 | 451 | _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT) |
f8107a8b | 452 | #define __PAGE_RX (_PAGE_PRESENT | _PAGE_READ | \ |
0944fe3f | 453 | _PAGE_INVALID | _PAGE_PROTECT) |
f8107a8b | 454 | #define __PAGE_RW (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ |
57d7f939 | 455 | _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT) |
f8107a8b | 456 | #define __PAGE_RWX (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ |
0944fe3f | 457 | _PAGE_INVALID | _PAGE_PROTECT) |
f8107a8b | 458 | #define __PAGE_SHARED (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ |
57d7f939 | 459 | _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC) |
f8107a8b | 460 | #define __PAGE_KERNEL (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ |
57d7f939 | 461 | _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC) |
f8107a8b | 462 | #define __PAGE_KERNEL_RO (_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \ |
57d7f939 | 463 | _PAGE_PROTECT | _PAGE_NOEXEC) |
1da177e4 | 464 | |
f8107a8b HC |
465 | extern unsigned long page_noexec_mask; |
466 | ||
467 | #define __pgprot_page_mask(x) __pgprot((x) & page_noexec_mask) | |
468 | ||
469 | #define PAGE_NONE __pgprot_page_mask(__PAGE_NONE) | |
470 | #define PAGE_RO __pgprot_page_mask(__PAGE_RO) | |
471 | #define PAGE_RX __pgprot_page_mask(__PAGE_RX) | |
472 | #define PAGE_RW __pgprot_page_mask(__PAGE_RW) | |
473 | #define PAGE_RWX __pgprot_page_mask(__PAGE_RWX) | |
474 | #define PAGE_SHARED __pgprot_page_mask(__PAGE_SHARED) | |
475 | #define PAGE_KERNEL __pgprot_page_mask(__PAGE_KERNEL) | |
476 | #define PAGE_KERNEL_RO __pgprot_page_mask(__PAGE_KERNEL_RO) | |
477 | ||
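The masking above exists so that _PAGE_NOEXEC can be stripped from every protection value in one place; the assumption, consistent with these defines, is that boot code clears the bit from page_noexec_mask on machines without the instruction-execution-protection facility. A self-contained model:

```c
#include <stdio.h>

#define _PAGE_NOEXEC 0x100UL

int main(void)
{
	/* __PAGE_RO from above: present | read | noexec | invalid | protect */
	unsigned long page_ro = 0x001UL | 0x010UL | 0x100UL | 0x400UL | 0x200UL;
	unsigned long page_noexec_mask = ~0UL;		/* facility installed */

	printf("PAGE_RO with NX:    %#lx\n", page_ro & page_noexec_mask);

	page_noexec_mask = ~_PAGE_NOEXEC;		/* facility absent */
	printf("PAGE_RO without NX: %#lx\n", page_ro & page_noexec_mask);
	return 0;
}
```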
106c992a GS |
478 | /* |
479 | * Segment entry (large page) protection definitions. | |
480 | */ | |
f8107a8b | 481 | #define __SEGMENT_NONE (_SEGMENT_ENTRY_PRESENT | \ |
03e6db16 | 482 | _SEGMENT_ENTRY_INVALID | \ |
e5098611 | 483 | _SEGMENT_ENTRY_PROTECT) |
f8107a8b | 484 | #define __SEGMENT_RO (_SEGMENT_ENTRY_PRESENT | \ |
03e6db16 | 485 | _SEGMENT_ENTRY_PROTECT | \ |
57d7f939 MS |
486 | _SEGMENT_ENTRY_READ | \ |
487 | _SEGMENT_ENTRY_NOEXEC) | |
f8107a8b | 488 | #define __SEGMENT_RX (_SEGMENT_ENTRY_PRESENT | \ |
03e6db16 | 489 | _SEGMENT_ENTRY_PROTECT | \ |
152125b7 | 490 | _SEGMENT_ENTRY_READ) |
f8107a8b | 491 | #define __SEGMENT_RW (_SEGMENT_ENTRY_PRESENT | \ |
03e6db16 | 492 | _SEGMENT_ENTRY_READ | \ |
57d7f939 MS |
493 | _SEGMENT_ENTRY_WRITE | \ |
494 | _SEGMENT_ENTRY_NOEXEC) | |
f8107a8b | 495 | #define __SEGMENT_RWX (_SEGMENT_ENTRY_PRESENT | \ |
03e6db16 | 496 | _SEGMENT_ENTRY_READ | \ |
152125b7 | 497 | _SEGMENT_ENTRY_WRITE) |
f8107a8b | 498 | #define __SEGMENT_KERNEL (_SEGMENT_ENTRY | \ |
2dffdcba HC |
499 | _SEGMENT_ENTRY_LARGE | \ |
500 | _SEGMENT_ENTRY_READ | \ | |
501 | _SEGMENT_ENTRY_WRITE | \ | |
502 | _SEGMENT_ENTRY_YOUNG | \ | |
57d7f939 MS |
503 | _SEGMENT_ENTRY_DIRTY | \ |
504 | _SEGMENT_ENTRY_NOEXEC) | |
f8107a8b | 505 | #define __SEGMENT_KERNEL_RO (_SEGMENT_ENTRY | \ |
2dffdcba HC |
506 | _SEGMENT_ENTRY_LARGE | \ |
507 | _SEGMENT_ENTRY_READ | \ | |
508 | _SEGMENT_ENTRY_YOUNG | \ | |
57d7f939 MS |
509 | _SEGMENT_ENTRY_PROTECT | \ |
510 | _SEGMENT_ENTRY_NOEXEC) | |
2dffdcba | 511 | |
f8107a8b HC |
512 | extern unsigned long segment_noexec_mask; |
513 | ||
514 | #define __pgprot_segment_mask(x) __pgprot((x) & segment_noexec_mask) | |
515 | ||
516 | #define SEGMENT_NONE __pgprot_segment_mask(__SEGMENT_NONE) | |
517 | #define SEGMENT_RO __pgprot_segment_mask(__SEGMENT_RO) | |
518 | #define SEGMENT_RX __pgprot_segment_mask(__SEGMENT_RX) | |
519 | #define SEGMENT_RW __pgprot_segment_mask(__SEGMENT_RW) | |
520 | #define SEGMENT_RWX __pgprot_segment_mask(__SEGMENT_RWX) | |
521 | #define SEGMENT_KERNEL __pgprot_segment_mask(__SEGMENT_KERNEL) | |
522 | #define SEGMENT_KERNEL_RO __pgprot_segment_mask(__SEGMENT_KERNEL_RO) | |
523 | ||
2dffdcba HC |
524 | /* |
525 | * Region3 entry (large page) protection definitions. | |
526 | */ | |
527 | ||
f8107a8b | 528 | #define __REGION3_KERNEL (_REGION_ENTRY_TYPE_R3 | \ |
03e6db16 | 529 | _REGION3_ENTRY_PRESENT | \ |
f8107a8b HC |
530 | _REGION3_ENTRY_LARGE | \ |
531 | _REGION3_ENTRY_READ | \ | |
532 | _REGION3_ENTRY_WRITE | \ | |
533 | _REGION3_ENTRY_YOUNG | \ | |
57d7f939 MS |
534 | _REGION3_ENTRY_DIRTY | \ |
535 | _REGION_ENTRY_NOEXEC) | |
f8107a8b HC |
536 | #define __REGION3_KERNEL_RO (_REGION_ENTRY_TYPE_R3 | \ |
537 | _REGION3_ENTRY_PRESENT | \ | |
538 | _REGION3_ENTRY_LARGE | \ | |
539 | _REGION3_ENTRY_READ | \ | |
540 | _REGION3_ENTRY_YOUNG | \ | |
541 | _REGION_ENTRY_PROTECT | \ | |
542 | _REGION_ENTRY_NOEXEC) | |
543 | ||
544 | extern unsigned long region_noexec_mask; | |
545 | ||
546 | #define __pgprot_region_mask(x) __pgprot((x) & region_noexec_mask) | |
547 | ||
548 | #define REGION3_KERNEL __pgprot_region_mask(__REGION3_KERNEL) | |
549 | #define REGION3_KERNEL_RO __pgprot_region_mask(__REGION3_KERNEL_RO) | |
106c992a | 550 | |
e12e4044 MS |
551 | static inline bool mm_p4d_folded(struct mm_struct *mm) |
552 | { | |
553 | return mm->context.asce_limit <= _REGION1_SIZE; | |
554 | } | |
555 | #define mm_p4d_folded(mm) mm_p4d_folded(mm) | |
556 | ||
557 | static inline bool mm_pud_folded(struct mm_struct *mm) | |
558 | { | |
559 | return mm->context.asce_limit <= _REGION2_SIZE; | |
560 | } | |
561 | #define mm_pud_folded(mm) mm_pud_folded(mm) | |
562 | ||
563 | static inline bool mm_pmd_folded(struct mm_struct *mm) | |
564 | { | |
565 | return mm->context.asce_limit <= _REGION3_SIZE; | |
566 | } | |
567 | #define mm_pmd_folded(mm) mm_pmd_folded(mm) | |
568 | ||
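The folding rules above tie the number of active page table levels to the address space limit in the ASCE. A plain model of that mapping, using the region sizes defined earlier:

```c
#include <stdio.h>

int main(void)
{
	unsigned long region1_size = 1UL << 53;	/* _REGION1_SIZE */
	unsigned long region2_size = 1UL << 42;	/* _REGION2_SIZE */
	unsigned long region3_size = 1UL << 31;	/* _REGION3_SIZE */
	unsigned long asce_limit = region2_size;	/* typical 3-level task */
	int levels = 5;

	if (asce_limit <= region1_size)
		levels--;			/* p4d folded */
	if (asce_limit <= region2_size)
		levels--;			/* pud folded */
	if (asce_limit <= region3_size)
		levels--;			/* pmd folded */
	printf("asce_limit=%#lx -> %d paging levels\n", asce_limit, levels);
	return 0;
}
```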
b2fa47e6 MS |
569 | static inline int mm_has_pgste(struct mm_struct *mm) |
570 | { | |
571 | #ifdef CONFIG_PGSTE | |
572 | if (unlikely(mm->context.has_pgste)) | |
573 | return 1; | |
574 | #endif | |
575 | return 0; | |
576 | } | |
65eef335 | 577 | |
214d9bbc CI |
578 | static inline int mm_is_protected(struct mm_struct *mm) |
579 | { | |
580 | #ifdef CONFIG_PGSTE | |
07fbdf7f | 581 | if (unlikely(atomic_read(&mm->context.protected_count))) |
214d9bbc CI |
582 | return 1; |
583 | #endif | |
584 | return 0; | |
585 | } | |
586 | ||
94d553ce HC |
587 | static inline pgste_t clear_pgste_bit(pgste_t pgste, unsigned long mask) |
588 | { | |
589 | return __pgste(pgste_val(pgste) & ~mask); | |
590 | } | |
591 | ||
592 | static inline pgste_t set_pgste_bit(pgste_t pgste, unsigned long mask) | |
593 | { | |
594 | return __pgste(pgste_val(pgste) | mask); | |
595 | } | |
596 | ||
f29111f1 HC |
597 | static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot) |
598 | { | |
599 | return __pte(pte_val(pte) & ~pgprot_val(prot)); | |
600 | } | |
601 | ||
602 | static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot) | |
603 | { | |
604 | return __pte(pte_val(pte) | pgprot_val(prot)); | |
605 | } | |
606 | ||
607 | static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot) | |
608 | { | |
609 | return __pmd(pmd_val(pmd) & ~pgprot_val(prot)); | |
610 | } | |
611 | ||
612 | static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot) | |
613 | { | |
614 | return __pmd(pmd_val(pmd) | pgprot_val(prot)); | |
615 | } | |
616 | ||
617 | static inline pud_t clear_pud_bit(pud_t pud, pgprot_t prot) | |
618 | { | |
619 | return __pud(pud_val(pud) & ~pgprot_val(prot)); | |
620 | } | |
621 | ||
622 | static inline pud_t set_pud_bit(pud_t pud, pgprot_t prot) | |
623 | { | |
624 | return __pud(pud_val(pud) | pgprot_val(prot)); | |
625 | } | |
626 | ||
2faee8ff | 627 | /* |
06201e00 DH |
628 | * As soon as the guest uses storage keys or enables PV, we deduplicate all |
629 | * mapped shared zeropages and prevent new shared zeropages from getting | |
630 | * mapped. | |
2faee8ff | 631 | */ |
06201e00 DH |
632 | #define mm_forbids_zeropage mm_forbids_zeropage |
633 | static inline int mm_forbids_zeropage(struct mm_struct *mm) | |
634 | { | |
635 | #ifdef CONFIG_PGSTE | |
636 | if (!mm->context.allow_cow_sharing) | |
637 | return 1; | |
638 | #endif | |
639 | return 0; | |
640 | } | |
641 | ||
55531b74 | 642 | static inline int mm_uses_skeys(struct mm_struct *mm) |
65eef335 DD |
643 | { |
644 | #ifdef CONFIG_PGSTE | |
55531b74 | 645 | if (mm->context.uses_skeys) |
65eef335 DD |
646 | return 1; |
647 | #endif | |
648 | return 0; | |
649 | } | |
650 | ||
4ccccc52 HC |
651 | static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new) |
652 | { | |
53c1c250 | 653 | union register_pair r1 = { .even = old, .odd = new, }; |
4ccccc52 HC |
654 | unsigned long address = (unsigned long)ptr | 1; |
655 | ||
656 | asm volatile( | |
53c1c250 HC |
657 | " csp %[r1],%[address]" |
658 | : [r1] "+&d" (r1.pair), "+m" (*ptr) | |
659 | : [address] "d" (address) | |
4ccccc52 HC |
660 | : "cc"); |
661 | } | |
662 | ||
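A hedged sketch of how csp() is meant to be used (the call site below is hypothetical): it atomically replaces one 32-bit word of a DAT table and purges stale translations on all CPUs; unlike cspg() below, it does not report whether the compare succeeded, so the caller must know the old value is current.

```c
/* Hypothetical kernel-context call site for csp(). */
static void example_set_protect_bit(unsigned int *entry)
{
	unsigned int old = *entry;

	/* Swap in the word with the protection bit (0x200) set and
	 * purge the old translation system-wide. */
	csp(entry, old, old | 0x200);
}
```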
02ee1491 CI |
663 | /** |
664 | * cspg() - Compare and Swap and Purge (CSPG) | |
665 | * @ptr: Pointer to the value to be exchanged | |
666 | * @old: The expected old value | |
667 | * @new: The new value | |
668 | * | |
669 | * Return: True if compare and swap was successful, otherwise false. | |
670 | */ | |
671 | static inline bool cspg(unsigned long *ptr, unsigned long old, unsigned long new) | |
e8a97e42 | 672 | { |
53c1c250 | 673 | union register_pair r1 = { .even = old, .odd = new, }; |
e8a97e42 HC |
674 | unsigned long address = (unsigned long)ptr | 1; |
675 | ||
676 | asm volatile( | |
731efc96 | 677 | " cspg %[r1],%[address]" |
53c1c250 HC |
678 | : [r1] "+&d" (r1.pair), "+m" (*ptr) |
679 | : [address] "d" (address) | |
e8a97e42 | 680 | : "cc"); |
02ee1491 | 681 | return old == r1.even; |
e8a97e42 HC |
682 | } |
683 | ||
684 | #define CRDTE_DTT_PAGE 0x00UL | |
685 | #define CRDTE_DTT_SEGMENT 0x10UL | |
686 | #define CRDTE_DTT_REGION3 0x14UL | |
687 | #define CRDTE_DTT_REGION2 0x18UL | |
688 | #define CRDTE_DTT_REGION1 0x1cUL | |
689 | ||
02ee1491 CI |
690 | /** |
691 | * crdte() - Compare and Replace DAT Table Entry | |
692 | * @old: The expected old value | |
693 | * @new: The new value | |
694 | * @table: Pointer to the value to be exchanged | |
695 | * @dtt: Table type of the table to be exchanged | |
696 | * @address: The address mapped by the entry to be replaced | |
697 | * @asce: The ASCE of this entry | |
698 | * | |
699 | * Return: True if compare and replace was successful, otherwise false. | |
700 | */ | |
701 | static inline bool crdte(unsigned long old, unsigned long new, | |
273cd173 | 702 | unsigned long *table, unsigned long dtt, |
e8a97e42 HC |
703 | unsigned long address, unsigned long asce) |
704 | { | |
53c1c250 | 705 | union register_pair r1 = { .even = old, .odd = new, }; |
273cd173 | 706 | union register_pair r2 = { .even = __pa(table) | dtt, .odd = address, }; |
e8a97e42 | 707 | |
53c1c250 HC |
708 | asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0" |
709 | : [r1] "+&d" (r1.pair) | |
710 | : [r2] "d" (r2.pair), [asce] "a" (asce) | |
e8a97e42 | 711 | : "memory", "cc"); |
02ee1491 | 712 | return old == r1.even; |
e8a97e42 HC |
713 | } |
714 | ||
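A hedged sketch of a crdte() retry loop (the surrounding walk is hypothetical): replace one segment table entry and flush its translation, retrying if another CPU changed the entry between the read and the compare-and-replace.

```c
/* Hypothetical use of crdte(): "table", "addr" and "asce" stand in
 * for the results of a real page table walk. */
static void example_replace_segment_entry(unsigned long *table,
					  unsigned long addr,
					  unsigned long asce,
					  unsigned long new)
{
	unsigned long old;

	do {
		old = READ_ONCE(*table);
	} while (!crdte(old, new, table, CRDTE_DTT_SEGMENT, addr, asce));
}
```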
1da177e4 | 715 | /* |
cc18b460 | 716 | * pgd/p4d/pud/pmd/pte query functions |
1da177e4 | 717 | */ |
cc18b460 HC |
718 | static inline int pgd_folded(pgd_t pgd) |
719 | { | |
720 | return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1; | |
721 | } | |
722 | ||
5a216a20 MS |
723 | static inline int pgd_present(pgd_t pgd) |
724 | { | |
cc18b460 | 725 | if (pgd_folded(pgd)) |
6252d702 | 726 | return 1; |
5a216a20 MS |
727 | return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL; |
728 | } | |
729 | ||
730 | static inline int pgd_none(pgd_t pgd) | |
731 | { | |
cc18b460 | 732 | if (pgd_folded(pgd)) |
6252d702 | 733 | return 0; |
e5098611 | 734 | return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL; |
5a216a20 MS |
735 | } |
736 | ||
737 | static inline int pgd_bad(pgd_t pgd) | |
738 | { | |
c9f62152 MS |
739 | if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1) |
740 | return 0; | |
741 | return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0; | |
5a216a20 | 742 | } |
190a1d72 | 743 | |
d0e2eb0a VG |
744 | static inline unsigned long pgd_pfn(pgd_t pgd) |
745 | { | |
746 | unsigned long origin_mask; | |
747 | ||
748 | origin_mask = _REGION_ENTRY_ORIGIN; | |
749 | return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT; | |
750 | } | |
751 | ||
cc18b460 HC |
752 | static inline int p4d_folded(p4d_t p4d) |
753 | { | |
754 | return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2; | |
755 | } | |
756 | ||
1aea9b3f MS |
757 | static inline int p4d_present(p4d_t p4d) |
758 | { | |
cc18b460 | 759 | if (p4d_folded(p4d)) |
1aea9b3f MS |
760 | return 1; |
761 | return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL; | |
762 | } | |
763 | ||
764 | static inline int p4d_none(p4d_t p4d) | |
765 | { | |
cc18b460 | 766 | if (p4d_folded(p4d)) |
1aea9b3f MS |
767 | return 0; |
768 | return p4d_val(p4d) == _REGION2_ENTRY_EMPTY; | |
769 | } | |
770 | ||
771 | static inline unsigned long p4d_pfn(p4d_t p4d) | |
772 | { | |
773 | unsigned long origin_mask; | |
774 | ||
775 | origin_mask = _REGION_ENTRY_ORIGIN; | |
776 | return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT; | |
777 | } | |
778 | ||
cc18b460 HC |
779 | static inline int pud_folded(pud_t pud) |
780 | { | |
781 | return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3; | |
782 | } | |
783 | ||
190a1d72 | 784 | static inline int pud_present(pud_t pud) |
1da177e4 | 785 | { |
cc18b460 | 786 | if (pud_folded(pud)) |
6252d702 | 787 | return 1; |
03e6db16 | 788 | return (pud_val(pud) & _REGION3_ENTRY_PRESENT) != 0; |
1da177e4 LT |
789 | } |
790 | ||
190a1d72 | 791 | static inline int pud_none(pud_t pud) |
1da177e4 | 792 | { |
cc18b460 | 793 | if (pud_folded(pud)) |
6252d702 | 794 | return 0; |
d08de8e2 | 795 | return pud_val(pud) == _REGION3_ENTRY_EMPTY; |
1da177e4 LT |
796 | } |
797 | ||
e72c7c2b | 798 | #define pud_leaf pud_leaf |
c05995b7 | 799 | static inline bool pud_leaf(pud_t pud) |
18da2369 HC |
800 | { |
801 | if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3) | |
802 | return 0; | |
03e6db16 GS |
803 | return (pud_present(pud) && (pud_val(pud) & _REGION3_ENTRY_LARGE) != 0); |
804 | } | |
805 | ||
806 | static inline int pmd_present(pmd_t pmd) | |
807 | { | |
808 | return (pmd_val(pmd) & _SEGMENT_ENTRY_PRESENT) != 0; | |
18da2369 HC |
809 | } |
810 | ||
e72c7c2b | 811 | #define pmd_leaf pmd_leaf |
c05995b7 | 812 | static inline bool pmd_leaf(pmd_t pmd) |
d08de8e2 | 813 | { |
03e6db16 | 814 | return (pmd_present(pmd) && (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0); |
d08de8e2 GS |
815 | } |
816 | ||
817 | static inline int pmd_bad(pmd_t pmd) | |
818 | { | |
2f709f7b | 819 | if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_leaf(pmd)) |
c9f62152 | 820 | return 1; |
d08de8e2 GS |
821 | return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0; |
822 | } | |
823 | ||
190a1d72 | 824 | static inline int pud_bad(pud_t pud) |
1da177e4 | 825 | { |
c9f62152 MS |
826 | unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK; |
827 | ||
0a845e0f | 828 | if (type > _REGION_ENTRY_TYPE_R3 || pud_leaf(pud)) |
c9f62152 MS |
829 | return 1; |
830 | if (type < _REGION_ENTRY_TYPE_R3) | |
831 | return 0; | |
d08de8e2 | 832 | return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0; |
1da177e4 LT |
833 | } |
834 | ||
1aea9b3f MS |
835 | static inline int p4d_bad(p4d_t p4d) |
836 | { | |
c9f62152 MS |
837 | unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK; |
838 | ||
839 | if (type > _REGION_ENTRY_TYPE_R2) | |
840 | return 1; | |
841 | if (type < _REGION_ENTRY_TYPE_R2) | |
842 | return 0; | |
1aea9b3f MS |
843 | return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0; |
844 | } | |
845 | ||
4448aaf0 | 846 | static inline int pmd_none(pmd_t pmd) |
1da177e4 | 847 | { |
54397bb0 | 848 | return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY; |
1da177e4 LT |
849 | } |
850 | ||
e4e40e02 | 851 | #define pmd_write pmd_write |
1ae1c1d0 GS |
852 | static inline int pmd_write(pmd_t pmd) |
853 | { | |
152125b7 MS |
854 | return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0; |
855 | } | |
856 | ||
582b4e55 GS |
857 | #define pud_write pud_write |
858 | static inline int pud_write(pud_t pud) | |
859 | { | |
860 | return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0; | |
861 | } | |
862 | ||
533c67e6 | 863 | #define pmd_dirty pmd_dirty |
152125b7 MS |
864 | static inline int pmd_dirty(pmd_t pmd) |
865 | { | |
2d1fc1eb | 866 | return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0; |
1ae1c1d0 GS |
867 | } |
868 | ||
6617da8f | 869 | #define pmd_young pmd_young |
1ae1c1d0 GS |
870 | static inline int pmd_young(pmd_t pmd) |
871 | { | |
2d1fc1eb | 872 | return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0; |
1ae1c1d0 GS |
873 | } |
874 | ||
e5098611 | 875 | static inline int pte_present(pte_t pte) |
1da177e4 | 876 | { |
e5098611 MS |
877 | /* Bit pattern: (pte & 0x001) == 0x001 */ |
878 | return (pte_val(pte) & _PAGE_PRESENT) != 0; | |
1da177e4 LT |
879 | } |
880 | ||
e5098611 | 881 | static inline int pte_none(pte_t pte) |
1da177e4 | 882 | { |
e5098611 MS |
883 | /* Bit pattern: pte == 0x400 */ |
884 | return pte_val(pte) == _PAGE_INVALID; | |
1da177e4 LT |
885 | } |
886 | ||
b31288fa KW |
887 | static inline int pte_swap(pte_t pte) |
888 | { | |
a1c843b8 MS |
889 | /* Bit pattern: (pte & 0x201) == 0x200 */ |
890 | return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT)) | |
891 | == _PAGE_PROTECT; | |
b31288fa KW |
892 | } |
893 | ||
7e675137 NP |
894 | static inline int pte_special(pte_t pte) |
895 | { | |
a08cb629 | 896 | return (pte_val(pte) & _PAGE_SPECIAL); |
7e675137 NP |
897 | } |
898 | ||
ba8a9229 | 899 | #define __HAVE_ARCH_PTE_SAME |
b2fa47e6 MS |
900 | static inline int pte_same(pte_t a, pte_t b) |
901 | { | |
902 | return pte_val(a) == pte_val(b); | |
903 | } | |
1da177e4 | 904 | |
b54565b8 MS |
905 | #ifdef CONFIG_NUMA_BALANCING |
906 | static inline int pte_protnone(pte_t pte) | |
907 | { | |
908 | return pte_present(pte) && !(pte_val(pte) & _PAGE_READ); | |
909 | } | |
910 | ||
911 | static inline int pmd_protnone(pmd_t pmd) | |
912 | { | |
2f709f7b PX |
913 | /* pmd_leaf(pmd) implies pmd_present(pmd) */ |
914 | return pmd_leaf(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ); | |
b54565b8 MS |
915 | } |
916 | #endif | |
917 | ||
403d1338 | 918 | static inline bool pte_swp_exclusive(pte_t pte) |
92cd58bd DH |
919 | { |
920 | return pte_val(pte) & _PAGE_SWP_EXCLUSIVE; | |
921 | } | |
922 | ||
923 | static inline pte_t pte_swp_mkexclusive(pte_t pte) | |
924 | { | |
925 | return set_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE)); | |
926 | } | |
927 | ||
928 | static inline pte_t pte_swp_clear_exclusive(pte_t pte) | |
929 | { | |
930 | return clear_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE)); | |
931 | } | |
932 | ||
5614dd92 MS |
933 | static inline int pte_soft_dirty(pte_t pte) |
934 | { | |
935 | return pte_val(pte) & _PAGE_SOFT_DIRTY; | |
936 | } | |
937 | #define pte_swp_soft_dirty pte_soft_dirty | |
938 | ||
939 | static inline pte_t pte_mksoft_dirty(pte_t pte) | |
940 | { | |
4a366f51 | 941 | return set_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY)); |
5614dd92 MS |
942 | } |
943 | #define pte_swp_mksoft_dirty pte_mksoft_dirty | |
944 | ||
945 | static inline pte_t pte_clear_soft_dirty(pte_t pte) | |
946 | { | |
4a366f51 | 947 | return clear_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY)); |
5614dd92 MS |
948 | } |
949 | #define pte_swp_clear_soft_dirty pte_clear_soft_dirty | |
950 | ||
951 | static inline int pmd_soft_dirty(pmd_t pmd) | |
952 | { | |
953 | return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY; | |
954 | } | |
955 | ||
956 | static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) | |
957 | { | |
4a366f51 | 958 | return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY)); |
5614dd92 MS |
959 | } |
960 | ||
961 | static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) | |
962 | { | |
4a366f51 | 963 | return clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY)); |
5614dd92 MS |
964 | } |
965 | ||
1da177e4 LT |
966 | /* |
967 | * query functions pte_write/pte_dirty/pte_young only work if | |
968 | * pte_present() is true. Undefined behaviour if not. |
969 | */ | |
4448aaf0 | 970 | static inline int pte_write(pte_t pte) |
1da177e4 | 971 | { |
e5098611 | 972 | return (pte_val(pte) & _PAGE_WRITE) != 0; |
1da177e4 LT |
973 | } |
974 | ||
4448aaf0 | 975 | static inline int pte_dirty(pte_t pte) |
1da177e4 | 976 | { |
e5098611 | 977 | return (pte_val(pte) & _PAGE_DIRTY) != 0; |
1da177e4 LT |
978 | } |
979 | ||
4448aaf0 | 980 | static inline int pte_young(pte_t pte) |
1da177e4 | 981 | { |
0944fe3f | 982 | return (pte_val(pte) & _PAGE_YOUNG) != 0; |
1da177e4 LT |
983 | } |
984 | ||
b31288fa KW |
985 | #define __HAVE_ARCH_PTE_UNUSED |
986 | static inline int pte_unused(pte_t pte) | |
987 | { | |
988 | return pte_val(pte) & _PAGE_UNUSED; | |
989 | } | |
990 | ||
d460bb6c NS |
991 | /* |
992 | * Extract the pgprot value from the given pte while at the same time making it | |
993 | * usable for kernel address space mappings where fault driven dirty and | |
994 | * young/old accounting is not supported, i.e _PAGE_PROTECT and _PAGE_INVALID | |
995 | * must not be set. | |
996 | */ | |
0515e022 | 997 | #define pte_pgprot pte_pgprot |
d460bb6c NS |
998 | static inline pgprot_t pte_pgprot(pte_t pte) |
999 | { | |
1000 | unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK; | |
1001 | ||
1002 | if (pte_write(pte)) | |
1003 | pte_flags |= pgprot_val(PAGE_KERNEL); | |
1004 | else | |
1005 | pte_flags |= pgprot_val(PAGE_KERNEL_RO); | |
1006 | pte_flags |= pte_val(pte) & mio_wb_bit_mask; | |
1007 | ||
1008 | return __pgprot(pte_flags); | |
1009 | } | |
1010 | ||
1da177e4 LT |
1011 | /* |
1012 | * pgd/pmd/pte modification functions | |
1013 | */ | |
1014 | ||
e2aaae2d HC |
1015 | static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) |
1016 | { | |
1017 | WRITE_ONCE(*pgdp, pgd); | |
1018 | } | |
1019 | ||
1020 | static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) | |
1021 | { | |
1022 | WRITE_ONCE(*p4dp, p4d); | |
1023 | } | |
1024 | ||
1025 | static inline void set_pud(pud_t *pudp, pud_t pud) | |
1026 | { | |
1027 | WRITE_ONCE(*pudp, pud); | |
1028 | } | |
1029 | ||
1030 | static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) | |
1031 | { | |
1032 | WRITE_ONCE(*pmdp, pmd); | |
1033 | } | |
1034 | ||
1035 | static inline void set_pte(pte_t *ptep, pte_t pte) | |
1036 | { | |
1037 | WRITE_ONCE(*ptep, pte); | |
1038 | } | |
1039 | ||
b2fa47e6 | 1040 | static inline void pgd_clear(pgd_t *pgd) |
5a216a20 | 1041 | { |
1aea9b3f | 1042 | if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1) |
b8e3b379 | 1043 | set_pgd(pgd, __pgd(_REGION1_ENTRY_EMPTY)); |
1aea9b3f MS |
1044 | } |
1045 | ||
1046 | static inline void p4d_clear(p4d_t *p4d) | |
1047 | { | |
1048 | if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) | |
b8e3b379 | 1049 | set_p4d(p4d, __p4d(_REGION2_ENTRY_EMPTY)); |
5a216a20 MS |
1050 | } |
1051 | ||
b2fa47e6 | 1052 | static inline void pud_clear(pud_t *pud) |
1da177e4 | 1053 | { |
6252d702 | 1054 | if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) |
b8e3b379 | 1055 | set_pud(pud, __pud(_REGION3_ENTRY_EMPTY)); |
1da177e4 LT |
1056 | } |
1057 | ||
b2fa47e6 | 1058 | static inline void pmd_clear(pmd_t *pmdp) |
1da177e4 | 1059 | { |
b8e3b379 | 1060 | set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); |
1da177e4 LT |
1061 | } |
1062 | ||
4448aaf0 | 1063 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
1da177e4 | 1064 | { |
b8e3b379 | 1065 | set_pte(ptep, __pte(_PAGE_INVALID)); |
1da177e4 LT |
1066 | } |
1067 | ||
1068 | /* | |
1069 | * The following pte modification functions only work if | |
1070 | * pte_present() is true. Undefined behaviour if not. |
1071 | */ | |
4448aaf0 | 1072 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
1da177e4 | 1073 | { |
4a366f51 HC |
1074 | pte = clear_pte_bit(pte, __pgprot(~_PAGE_CHG_MASK)); |
1075 | pte = set_pte_bit(pte, newprot); | |
0944fe3f | 1076 | /* |
57d7f939 MS |
1077 | * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX |
1078 | * has the invalid bit set, clear it again for readable, young pages | |
0944fe3f MS |
1079 | */ |
1080 | if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ)) | |
4a366f51 | 1081 | pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID)); |
0944fe3f | 1082 | /* |
57d7f939 MS |
1083 | * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page |
1084 | * protection bit set, clear it again for writable, dirty pages | |
0944fe3f | 1085 | */ |
e5098611 | 1086 | if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE)) |
4a366f51 | 1087 | pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT)); |
1da177e4 LT |
1088 | return pte; |
1089 | } | |
1090 | ||
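A userspace model of the two fixups in pte_modify() above: after merging the new protection, a readable young page gets its invalid bit cleared again, and a writable dirty page gets its protect bit cleared again. Bit values are copied from this header; the soft-dirty bit is omitted from the model:

```c
#include <stdio.h>

#define _PAGE_PRESENT 0x001UL
#define _PAGE_YOUNG   0x004UL
#define _PAGE_DIRTY   0x008UL
#define _PAGE_READ    0x010UL
#define _PAGE_WRITE   0x020UL
#define _PAGE_SPECIAL 0x040UL
#define _PAGE_NOEXEC  0x100UL
#define _PAGE_PROTECT 0x200UL
#define _PAGE_INVALID 0x400UL
/* PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | _PAGE_YOUNG */
#define _PAGE_CHG_MASK (~0xfffUL | _PAGE_SPECIAL | _PAGE_DIRTY | _PAGE_YOUNG)

static unsigned long my_pte_modify(unsigned long pte, unsigned long prot)
{
	pte &= _PAGE_CHG_MASK;
	pte |= prot;
	if ((pte & _PAGE_YOUNG) && (pte & _PAGE_READ))
		pte &= ~_PAGE_INVALID;	/* re-validate young, readable */
	if ((pte & _PAGE_DIRTY) && (pte & _PAGE_WRITE))
		pte &= ~_PAGE_PROTECT;	/* re-enable dirty, writable */
	return pte;
}

int main(void)
{
	unsigned long page_rw = _PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |
				_PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT;
	unsigned long old = 0x1000UL | _PAGE_PRESENT | _PAGE_READ |
			    _PAGE_WRITE | _PAGE_YOUNG | _PAGE_DIRTY;

	printf("new pte: %#lx\n", my_pte_modify(old, page_rw));
	return 0;
}
```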
4448aaf0 | 1091 | static inline pte_t pte_wrprotect(pte_t pte) |
1da177e4 | 1092 | { |
4a366f51 HC |
1093 | pte = clear_pte_bit(pte, __pgprot(_PAGE_WRITE)); |
1094 | return set_pte_bit(pte, __pgprot(_PAGE_PROTECT)); | |
1da177e4 LT |
1095 | } |
1096 | ||
2f0584f3 | 1097 | static inline pte_t pte_mkwrite_novma(pte_t pte) |
1da177e4 | 1098 | { |
4a366f51 | 1099 | pte = set_pte_bit(pte, __pgprot(_PAGE_WRITE)); |
e5098611 | 1100 | if (pte_val(pte) & _PAGE_DIRTY) |
4a366f51 | 1101 | pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT)); |
1da177e4 LT |
1102 | return pte; |
1103 | } | |
1104 | ||
4448aaf0 | 1105 | static inline pte_t pte_mkclean(pte_t pte) |
1da177e4 | 1106 | { |
4a366f51 HC |
1107 | pte = clear_pte_bit(pte, __pgprot(_PAGE_DIRTY)); |
1108 | return set_pte_bit(pte, __pgprot(_PAGE_PROTECT)); | |
1da177e4 LT |
1109 | } |
1110 | ||
4448aaf0 | 1111 | static inline pte_t pte_mkdirty(pte_t pte) |
1da177e4 | 1112 | { |
4a366f51 | 1113 | pte = set_pte_bit(pte, __pgprot(_PAGE_DIRTY | _PAGE_SOFT_DIRTY)); |
e5098611 | 1114 | if (pte_val(pte) & _PAGE_WRITE) |
4a366f51 | 1115 | pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT)); |
1da177e4 LT |
1116 | return pte; |
1117 | } | |
1118 | ||
4448aaf0 | 1119 | static inline pte_t pte_mkold(pte_t pte) |
1da177e4 | 1120 | { |
4a366f51 HC |
1121 | pte = clear_pte_bit(pte, __pgprot(_PAGE_YOUNG)); |
1122 | return set_pte_bit(pte, __pgprot(_PAGE_INVALID)); | |
1da177e4 LT |
1123 | } |
1124 | ||
4448aaf0 | 1125 | static inline pte_t pte_mkyoung(pte_t pte) |
1da177e4 | 1126 | { |
4a366f51 | 1127 | pte = set_pte_bit(pte, __pgprot(_PAGE_YOUNG)); |
0944fe3f | 1128 | if (pte_val(pte) & _PAGE_READ) |
4a366f51 | 1129 | pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID)); |
1da177e4 LT |
1130 | return pte; |
1131 | } | |
1132 | ||
7e675137 NP |
1133 | static inline pte_t pte_mkspecial(pte_t pte) |
1134 | { | |
4a366f51 | 1135 | return set_pte_bit(pte, __pgprot(_PAGE_SPECIAL)); |
7e675137 NP |
1136 | } |
1137 | ||
84afdcee HC |
1138 | #ifdef CONFIG_HUGETLB_PAGE |
1139 | static inline pte_t pte_mkhuge(pte_t pte) | |
1140 | { | |
4a366f51 | 1141 | return set_pte_bit(pte, __pgprot(_PAGE_LARGE)); |
84afdcee HC |
1142 | } |
1143 | #endif | |
1144 | ||
34eeaf37 MS |
1145 | #define IPTE_GLOBAL 0 |
1146 | #define IPTE_LOCAL 1 | |
53e857f3 | 1147 | |
118bd31b | 1148 | #define IPTE_NODAT 0x400 |
28c807e5 | 1149 | #define IPTE_GUEST_ASCE 0x800 |
118bd31b | 1150 | |
0807b856 GS |
1151 | static __always_inline void __ptep_rdp(unsigned long addr, pte_t *ptep, |
1152 | unsigned long opt, unsigned long asce, | |
1153 | int local) | |
1154 | { | |
1155 | unsigned long pto; | |
1156 | ||
1157 | pto = __pa(ptep) & ~(PTRS_PER_PTE * sizeof(pte_t) - 1); | |
1158 | asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%[asce],%[m4]" | |
1159 | : "+m" (*ptep) | |
1160 | : [r1] "a" (pto), [r2] "a" ((addr & PAGE_MASK) | opt), | |
1161 | [asce] "a" (asce), [m4] "i" (local)); | |
1162 | } | |
1163 | ||
6818b542 HC |
1164 | static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep, |
1165 | unsigned long opt, unsigned long asce, | |
1166 | int local) | |
1b948d6c | 1167 | { |
273cd173 | 1168 | unsigned long pto = __pa(ptep); |
1b948d6c | 1169 | |
118bd31b MS |
1170 | if (__builtin_constant_p(opt) && opt == 0) { |
1171 | /* Invalidation + TLB flush for the pte */ | |
1172 | asm volatile( | |
731efc96 | 1173 | " ipte %[r1],%[r2],0,%[m4]" |
118bd31b MS |
1174 | : "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address), |
1175 | [m4] "i" (local)); | |
1176 | return; | |
1177 | } | |
1178 | ||
1179 | /* Invalidate ptes with options + TLB flush of the ptes */ | |
28c807e5 | 1180 | opt = opt | (asce & _ASCE_ORIGIN); |
1b948d6c | 1181 | asm volatile( |
731efc96 | 1182 | " ipte %[r1],%[r2],%[r3],%[m4]" |
118bd31b MS |
1183 | : [r2] "+a" (address), [r3] "+a" (opt) |
1184 | : [r1] "a" (pto), [m4] "i" (local) : "memory"); | |
1b948d6c MS |
1185 | } |
1186 | ||
6818b542 HC |
1187 | static __always_inline void __ptep_ipte_range(unsigned long address, int nr, |
1188 | pte_t *ptep, int local) | |
cfb0b241 | 1189 | { |
273cd173 | 1190 | unsigned long pto = __pa(ptep); |
cfb0b241 | 1191 | |
34eeaf37 | 1192 | /* Invalidate a range of ptes + TLB flush of the ptes */ |
cfb0b241 HC |
1193 | do { |
1194 | asm volatile( | |
731efc96 | 1195 | " ipte %[r1],%[r2],%[r3],%[m4]" |
34eeaf37 MS |
1196 | : [r2] "+a" (address), [r3] "+a" (nr) |
1197 | : [r1] "a" (pto), [m4] "i" (local) : "memory"); | |
cfb0b241 HC |
1198 | } while (nr != 255); |
1199 | } | |
1200 | ||
0a61b222 | 1201 | /* |
ebde765c MS |
1202 | * This is hard to understand. ptep_get_and_clear and ptep_clear_flush |
1203 | * both clear the TLB for the unmapped pte. The reason is that | |
1204 | * ptep_get_and_clear is used in common code (e.g. change_pte_range) | |
1205 | * to modify an active pte. The sequence is | |
1206 | * 1) ptep_get_and_clear | |
1207 | * 2) set_pte_at | |
1208 | * 3) flush_tlb_range | |
1209 | * On s390 the tlb needs to get flushed with the modification of the pte | |
1210 | * if the pte is active. The only way how this can be implemented is to | |
1211 | * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range | |
1212 | * is a nop. | |
0a61b222 | 1213 | */ |
ebde765c MS |
1214 | pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t); |
1215 | pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t); | |
0a61b222 | 1216 | |
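The three-step sequence described in the comment above, as common code performs it, is roughly the following (a simplified kernel-context sketch, not the verbatim mm code): on s390 the TLB flush already happened inside step 1, so step 3 is a nop here.

```c
/* Simplified sketch of the generic change-protection sequence. */
static void change_one_pte(struct vm_area_struct *vma, unsigned long addr,
			   pte_t *ptep, pgprot_t newprot)
{
	pte_t pte;

	pte = ptep_get_and_clear(vma->vm_mm, addr, ptep);	/* step 1 */
	pte = pte_modify(pte, newprot);
	set_pte_at(vma->vm_mm, addr, ptep, pte);		/* step 2 */
	flush_tlb_range(vma, addr, addr + PAGE_SIZE);		/* step 3 */
}
```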
0944fe3f MS |
1217 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
1218 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, | |
1219 | unsigned long addr, pte_t *ptep) | |
1220 | { | |
ebde765c | 1221 | pte_t pte = *ptep; |
0944fe3f | 1222 | |
ebde765c MS |
1223 | pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte)); |
1224 | return pte_young(pte); | |
0944fe3f MS |
1225 | } |
1226 | ||
1227 | #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH | |
1228 | static inline int ptep_clear_flush_young(struct vm_area_struct *vma, | |
1229 | unsigned long address, pte_t *ptep) | |
1230 | { | |
1231 | return ptep_test_and_clear_young(vma, address, ptep); | |
1232 | } | |
1233 | ||
ba8a9229 | 1234 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR |
b2fa47e6 | 1235 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, |
ebde765c | 1236 | unsigned long addr, pte_t *ptep) |
b2fa47e6 | 1237 | { |
214d9bbc CI |
1238 | pte_t res; |
1239 | ||
1240 | res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID)); | |
380d97bd | 1241 | /* At this point the reference through the mapping is still present */ |
214d9bbc | 1242 | if (mm_is_protected(mm) && pte_present(res)) |
7d171434 | 1243 | uv_convert_from_secure_pte(res); |
214d9bbc | 1244 | return res; |
b2fa47e6 MS |
1245 | } |
1246 | ||
1247 | #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION | |
0cbe3e26 | 1248 | pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *); |
04a86453 AK |
1249 | void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long, |
1250 | pte_t *, pte_t, pte_t); | |
ba8a9229 MS |
1251 | |
1252 | #define __HAVE_ARCH_PTEP_CLEAR_FLUSH | |
f0e47c22 | 1253 | static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, |
ebde765c | 1254 | unsigned long addr, pte_t *ptep) |
f0e47c22 | 1255 | { |
214d9bbc CI |
1256 | pte_t res; |
1257 | ||
1258 | res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID)); | |
380d97bd | 1259 | /* At this point the reference through the mapping is still present */ |
214d9bbc | 1260 | if (mm_is_protected(vma->vm_mm) && pte_present(res)) |
7d171434 | 1261 | uv_convert_from_secure_pte(res); |
214d9bbc | 1262 | return res; |
1da177e4 LT |
1263 | } |
1264 | ||
ba8a9229 MS |
1265 | /* |
1266 | * The batched pte unmap code uses ptep_get_and_clear_full to clear the | |
1267 | * ptes. Here an optimization is possible. tlb_gather_mmu flushes all | |
1268 | * tlbs of an mm if it can guarantee that the ptes of the mm_struct | |
1269 | * cannot be accessed while the batched unmap is running. In this case | |
1270 | * full==1 and a simple pte_clear is enough. See tlb.h. | |
1271 | */ | |
1272 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL | |
1273 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, | |
ebde765c | 1274 | unsigned long addr, |
ba8a9229 | 1275 | pte_t *ptep, int full) |
1da177e4 | 1276 | { |
214d9bbc CI |
1277 | pte_t res; |
1278 | ||
ebde765c | 1279 | if (full) { |
214d9bbc | 1280 | res = *ptep; |
b8e3b379 | 1281 | set_pte(ptep, __pte(_PAGE_INVALID)); |
214d9bbc CI |
1282 | } else { |
1283 | res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID)); | |
b2fa47e6 | 1284 | } |
9bf811da CI |
1285 | /* Nothing to do */ |
1286 | if (!mm_is_protected(mm) || !pte_present(res)) | |
1287 | return res; | |
1288 | /* | |
1289 | * At this point the reference through the mapping is still present. | |
1290 | * The notifier should have destroyed all protected vCPUs at this | |
1291 | * point, so the destroy should be successful. | |
1292 | */ | |
70631506 | 1293 | if (full && !uv_destroy_pte(res)) |
9bf811da CI |
1294 | return res; |
1295 | /* | |
1296 | * If something went wrong and the page could not be destroyed, or | |
1297 | * if this is not a mm teardown, the slower export is used as | |
1298 | * fallback instead. | |
1299 | */ | |
7d171434 | 1300 | uv_convert_from_secure_pte(res); |
214d9bbc | 1301 | return res; |
1da177e4 LT |
1302 | } |
1303 | ||
ba8a9229 | 1304 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT |
ebde765c MS |
1305 | static inline void ptep_set_wrprotect(struct mm_struct *mm, |
1306 | unsigned long addr, pte_t *ptep) | |
b2fa47e6 | 1307 | { |
b2fa47e6 MS |
1308 | pte_t pte = *ptep; |
1309 | ||
ebde765c MS |
1310 | if (pte_write(pte)) |
1311 | ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte)); | |
b2fa47e6 | 1312 | } |
ba8a9229 | 1313 | |
0807b856 GS |
1314 | /* |
1315 | * Check if two PTEs differ only in the _PAGE_PROTECT HW bit; the SW PTE
1316 | * bits are allowed to differ as well, since they might change e.g. because
1317 | * of dirty and young tracking.
1318 | */ | |
1319 | static inline int pte_allow_rdp(pte_t old, pte_t new) | |
1320 | { | |
1321 | /* | |
1322 | * Only allow changes from RO to RW | |
1323 | */ | |
1324 | if (!(pte_val(old) & _PAGE_PROTECT) || pte_val(new) & _PAGE_PROTECT) | |
1325 | return 0; | |
1326 | ||
1327 | return (pte_val(old) & _PAGE_RDP_MASK) == (pte_val(new) & _PAGE_RDP_MASK); | |
1328 | } | |
1329 | ||
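/*
 * Illustrative example (sketch; "ptep" is a hypothetical pointer to an RO
 * pte, i.e. _PAGE_PROTECT is set): an RO->RW upgrade where only the
 * protection bit and the masked-out SW/dirty/young bits change qualifies
 * for RDP, the reverse direction never does:
 *
 *	pte_t old = *ptep;
 *	pte_t new = pte_mkdirty(pte_mkwrite_novma(old));
 *
 *	pte_allow_rdp(old, new);	- 1: only _PAGE_PROTECT and
 *					  masked-out bits differ
 *	pte_allow_rdp(new, old);	- 0: only RO->RW upgrades qualify
 */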
1330 | static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma, | |
99c29133 GS |
1331 | unsigned long address, |
1332 | pte_t *ptep) | |
0807b856 GS |
1333 | { |
1334 | /* | |
1335 | * RDP might not have propagated the PTE protection reset to all CPUs, | |
1336 | * so there could be spurious TLB protection faults. | |
1337 | * NOTE: This will also be called when a racing pagetable update on | |
1338 | * another thread already installed the correct PTE. Both cases cannot | |
1339 | * really be distinguished. | |
99c29133 GS |
1340 | * Therefore, only do the local TLB flush when RDP can be used, and the |
1341 | * PTE does not have _PAGE_PROTECT set, to avoid unnecessary overhead. | |
1342 | * A local RDP can be used to do the flush. | |
0807b856 | 1343 | */ |
15a36036 | 1344 | if (cpu_has_rdp() && !(pte_val(*ptep) & _PAGE_PROTECT)) |
99c29133 | 1345 | __ptep_rdp(address, ptep, 0, 0, 1); |
0807b856 GS |
1346 | } |
1347 | #define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault | |
1348 | ||
1349 | void ptep_reset_dat_prot(struct mm_struct *mm, unsigned long addr, pte_t *ptep, | |
1350 | pte_t new); | |
1351 | ||
ba8a9229 | 1352 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS |
b2fa47e6 | 1353 | static inline int ptep_set_access_flags(struct vm_area_struct *vma, |
ebde765c | 1354 | unsigned long addr, pte_t *ptep, |
b2fa47e6 MS |
1355 | pte_t entry, int dirty) |
1356 | { | |
ebde765c | 1357 | if (pte_same(*ptep, entry)) |
b2fa47e6 | 1358 | return 0; |
15a36036 | 1359 | if (cpu_has_rdp() && !mm_has_pgste(vma->vm_mm) && pte_allow_rdp(*ptep, entry)) |
0807b856 GS |
1360 | ptep_reset_dat_prot(vma->vm_mm, addr, ptep, entry); |
1361 | else | |
1362 | ptep_xchg_direct(vma->vm_mm, addr, ptep, entry); | |
ebde765c MS |
1363 | return 1; |
1364 | } | |
b2fa47e6 | 1365 | |
1e133ab2 MS |
1366 | /* |
1367 | * Additional functions to handle KVM guest page tables | |
1368 | */ | |
1369 | void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr, | |
1370 | pte_t *ptep, pte_t entry); | |
1371 | void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep); | |
4be130a0 MS |
1372 | void ptep_notify(struct mm_struct *mm, unsigned long addr, |
1373 | pte_t *ptep, unsigned long bits); | |
b2d73b2a | 1374 | int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr, |
4be130a0 | 1375 | pte_t *ptep, int prot, unsigned long bit); |
1e133ab2 MS |
1376 | void ptep_zap_unused(struct mm_struct *mm, unsigned long addr, |
1377 | pte_t *ptep, int reset); | 
1378 | void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep); | |
4be130a0 | 1379 | int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr, |
a9d23e71 | 1380 | pte_t *sptep, pte_t *tptep, pte_t pte); |
4be130a0 | 1381 | void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep); |
1e133ab2 | 1382 | |
0959e168 JF |
1383 | bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address, |
1384 | pte_t *ptep); | |
1e133ab2 MS |
1385 | int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, |
1386 | unsigned char key, bool nq); | |
1824c723 DH |
1387 | int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr, |
1388 | unsigned char key, unsigned char *oldkey, | |
1389 | bool nq, bool mr, bool mc); | |
a7e19ab5 | 1390 | int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr); |
154c8c19 DH |
1391 | int get_guest_storage_key(struct mm_struct *mm, unsigned long addr, |
1392 | unsigned char *key); | |
b2fa47e6 | 1393 | |
2d42f947 CI |
1394 | int set_pgste_bits(struct mm_struct *mm, unsigned long addr, |
1395 | unsigned long bits, unsigned long value); | |
1396 | int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep); | |
1397 | int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc, | |
1398 | unsigned long *oldpte, unsigned long *oldpgste); | |
6a376277 JF |
1399 | void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr); |
1400 | void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr); | |
1401 | void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr); | |
1402 | void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr); | |
2d42f947 | 1403 | |
b02002cc NS |
1404 | #define pgprot_writecombine pgprot_writecombine |
1405 | pgprot_t pgprot_writecombine(pgprot_t prot); | |
1406 | ||
4555ac8b DH |
1407 | #define PFN_PTE_SHIFT PAGE_SHIFT |
1408 | ||
ebde765c | 1409 | /* |
843f9310 MWO |
1410 | * Set multiple PTEs to consecutive pages with a single call. All PTEs |
1411 | * are within the same folio, PMD and VMA. | |
ebde765c | 1412 | */ |
843f9310 MWO |
1413 | static inline void set_ptes(struct mm_struct *mm, unsigned long addr, |
1414 | pte_t *ptep, pte_t entry, unsigned int nr) | |
ebde765c | 1415 | { |
a8f60d1f | 1416 | if (pte_present(entry)) |
4a366f51 | 1417 | entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED)); |
843f9310 MWO |
1418 | if (mm_has_pgste(mm)) { |
1419 | for (;;) { | |
1420 | ptep_set_pte_at(mm, addr, ptep, entry); | |
1421 | if (--nr == 0) | |
1422 | break; | |
1423 | ptep++; | |
1424 | entry = __pte(pte_val(entry) + PAGE_SIZE); | |
1425 | addr += PAGE_SIZE; | |
1426 | } | |
1427 | } else { | |
1428 | for (;;) { | |
1429 | set_pte(ptep, entry); | |
1430 | if (--nr == 0) | |
1431 | break; | |
1432 | ptep++; | |
1433 | entry = __pte(pte_val(entry) + PAGE_SIZE); | |
1434 | } | |
1435 | } | |
b2fa47e6 | 1436 | } |
843f9310 | 1437 | #define set_ptes set_ptes |
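/*
 * Usage sketch ("vma", "addr", "ptep" and "folio" are hypothetical): map
 * all pages of a folio with one call; only the first pte is built by the
 * caller, set_ptes() advances the physical address by PAGE_SIZE per pte:
 *
 *	pte_t entry = pfn_pte(folio_pfn(folio), vma->vm_page_prot);
 *
 *	set_ptes(vma->vm_mm, addr, ptep, entry, folio_nr_pages(folio));
 */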
1da177e4 | 1438 | |
1da177e4 LT |
1439 | /* |
1440 | * Conversion functions: convert a page and protection to a page entry, | |
1441 | * and a page entry and page directory to the page they refer to. | |
1442 | */ | |
1443 | static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) | |
1444 | { | |
1445 | pte_t __pte; | |
b02002cc | 1446 | |
4a366f51 | 1447 | __pte = __pte(physpage | pgprot_val(pgprot)); |
0944fe3f | 1448 | return pte_mkyoung(__pte); |
1da177e4 LT |
1449 | } |
1450 | ||
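/*
 * E.g. (sketch; "paddr" is a hypothetical page-aligned physical address):
 * build a present and young kernel pte:
 *
 *	pte_t pte = mk_pte_phys(paddr, PAGE_KERNEL);
 */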
190a1d72 | 1451 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) |
1aea9b3f | 1452 | #define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1)) |
190a1d72 MS |
1453 | #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) |
1454 | #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) | |
1da177e4 | 1455 | |
86c827b3 AG |
1456 | #define p4d_deref(pud) ((unsigned long)__va(p4d_val(pud) & _REGION_ENTRY_ORIGIN)) |
1457 | #define pgd_deref(pgd) ((unsigned long)__va(pgd_val(pgd) & _REGION_ENTRY_ORIGIN)) | |
1da177e4 | 1458 | |
b0e98aa9 GS |
1459 | static inline unsigned long pmd_deref(pmd_t pmd) |
1460 | { | |
1461 | unsigned long origin_mask; | |
1462 | ||
1463 | origin_mask = _SEGMENT_ENTRY_ORIGIN; | |
2f709f7b | 1464 | if (pmd_leaf(pmd)) |
b0e98aa9 | 1465 | origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE; |
86c827b3 | 1466 | return (unsigned long)__va(pmd_val(pmd) & origin_mask); |
b0e98aa9 GS |
1467 | } |
1468 | ||
1469 | static inline unsigned long pmd_pfn(pmd_t pmd) | |
1470 | { | |
86c827b3 | 1471 | return __pa(pmd_deref(pmd)) >> PAGE_SHIFT; |
b0e98aa9 GS |
1472 | } |
1473 | ||
1474 | static inline unsigned long pud_deref(pud_t pud) | |
1475 | { | |
1476 | unsigned long origin_mask; | |
1477 | ||
1478 | origin_mask = _REGION_ENTRY_ORIGIN; | |
0a845e0f | 1479 | if (pud_leaf(pud)) |
b0e98aa9 | 1480 | origin_mask = _REGION3_ENTRY_ORIGIN_LARGE; |
86c827b3 | 1481 | return (unsigned long)__va(pud_val(pud) & origin_mask); |
b0e98aa9 GS |
1482 | } |
1483 | ||
35a76f5c | 1484 | #define pud_pfn pud_pfn |
b0e98aa9 GS |
1485 | static inline unsigned long pud_pfn(pud_t pud) |
1486 | { | |
86c827b3 | 1487 | return __pa(pud_deref(pud)) >> PAGE_SHIFT; |
b0e98aa9 GS |
1488 | } |
1489 | ||
d1874a0c MS |
1490 | /* |
1491 | * The pgd_offset function *always* adds the index for the top-level | |
1492 | * region/segment table. This is done to get a sequence like the | |
1493 | * following to work: | |
1494 | * pgdp = pgd_offset(current->mm, addr); | |
1495 | * pgd = READ_ONCE(*pgdp); | |
1496 | * p4dp = p4d_offset(&pgd, addr); | |
1497 | * ... | |
1498 | * The subsequent p4d_offset, pud_offset and pmd_offset functions | |
1499 | * only add an index if they dereferenced the pointer. | |
1500 | */ | |
1501 | static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address) | |
5a216a20 | 1502 | { |
d1874a0c MS |
1503 | unsigned long rste; |
1504 | unsigned int shift; | |
1aea9b3f | 1505 | |
d1874a0c MS |
1506 | /* Get the first entry of the top level table */ |
1507 | rste = pgd_val(*pgd); | |
1508 | /* Pick up the shift from the table type of the first entry */ | |
1509 | shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20; | |
1510 | return pgd + ((address >> shift) & (PTRS_PER_PGD - 1)); | |
1aea9b3f MS |
1511 | } |
1512 | ||
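/*
 * Worked example for the shift computation above: the table type taken
 * from _REGION_ENTRY_TYPE_MASK is 3 (region-1), 2 (region-2), 1 (region-3)
 * or 0 (segment), giving shift = 3 * 11 + 20 = 53, 42, 31 or 20, since
 * each table level translates 11 address bits and the page index plus
 * page offset take the lowest 20.
 */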
d1874a0c | 1513 | #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address) |
d1874a0c | 1514 | |
d3f7b1bb | 1515 | static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address) |
1aea9b3f | 1516 | { |
d3f7b1bb VG |
1517 | if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1) |
1518 | return (p4d_t *) pgd_deref(pgd) + p4d_index(address); | |
1519 | return (p4d_t *) pgdp; | |
d1874a0c | 1520 | } |
d3f7b1bb | 1521 | #define p4d_offset_lockless p4d_offset_lockless |
1aea9b3f | 1522 | |
d3f7b1bb | 1523 | static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address) |
d1874a0c | 1524 | { |
d3f7b1bb VG |
1525 | return p4d_offset_lockless(pgdp, *pgdp, address); |
1526 | } | |
1527 | ||
1528 | static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address) | |
1529 | { | |
1530 | if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2) | |
1531 | return (pud_t *) p4d_deref(p4d) + pud_index(address); | |
1532 | return (pud_t *) p4dp; | |
1533 | } | |
1534 | #define pud_offset_lockless pud_offset_lockless | |
1535 | ||
1536 | static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address) | |
1537 | { | |
1538 | return pud_offset_lockless(p4dp, *p4dp, address); | |
5a216a20 | 1539 | } |
974b9b2c | 1540 | #define pud_offset pud_offset |
1da177e4 | 1541 | |
d3f7b1bb VG |
1542 | static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address) |
1543 | { | |
1544 | if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3) | |
1545 | return (pmd_t *) pud_deref(pud) + pmd_index(address); | |
1546 | return (pmd_t *) pudp; | |
1547 | } | |
1548 | #define pmd_offset_lockless pmd_offset_lockless | |
1549 | ||
1550 | static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address) | |
1da177e4 | 1551 | { |
d3f7b1bb | 1552 | return pmd_offset_lockless(pudp, *pudp, address); |
d1874a0c | 1553 | } |
974b9b2c | 1554 | #define pmd_offset pmd_offset |
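/*
 * Complete lockless walk sketch using the helpers above ("mm" and "addr"
 * are hypothetical; error handling and the pte level omitted):
 *
 *	pgd_t *pgdp, pgd;
 *	p4d_t *p4dp, p4d;
 *	pud_t *pudp, pud;
 *	pmd_t *pmdp;
 *
 *	pgdp = pgd_offset(mm, addr);
 *	pgd = READ_ONCE(*pgdp);
 *	p4dp = p4d_offset_lockless(pgdp, pgd, addr);
 *	p4d = READ_ONCE(*p4dp);
 *	pudp = pud_offset_lockless(p4dp, p4d, addr);
 *	pud = READ_ONCE(*pudp);
 *	pmdp = pmd_offset_lockless(pudp, pud, addr);
 *
 * Each *_offset_lockless() only adds an index when the entry it was handed
 * really refers to a lower-level table; otherwise it casts the pointer and
 * lets the next level do the indexing, as explained above pgd_offset_raw().
 */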
1aea9b3f | 1555 | |
974b9b2c | 1556 | static inline unsigned long pmd_page_vaddr(pmd_t pmd) |
d1874a0c | 1557 | { |
974b9b2c | 1558 | return (unsigned long) pmd_deref(pmd); |
1da177e4 LT |
1559 | } |
1560 | ||
26f4c328 | 1561 | static inline bool gup_fast_permitted(unsigned long start, unsigned long end) |
1a42010c | 1562 | { |
1a42010c MS |
1563 | return end <= current->mm->context.asce_limit; |
1564 | } | |
1565 | #define gup_fast_permitted gup_fast_permitted | |
1566 | ||
0f3bf303 | 1567 | #define pfn_pte(pfn, pgprot) mk_pte_phys(((pfn) << PAGE_SHIFT), (pgprot)) |
190a1d72 MS |
1568 | #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) |
1569 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | |
1da177e4 | 1570 | |
152125b7 | 1571 | #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd)) |
d08de8e2 | 1572 | #define pud_page(pud) pfn_to_page(pud_pfn(pud)) |
d0e2eb0a VG |
1573 | #define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d)) |
1574 | #define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd)) | |
1da177e4 | 1575 | |
152125b7 | 1576 | static inline pmd_t pmd_wrprotect(pmd_t pmd) |
0944fe3f | 1577 | { |
4a366f51 HC |
1578 | pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE)); |
1579 | return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); | |
152125b7 MS |
1580 | } |
1581 | ||
2f0584f3 | 1582 | static inline pmd_t pmd_mkwrite_novma(pmd_t pmd) |
152125b7 | 1583 | { |
4a366f51 | 1584 | pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE)); |
2d1fc1eb | 1585 | if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) |
4a366f51 | 1586 | pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); |
152125b7 MS |
1587 | return pmd; |
1588 | } | |
1589 | ||
1590 | static inline pmd_t pmd_mkclean(pmd_t pmd) | |
1591 | { | |
4a366f51 HC |
1592 | pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY)); |
1593 | return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); | |
152125b7 MS |
1594 | } |
1595 | ||
1596 | static inline pmd_t pmd_mkdirty(pmd_t pmd) | |
1597 | { | |
4a366f51 | 1598 | pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY)); |
2d1fc1eb | 1599 | if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) |
4a366f51 | 1600 | pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); |
152125b7 MS |
1601 | return pmd; |
1602 | } | |
1603 | ||
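/*
 * Note on the four helpers above (and their pud twins below): dirty state
 * is tracked in software, so an entry only becomes HW-writable, i.e.
 * _SEGMENT_ENTRY_PROTECT cleared, once it is both WRITE and DIRTY - the
 * first store to a clean entry faults and marks it dirty. Sketch, assuming
 * a clean, protected pmd:
 *
 *	pmd = pmd_mkwrite_novma(pmd);	- WRITE set, still write-protected
 *	pmd = pmd_mkdirty(pmd);		- DIRTY set, PROTECT now cleared
 */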
9e20b4da HC |
1604 | static inline pud_t pud_wrprotect(pud_t pud) |
1605 | { | |
4a366f51 HC |
1606 | pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE)); |
1607 | return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT)); | |
9e20b4da HC |
1608 | } |
1609 | ||
1610 | static inline pud_t pud_mkwrite(pud_t pud) | |
1611 | { | |
4a366f51 | 1612 | pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE)); |
2d1fc1eb | 1613 | if (pud_val(pud) & _REGION3_ENTRY_DIRTY) |
4a366f51 | 1614 | pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT)); |
9e20b4da HC |
1615 | return pud; |
1616 | } | |
1617 | ||
1618 | static inline pud_t pud_mkclean(pud_t pud) | |
1619 | { | |
4a366f51 HC |
1620 | pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY)); |
1621 | return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT)); | |
9e20b4da HC |
1622 | } |
1623 | ||
1624 | static inline pud_t pud_mkdirty(pud_t pud) | |
1625 | { | |
4a366f51 | 1626 | pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY)); |
2d1fc1eb | 1627 | if (pud_val(pud) & _REGION3_ENTRY_WRITE) |
4a366f51 | 1628 | pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT)); |
9e20b4da HC |
1629 | return pud; |
1630 | } | |
1631 | ||
1632 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE) | |
1633 | static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) | |
1634 | { | |
1635 | /* | |
57d7f939 MS |
1636 | * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX |
1637 | * (see __Pxxx / __Sxxx). Convert to segment table entry format. | |
9e20b4da HC |
1638 | */ |
1639 | if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE)) | |
1640 | return pgprot_val(SEGMENT_NONE); | |
57d7f939 MS |
1641 | if (pgprot_val(pgprot) == pgprot_val(PAGE_RO)) |
1642 | return pgprot_val(SEGMENT_RO); | |
1643 | if (pgprot_val(pgprot) == pgprot_val(PAGE_RX)) | |
1644 | return pgprot_val(SEGMENT_RX); | |
1645 | if (pgprot_val(pgprot) == pgprot_val(PAGE_RW)) | |
1646 | return pgprot_val(SEGMENT_RW); | |
1647 | return pgprot_val(SEGMENT_RWX); | |
9e20b4da HC |
1648 | } |
1649 | ||
152125b7 MS |
1650 | static inline pmd_t pmd_mkyoung(pmd_t pmd) |
1651 | { | |
4a366f51 | 1652 | pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG)); |
2d1fc1eb | 1653 | if (pmd_val(pmd) & _SEGMENT_ENTRY_READ) |
4a366f51 | 1654 | pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID)); |
0944fe3f MS |
1655 | return pmd; |
1656 | } | |
1657 | ||
1658 | static inline pmd_t pmd_mkold(pmd_t pmd) | |
1659 | { | |
4a366f51 HC |
1660 | pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG)); |
1661 | return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID)); | |
0944fe3f MS |
1662 | } |
1663 | ||
1ae1c1d0 GS |
1664 | static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) |
1665 | { | |
4a366f51 HC |
1666 | unsigned long mask; |
1667 | ||
1668 | mask = _SEGMENT_ENTRY_ORIGIN_LARGE; | |
1669 | mask |= _SEGMENT_ENTRY_DIRTY; | |
1670 | mask |= _SEGMENT_ENTRY_YOUNG; | |
1671 | mask |= _SEGMENT_ENTRY_LARGE; | |
1672 | mask |= _SEGMENT_ENTRY_SOFT_DIRTY; | |
1673 | pmd = __pmd(pmd_val(pmd) & mask); | |
1674 | pmd = set_pmd_bit(pmd, __pgprot(massage_pgprot_pmd(newprot))); | |
2d1fc1eb | 1675 | if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)) |
4a366f51 | 1676 | pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); |
2d1fc1eb | 1677 | if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)) |
4a366f51 | 1678 | pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID)); |
1ae1c1d0 GS |
1679 | return pmd; |
1680 | } | |
1681 | ||
106c992a | 1682 | static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) |
1ae1c1d0 | 1683 | { |
4a366f51 | 1684 | return __pmd(physpage + massage_pgprot_pmd(pgprot)); |
1ae1c1d0 GS |
1685 | } |
1686 | ||
106c992a GS |
1687 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ |
1688 | ||
1b948d6c MS |
1689 | static inline void __pmdp_csp(pmd_t *pmdp) |
1690 | { | |
4ccccc52 HC |
1691 | csp((unsigned int *)pmdp + 1, pmd_val(*pmdp), |
1692 | pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID); | |
1b948d6c MS |
1693 | } |
1694 | ||
47e4d851 MS |
1695 | #define IDTE_GLOBAL 0 |
1696 | #define IDTE_LOCAL 1 | |
d08de8e2 | 1697 | |
118bd31b MS |
1698 | #define IDTE_PTOA 0x0800 |
1699 | #define IDTE_NODAT 0x1000 | |
28c807e5 | 1700 | #define IDTE_GUEST_ASCE 0x2000 |
118bd31b | 1701 | |
6818b542 HC |
1702 | static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp, |
1703 | unsigned long opt, unsigned long asce, | |
1704 | int local) | |
1b948d6c MS |
1705 | { |
1706 | unsigned long sto; | |
1707 | ||
273cd173 | 1708 | sto = __pa(pmdp) - pmd_index(addr) * sizeof(pmd_t); |
28c807e5 MS |
1709 | if (__builtin_constant_p(opt) && opt == 0) { |
1710 | /* flush without guest asce */ | |
1711 | asm volatile( | |
731efc96 | 1712 | " idte %[r1],0,%[r2],%[m4]" |
28c807e5 MS |
1713 | : "+m" (*pmdp) |
1714 | : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)), | |
1715 | [m4] "i" (local) | |
1716 | : "cc" ); | |
1717 | } else { | |
1718 | /* flush with guest asce */ | |
1719 | asm volatile( | |
731efc96 | 1720 | " idte %[r1],%[r3],%[r2],%[m4]" |
28c807e5 MS |
1721 | : "+m" (*pmdp) |
1722 | : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt), | |
1723 | [r3] "a" (asce), [m4] "i" (local) | |
1724 | : "cc" ); | |
1725 | } | |
1b948d6c MS |
1726 | } |
1727 | ||
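/*
 * Invocation sketch ("addr" and "pmdp" are hypothetical): flush the TLB
 * entry for one segment, CPU-locally or machine-wide, without a guest ASCE:
 *
 *	__pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);	- this CPU only
 *	__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);	- all CPUs
 *
 * With opt = IDTE_GUEST_ASCE and a guest asce the second asm form is used;
 * __pudp_idte() below works the same way on region-3 entries.
 */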
6818b542 HC |
1728 | static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp, |
1729 | unsigned long opt, unsigned long asce, | |
1730 | int local) | |
d08de8e2 GS |
1731 | { |
1732 | unsigned long r3o; | |
1733 | ||
273cd173 | 1734 | r3o = __pa(pudp) - pud_index(addr) * sizeof(pud_t); |
d08de8e2 | 1735 | r3o |= _ASCE_TYPE_REGION3; |
28c807e5 MS |
1736 | if (__builtin_constant_p(opt) && opt == 0) { |
1737 | /* flush without guest asce */ | |
1738 | asm volatile( | |
731efc96 | 1739 | " idte %[r1],0,%[r2],%[m4]" |
28c807e5 MS |
1740 | : "+m" (*pudp) |
1741 | : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)), | |
1742 | [m4] "i" (local) | |
1743 | : "cc"); | |
1744 | } else { | |
1745 | /* flush with guest asce */ | |
1746 | asm volatile( | |
731efc96 | 1747 | " idte %[r1],%[r3],%[r2],%[m4]" |
28c807e5 MS |
1748 | : "+m" (*pudp) |
1749 | : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt), | |
1750 | [r3] "a" (asce), [m4] "i" (local) | |
1751 | : "cc" ); | |
1752 | } | |
d08de8e2 GS |
1753 | } |
1754 | ||
227be799 MS |
1755 | pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t); |
1756 | pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t); | |
d08de8e2 | 1757 | pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t); |
1b948d6c | 1758 | |
227be799 MS |
1759 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
1760 | ||
1761 | #define __HAVE_ARCH_PGTABLE_DEPOSIT | |
1762 | void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, | |
1763 | pgtable_t pgtable); | |
1764 | ||
1765 | #define __HAVE_ARCH_PGTABLE_WITHDRAW | |
1766 | pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); | |
1b948d6c | 1767 | |
227be799 MS |
1768 | #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS |
1769 | static inline int pmdp_set_access_flags(struct vm_area_struct *vma, | |
1770 | unsigned long addr, pmd_t *pmdp, | |
1771 | pmd_t entry, int dirty) | |
3eabaee9 | 1772 | { |
227be799 | 1773 | VM_BUG_ON(addr & ~HPAGE_MASK); |
3eabaee9 | 1774 | |
227be799 MS |
1775 | entry = pmd_mkyoung(entry); |
1776 | if (dirty) | |
1777 | entry = pmd_mkdirty(entry); | |
1778 | if (pmd_val(*pmdp) == pmd_val(entry)) | |
1779 | return 0; | |
1780 | pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry); | |
1781 | return 1; | |
3eabaee9 MS |
1782 | } |
1783 | ||
227be799 MS |
1784 | #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG |
1785 | static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, | |
1786 | unsigned long addr, pmd_t *pmdp) | |
1787 | { | |
1788 | pmd_t pmd = *pmdp; | |
106c992a | 1789 | |
227be799 MS |
1790 | pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd)); |
1791 | return pmd_young(pmd); | |
1792 | } | |
106c992a | 1793 | |
227be799 MS |
1794 | #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH |
1795 | static inline int pmdp_clear_flush_young(struct vm_area_struct *vma, | |
1796 | unsigned long addr, pmd_t *pmdp) | |
1797 | { | |
1798 | VM_BUG_ON(addr & ~HPAGE_MASK); | |
1799 | return pmdp_test_and_clear_young(vma, addr, pmdp); | |
1800 | } | |
106c992a | 1801 | |
106c992a GS |
1802 | static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, |
1803 | pmd_t *pmdp, pmd_t entry) | |
1804 | { | |
b8e3b379 | 1805 | set_pmd(pmdp, entry); |
106c992a GS |
1806 | } |
1807 | ||
1808 | static inline pmd_t pmd_mkhuge(pmd_t pmd) | |
1809 | { | |
4a366f51 HC |
1810 | pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_LARGE)); |
1811 | pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG)); | |
1812 | return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); | |
1ae1c1d0 GS |
1813 | } |
1814 | ||
8809aa2d AK |
1815 | #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR |
1816 | static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, | |
227be799 | 1817 | unsigned long addr, pmd_t *pmdp) |
1ae1c1d0 | 1818 | { |
54397bb0 | 1819 | return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); |
1ae1c1d0 GS |
1820 | } |
1821 | ||
8809aa2d | 1822 | #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL |
93a98695 | 1823 | static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma, |
227be799 | 1824 | unsigned long addr, |
8809aa2d | 1825 | pmd_t *pmdp, int full) |
fcbe08d6 | 1826 | { |
227be799 MS |
1827 | if (full) { |
1828 | pmd_t pmd = *pmdp; | |
b8e3b379 | 1829 | set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); |
227be799 MS |
1830 | return pmd; |
1831 | } | |
93a98695 | 1832 | return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); |
fcbe08d6 MS |
1833 | } |
1834 | ||
8809aa2d AK |
1835 | #define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH |
1836 | static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, | |
227be799 | 1837 | unsigned long addr, pmd_t *pmdp) |
1ae1c1d0 | 1838 | { |
227be799 | 1839 | return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp); |
1ae1c1d0 GS |
1840 | } |
1841 | ||
1842 | #define __HAVE_ARCH_PMDP_INVALIDATE | |
9c4563f1 | 1843 | static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma, |
227be799 | 1844 | unsigned long addr, pmd_t *pmdp) |
1ae1c1d0 | 1845 | { |
3a5a8d34 | 1846 | pmd_t pmd; |
91c575b3 | 1847 | |
3a5a8d34 RR |
1848 | VM_WARN_ON_ONCE(!pmd_present(*pmdp)); |
1849 | pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID); | |
9c4563f1 | 1850 | return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd); |
1ae1c1d0 GS |
1851 | } |
1852 | ||
be328650 GS |
1853 | #define __HAVE_ARCH_PMDP_SET_WRPROTECT |
1854 | static inline void pmdp_set_wrprotect(struct mm_struct *mm, | |
227be799 | 1855 | unsigned long addr, pmd_t *pmdp) |
be328650 GS |
1856 | { |
1857 | pmd_t pmd = *pmdp; | |
1858 | ||
227be799 MS |
1859 | if (pmd_write(pmd)) |
1860 | pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd)); | |
be328650 GS |
1861 | } |
1862 | ||
f28b6ff8 AK |
1863 | static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, |
1864 | unsigned long address, | |
1865 | pmd_t *pmdp) | |
1866 | { | |
8809aa2d | 1867 | return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); |
f28b6ff8 AK |
1868 | } |
1869 | #define pmdp_collapse_flush pmdp_collapse_flush | |
1870 | ||
0f3bf303 | 1871 | #define pfn_pmd(pfn, pgprot) mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot)) |
1ae1c1d0 GS |
1872 | |
1873 | static inline int pmd_trans_huge(pmd_t pmd) | |
1874 | { | |
03e6db16 | 1875 | return pmd_leaf(pmd); |
1ae1c1d0 GS |
1876 | } |
1877 | ||
fd8cfd30 | 1878 | #define has_transparent_hugepage has_transparent_hugepage |
1ae1c1d0 GS |
1879 | static inline int has_transparent_hugepage(void) |
1880 | { | |
2e2ff71f | 1881 | return cpu_has_edat1() ? 1 : 0; |
1ae1c1d0 | 1882 | } |
75077afb GS |
1883 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
1884 | ||
1da177e4 | 1885 | /* |
1da177e4 LT |
1886 | * 64 bit swap entry format: |
1887 | * A page-table entry has some bits we have to treat in a special way. | |
8043d26c DH |
1888 | * Bits 54 and 63 are used to indicate the page type. Bit 53 marks the pte |
1889 | * as invalid. | |
a1c843b8 | 1890 | * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200 |
92cd58bd | 1891 | * | offset |E11XX|type |S0| |
a1c843b8 MS |
1892 | * |0000000000111111111122222222223333333333444444444455|55555|55566|66| |
1893 | * |0123456789012345678901234567890123456789012345678901|23456|78901|23| | |
8043d26c DH |
1894 | * |
1895 | * Bits 0-51 store the offset. | |
92cd58bd | 1896 | * Bit 52 (E) is used to remember PG_anon_exclusive. |
8043d26c DH |
1897 | * Bits 57-61 store the type. |
1898 | * Bit 62 (S) is used for softdirty tracking. | |
92cd58bd | 1899 | * Bits 55 and 56 (X) are unused. |
1da177e4 | 1900 | */ |
5a79859a | 1901 | |
a1c843b8 MS |
1902 | #define __SWP_OFFSET_MASK ((1UL << 52) - 1) |
1903 | #define __SWP_OFFSET_SHIFT 12 | |
1904 | #define __SWP_TYPE_MASK ((1UL << 5) - 1) | |
1905 | #define __SWP_TYPE_SHIFT 2 | |
5a79859a | 1906 | |
4448aaf0 | 1907 | static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) |
1da177e4 | 1908 | { |
4a366f51 | 1909 | unsigned long pteval; |
a1c843b8 | 1910 | |
4a366f51 HC |
1911 | pteval = _PAGE_INVALID | _PAGE_PROTECT; |
1912 | pteval |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT; | |
1913 | pteval |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT; | |
1914 | return __pte(pteval); | |
1da177e4 LT |
1915 | } |
1916 | ||
a1c843b8 MS |
1917 | static inline unsigned long __swp_type(swp_entry_t entry) |
1918 | { | |
1919 | return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK; | |
1920 | } | |
1921 | ||
1922 | static inline unsigned long __swp_offset(swp_entry_t entry) | |
1923 | { | |
1924 | return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK; | |
1925 | } | |
1926 | ||
1927 | static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset) | |
1928 | { | |
1929 | return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) }; | |
1930 | } | |
1da177e4 LT |
1931 | |
1932 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) | |
1933 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) | |
1934 | ||
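/*
 * Round-trip example: the encoding is lossless for type < 32 and
 * offset < 2^52, and the result satisfies the swap-pte bit pattern from
 * the comment above:
 *
 *	swp_entry_t entry = __swp_entry(3, 0x1234);
 *
 *	__swp_type(entry);				- 3
 *	__swp_offset(entry);				- 0x1234
 *	pte_val(__swp_entry_to_pte(entry)) & 0x201;	- 0x200
 */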
f934f6be GS |
1935 | /* |
1936 | * 64 bit swap entry format for REGION3 and SEGMENT table entries (RSTE) | |
1937 | * Bits 59 and 63 are used to indicate the swap entry. Bit 58 marks the rste | |
1938 | * as invalid. | |
1939 | * A swap entry is indicated by bit pattern (rste & 0x011) == 0x010 | |
1940 | * | offset |Xtype |11TT|S0| | |
1941 | * |0000000000111111111122222222223333333333444444444455|555555|5566|66| | |
1942 | * |0123456789012345678901234567890123456789012345678901|234567|8901|23| | |
1943 | * | |
1944 | * Bits 0-51 store the offset. | |
1945 | * Bits 53-57 store the type. | |
1946 | * Bit 62 (S) is used for softdirty tracking. | |
1947 | * Bits 60-61 (TT) indicate the table type: 0x01 for REGION3 and 0x00 for SEGMENT. | |
1948 | * Bit 52 (X) is unused. | |
1949 | */ | |
1950 | ||
1951 | #define __SWP_OFFSET_MASK_RSTE ((1UL << 52) - 1) | |
1952 | #define __SWP_OFFSET_SHIFT_RSTE 12 | |
1953 | #define __SWP_TYPE_MASK_RSTE ((1UL << 5) - 1) | |
1954 | #define __SWP_TYPE_SHIFT_RSTE 6 | |
1955 | ||
1956 | /* | |
1957 | * TT bits set to 0x00 == SEGMENT. For REGION3 entries, caller must add R3 | |
1958 | * bits 0x01. See also __set_huge_pte_at(). | |
1959 | */ | |
1960 | static inline unsigned long mk_swap_rste(unsigned long type, unsigned long offset) | |
1961 | { | |
1962 | unsigned long rste; | |
1963 | ||
1964 | rste = _RST_ENTRY_INVALID | _RST_ENTRY_COMM; | |
1965 | rste |= (offset & __SWP_OFFSET_MASK_RSTE) << __SWP_OFFSET_SHIFT_RSTE; | |
1966 | rste |= (type & __SWP_TYPE_MASK_RSTE) << __SWP_TYPE_SHIFT_RSTE; | |
1967 | return rste; | |
1968 | } | |
1969 | ||
1970 | static inline unsigned long __swp_type_rste(swp_entry_t entry) | |
1971 | { | |
1972 | return (entry.val >> __SWP_TYPE_SHIFT_RSTE) & __SWP_TYPE_MASK_RSTE; | |
1973 | } | |
1974 | ||
1975 | static inline unsigned long __swp_offset_rste(swp_entry_t entry) | |
1976 | { | |
1977 | return (entry.val >> __SWP_OFFSET_SHIFT_RSTE) & __SWP_OFFSET_MASK_RSTE; | |
1978 | } | |
1979 | ||
1980 | #define __rste_to_swp_entry(rste) ((swp_entry_t) { rste }) | |
1981 | ||
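/*
 * E.g. for a SEGMENT-level entry the TT bits stay zero, so mk_swap_rste()
 * can be used directly and the round trip mirrors the pte variant:
 *
 *	swp_entry_t entry = __rste_to_swp_entry(mk_swap_rste(3, 0x1234));
 *
 *	__swp_type_rste(entry);		- 3
 *	__swp_offset_rste(entry);	- 0x1234
 */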
17f34580 | 1982 | extern int vmem_add_mapping(unsigned long start, unsigned long size); |
f05f62d0 | 1983 | extern void vmem_remove_mapping(unsigned long start, unsigned long size); |
4df29d2b AG |
1984 | extern int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc); |
1985 | extern int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot); | |
1986 | extern void vmem_unmap_4k_page(unsigned long addr); | |
2f0e8aae | 1987 | extern pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc); |
402b0862 | 1988 | extern int s390_enable_sie(void); |
3ac8e380 | 1989 | extern int s390_enable_skey(void); |
a13cff31 | 1990 | extern void s390_reset_cmma(struct mm_struct *mm); |
f4eb07c1 | 1991 | |
1f6b83e5 MS |
1992 | /* s390 has a private copy of get_unmapped_area() to deal with cache synonyms */
1993 | #define HAVE_ARCH_UNMAPPED_AREA | |
1994 | #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN | |
1995 | ||
1c2f7d14 AK |
1996 | #define pmd_pgtable(pmd) \ |
1997 | ((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)) | |
1998 | ||
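/*
 * Reassemble a 64-bit value that the gmap code keeps, 16 bits at a time,
 * in the ST2 fields of the first four PGSTEs of a page table: pgstes[0]
 * supplies the highest 16 bits, pgstes[3] the lowest.
 */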
84b73876 CI |
1999 | static inline unsigned long gmap_pgste_get_pgt_addr(unsigned long *pgt) |
2000 | { | |
2001 | unsigned long *pgstes, res; | |
2002 | ||
2003 | pgstes = pgt + _PAGE_ENTRIES; | |
2004 | ||
2005 | res = (pgstes[0] & PGSTE_ST2_MASK) << 16; | |
2006 | res |= pgstes[1] & PGSTE_ST2_MASK; | |
2007 | res |= (pgstes[2] & PGSTE_ST2_MASK) >> 16; | |
2008 | res |= (pgstes[3] & PGSTE_ST2_MASK) >> 32; | |
2009 | ||
2010 | return res; | |
2011 | } | |
2012 | ||
1da177e4 | 2013 | #endif /* _ASM_S390_PGTABLE_H */