/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PAGE_H
#define _ASM_X86_PAGE_H

#include <linux/types.h>

#ifdef __KERNEL__

#include <asm/page_types.h>

#ifdef CONFIG_X86_64
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif	/* CONFIG_X86_64 */

#ifndef __ASSEMBLY__

struct page;

#include <linux/range.h>
extern struct range pfn_mapped[];
extern int nr_pfn_mapped;

static inline void clear_user_page(void *page, unsigned long vaddr,
				   struct page *pg)
{
	clear_page(page);
}

static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
				  struct page *topage)
{
	copy_page(to, from);
}
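
/*
 * Note: x86 has no D-cache aliasing, so the user-page helpers above need
 * no extra cache maintenance and simply forward to clear_page()/copy_page().
 */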

#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
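
/*
 * Usage sketch: allocating a zeroed, order-0 movable folio for an
 * anonymous fault, assuming a handler context with struct vm_fault *vmf
 * and struct vm_area_struct *vma in scope (error handling elided):
 *
 *	struct folio *folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
 *	if (!folio)
 *		return VM_FAULT_OOM;
 */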

#ifndef __pa
#define __pa(x)		__phys_addr((unsigned long)(x))
#endif

#define __pa_nodebug(x)	__phys_addr_nodebug((unsigned long)(x))
/* __pa_symbol should be used for C-visible symbols.
   This seems to be the official gcc-blessed way to do such arithmetic. */
/*
 * We need __phys_reloc_hide() here because gcc may assume that there is no
 * overflow during the __pa() calculation and may optimize it unexpectedly.
 * Newer versions of gcc provide the -fno-strict-overflow switch to handle
 * this case properly. Once all supported versions of gcc understand it, we
 * can remove this voodoo magic stuff (i.e. once gcc 3.x is deprecated).
 */
#define __pa_symbol(x) \
	__phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
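
/*
 * Example, assuming the usual _text linker symbol marking the start of
 * kernel text: __pa_symbol() resolves a symbol's physical address via the
 * kernel's link-time layout rather than through the direct map.
 *
 *	extern char _text[];
 *	phys_addr_t text_phys = __pa_symbol(_text);
 */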

#ifndef __va
#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
#endif
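
/*
 * Sketch of the round trip: for an address in the kernel direct map
 * (e.g. memory from kmalloc()), __va() inverts __pa(), so translating
 * back and forth is the identity.
 *
 *	void *ptr = kmalloc(64, GFP_KERNEL);
 *	phys_addr_t phys = __pa(ptr);
 *	void *virt = __va(phys);	(virt == ptr here)
 */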

#define __boot_va(x)		__va(x)
#define __boot_pa(x)		__pa(x)

/*
 * virt_to_page(kaddr) returns a valid pointer if and only if
 * virt_addr_valid(kaddr) returns true.
 */
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
extern bool __virt_addr_valid(unsigned long kaddr);
#define virt_addr_valid(kaddr)	__virt_addr_valid((unsigned long) (kaddr))
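
/*
 * Sketch: virt_to_page() is only meaningful for direct-map addresses, so
 * guard it with virt_addr_valid() when the pointer might come from
 * vmalloc() or another non-linear mapping:
 *
 *	if (virt_addr_valid(addr))
 *		page = virt_to_page(addr);
 */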

static __always_inline void *pfn_to_kaddr(unsigned long pfn)
{
	return __va(pfn << PAGE_SHIFT);
}
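
/*
 * Example (a sketch, valid for direct-mapped pages only): pfn_to_kaddr()
 * composes with page_to_pfn() to recover a page's direct-map address.
 *
 *	void *kaddr = pfn_to_kaddr(page_to_pfn(page));
 */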

static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits)
{
	return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}

static __always_inline u64 __is_canonical_address(u64 vaddr, u8 vaddr_bits)
{
	return __canonical_address(vaddr, vaddr_bits) == vaddr;
}
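
/*
 * Worked example: with vaddr_bits == 48, the shift pair sign-extends
 * bit 47 into bits 63:48, so
 *
 *	__canonical_address(0x0000800000000000, 48) == 0xffff800000000000
 *
 * which makes 0x0000800000000000 non-canonical, while 0x00007fffffffffff
 * and 0xffff800000000000 are canonical.
 */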

#endif	/* __ASSEMBLY__ */

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

#endif	/* __KERNEL__ */
#endif /* _ASM_X86_PAGE_H */