mm: introduce common STRUCT_PAGE_MAX_SHIFT define
arch/arm64/include/asm/memory.h
/*
 * Based on arch/arm/include/asm/memory.h
 *
 * Copyright (C) 2000-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Note: this file should not be included by non-asm/.h files
 */
#ifndef __ASM_MEMORY_H
#define __ASM_MEMORY_H

#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
#include <asm/bug.h>
#include <asm/page-def.h>
#include <asm/sizes.h>

/*
 * Size of the PCI I/O space. This must remain a power of two so that
 * IO_SPACE_LIMIT acts as a mask for the low bits of I/O addresses.
 */
#define PCI_IO_SIZE		SZ_16M

/*
 * VMEMMAP_SIZE - allows the whole linear region to be covered by
 *                a struct page array
 */
#define VMEMMAP_SIZE	(UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))
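/*
 * Worked example (illustrative, assuming VA_BITS == 48, 4 KB pages so
 * PAGE_SHIFT == 12, and STRUCT_PAGE_MAX_SHIFT == 6, i.e.
 * sizeof(struct page) <= 64 bytes):
 *
 *   VMEMMAP_SIZE = 1UL << (48 - 12 - 1 + 6) = 1UL << 41 = 2 TiB
 *
 * i.e. enough struct pages to describe every 4 KB page of the
 * 2^47-byte linear region (half of the kernel VA space).
 */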

/*
 * PAGE_OFFSET - the virtual address of the start of the linear map (top
 *		 (VA_BITS - 1))
 * KIMAGE_VADDR - the virtual address of the start of the kernel image
 * VA_BITS - the maximum number of bits for virtual addresses.
 * VA_START - the first kernel virtual address.
 */
#define VA_BITS			(CONFIG_ARM64_VA_BITS)
#define VA_START		(UL(0xffffffffffffffff) - \
					(UL(1) << VA_BITS) + 1)
#define PAGE_OFFSET		(UL(0xffffffffffffffff) - \
					(UL(1) << (VA_BITS - 1)) + 1)
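/*
 * Worked example (illustrative, assuming VA_BITS == 48):
 *
 *   VA_START    = 0xffffffffffffffff - 2^48 + 1 = 0xffff000000000000
 *   PAGE_OFFSET = 0xffffffffffffffff - 2^47 + 1 = 0xffff800000000000
 *
 * so the linear map occupies the upper half of the kernel VA space.
 */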
#define KIMAGE_VADDR		(MODULES_END)
#define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR		(VA_START + KASAN_SHADOW_SIZE)
#define MODULES_VSIZE		(SZ_128M)
#define VMEMMAP_START		(PAGE_OFFSET - VMEMMAP_SIZE)
#define PCI_IO_END		(VMEMMAP_START - SZ_2M)
#define PCI_IO_START		(PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP		(PCI_IO_START - SZ_2M)

#define KERNEL_START		_text
#define KERNEL_END		_end

/*
 * KASAN requires 1/8th of the kernel virtual address space for the shadow
 * region. KASAN can bloat the stack significantly, so double the (minimum)
 * stack size when KASAN is in use.
 */
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_THREAD_SHIFT	1
#else
#define KASAN_SHADOW_SIZE	(0)
#define KASAN_THREAD_SHIFT	0
#endif
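/*
 * Worked example (illustrative, assuming VA_BITS == 48): the shadow
 * region is 1UL << (48 - 3) = 32 TiB, exactly 1/8th of the 256 TiB
 * kernel VA space, matching KASAN's one-shadow-byte-per-8-bytes
 * encoding.
 */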

#define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)

/*
 * VMAP'd stacks are allocated at page granularity, so we must ensure that such
 * stacks are a multiple of page size.
 */
#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
#define THREAD_SHIFT		PAGE_SHIFT
#else
#define THREAD_SHIFT		MIN_THREAD_SHIFT
#endif

#if THREAD_SHIFT >= PAGE_SHIFT
#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
#endif

#define THREAD_SIZE		(UL(1) << THREAD_SHIFT)

/*
 * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
 * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
 * assembly.
 */
#ifdef CONFIG_VMAP_STACK
#define THREAD_ALIGN		(2 * THREAD_SIZE)
#else
#define THREAD_ALIGN		THREAD_SIZE
#endif
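/*
 * Worked example (illustrative, assuming THREAD_SHIFT == 14): a 16 KiB
 * stack aligned to 32 KiB means every in-range sp has the same value of
 * bit 14, while an sp that has overflowed below the stack base flips
 * bit 14, so the entry code can detect overflow with a single bit test
 * instead of a full range compare.
 */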

#define IRQ_STACK_SIZE		THREAD_SIZE

#define OVERFLOW_STACK_SIZE	SZ_4K

/*
 * Alignment of kernel segments (e.g. .text, .data).
 */
#if defined(CONFIG_DEBUG_ALIGN_RODATA)
/*
 *  4 KB granule:   1 level 2 entry
 * 16 KB granule: 128 level 3 entries, with contiguous bit
 * 64 KB granule:  32 level 3 entries, with contiguous bit
 */
#define SEGMENT_ALIGN		SZ_2M
#else
/*
 *  4 KB granule:  16 level 3 entries, with contiguous bit
 * 16 KB granule:   4 level 3 entries, without contiguous bit
 * 64 KB granule:   1 level 3 entry
 */
#define SEGMENT_ALIGN		SZ_64K
#endif

/*
 * Memory types available.
 */
#define MT_DEVICE_nGnRnE	0
#define MT_DEVICE_nGnRE		1
#define MT_DEVICE_GRE		2
#define MT_NORMAL_NC		3
#define MT_NORMAL		4
#define MT_NORMAL_WT		5

/*
 * Memory types for Stage-2 translation
 */
#define MT_S2_NORMAL		0xf
#define MT_S2_DEVICE_nGnRE	0x1

/*
 * Memory types for Stage-2 translation when ID_AA64MMFR2_EL1.FWB is 0001
 * Stage-2 enforces Normal-WB and Device-nGnRE
 */
#define MT_S2_FWB_NORMAL	6
#define MT_S2_FWB_DEVICE_nGnRE	1

#ifdef CONFIG_ARM64_4K_PAGES
#define IOREMAP_MAX_ORDER	(PUD_SHIFT)
#else
#define IOREMAP_MAX_ORDER	(PMD_SHIFT)
#endif

#ifdef CONFIG_BLK_DEV_INITRD
#define __early_init_dt_declare_initrd(__start, __end)			\
	do {								\
		initrd_start = (__start);				\
		initrd_end = (__end);					\
	} while (0)
#endif

#ifndef __ASSEMBLY__

#include <linux/bitops.h>
#include <linux/mmdebug.h>

extern s64			memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
#define PHYS_OFFSET		({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
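/*
 * Illustrative note: the "& 1" test works because memstart_addr is
 * initialised to an odd sentinel (-1) until memory discovery fixes it
 * up to a page-aligned (hence even) value, so the VM_BUG_ON catches any
 * use of PHYS_OFFSET before the start of memory is known.
 */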

/* the virtual base of the kernel image (minus TEXT_OFFSET) */
extern u64			kimage_vaddr;

/* the offset between the kernel virtual and physical mappings */
extern u64			kimage_voffset;

static inline unsigned long kaslr_offset(void)
{
	return kimage_vaddr - KIMAGE_VADDR;
}

/*
 * Allow all memory at the discovery stage. We will clip it later.
 */
#define MIN_MEMBLOCK_ADDR	0
#define MAX_MEMBLOCK_ADDR	U64_MAX

/*
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 *
 * This is the PFN of the first RAM page in the kernel
 * direct-mapped view.  We assume this is the first page
 * of RAM in the mem_map as well.
 */
#define PHYS_PFN_OFFSET		(PHYS_OFFSET >> PAGE_SHIFT)

/*
 * Physical vs virtual RAM address space conversion.  These are
 * private definitions which should NOT be used outside memory.h
 * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
 */

/*
 * The linear kernel range starts in the middle of the virtual address
 * space. Testing the top bit for the start of the region is a
 * sufficient check.
 */
#define __is_lm_address(addr)	(!!((addr) & BIT(VA_BITS - 1)))

#define __lm_to_phys(addr)	(((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
#define __kimg_to_phys(addr)	((addr) - kimage_voffset)

#define __virt_to_phys_nodebug(x) ({					\
	phys_addr_t __x = (phys_addr_t)(x);				\
	__is_lm_address(__x) ? __lm_to_phys(__x) :			\
			       __kimg_to_phys(__x);			\
})
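/*
 * Worked example (illustrative, assuming VA_BITS == 48 and a
 * hypothetical PHYS_OFFSET of 0x80000000): the linear-map address
 * 0xffff800000100000 has bit 47 set, so __lm_to_phys() masks off
 * PAGE_OFFSET and adds PHYS_OFFSET, giving 0x80100000. A kernel-image
 * address sits below PAGE_OFFSET with bit 47 clear, so it instead goes
 * through __kimg_to_phys(), which subtracts kimage_voffset.
 */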

#define __pa_symbol_nodebug(x)	__kimg_to_phys((phys_addr_t)(x))

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)	__virt_to_phys_nodebug(x)
#define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
#endif

#define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
#define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))

/*
 * Convert a page to/from a physical address
 */
#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))

/*
 * Note: Drivers should NOT use these.  They are the wrong
 * translation for translating DMA addresses.  Use the driver
 * DMA support - see dma-mapping.h.
 */
#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
	return __virt_to_phys((unsigned long)(x));
}

#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
	return (void *)(__phys_to_virt(x));
}

/*
 * Drivers should NOT use these either.
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __pa_symbol(x)		__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __pa_nodebug(x)		__virt_to_phys_nodebug((unsigned long)(x))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define virt_to_pfn(x)		__phys_to_pfn(__virt_to_phys((unsigned long)(x)))
#define sym_to_pfn(x)		__phys_to_pfn(__pa_symbol(x))

/*
 *  virt_to_page(k)	convert a _valid_ virtual address to struct page *
 *  virt_addr_valid(k)	indicates whether a virtual address is valid
 */
#define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)

#ifndef CONFIG_SPARSEMEM_VMEMMAP
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define _virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#else
#define __virt_to_pgoff(kaddr)	(((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
#define __page_to_voff(page)	(((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))

#define page_to_virt(page)	((void *)((__page_to_voff(page)) | PAGE_OFFSET))
#define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))

#define _virt_addr_valid(kaddr)	pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
					   + PHYS_OFFSET) >> PAGE_SHIFT)
#endif
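/*
 * Worked example (illustrative) of the vmemmap arithmetic above:
 * virt_to_page() turns the linear-map offset (kaddr & ~PAGE_OFFSET)
 * into a page index via "/ PAGE_SIZE", scales it by
 * sizeof(struct page), and rebases the result onto VMEMMAP_START;
 * __page_to_voff() performs the inverse. Both assume the struct page
 * array at VMEMMAP_START begins with the page that maps PAGE_OFFSET
 * (physical PHYS_OFFSET), so no pfn lookup is needed in either
 * direction.
 */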
#endif

#define _virt_addr_is_linear(kaddr)	(((u64)(kaddr)) >= PAGE_OFFSET)
#define virt_addr_valid(kaddr)		(_virt_addr_is_linear(kaddr) && \
					 _virt_addr_valid(kaddr))

#include <asm-generic/memory_model.h>

#endif