powerpc/mm/hash64: Map all the kernel regions in the same 0xc range
[linux-2.6-block.git] / arch/powerpc/mm/init-common.c
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *	Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/string.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/kup.h>

static bool disable_kuep = !IS_ENABLED(CONFIG_PPC_KUEP);
static bool disable_kuap = !IS_ENABLED(CONFIG_PPC_KUAP);

static int __init parse_nosmep(char *p)
{
	disable_kuep = true;
	pr_warn("Disabling Kernel Userspace Execution Prevention\n");
	return 0;
}
early_param("nosmep", parse_nosmep);

static int __init parse_nosmap(char *p)
{
	disable_kuap = true;
	pr_warn("Disabling Kernel Userspace Access Protection\n");
	return 0;
}
early_param("nosmap", parse_nosmap);

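/*
 * Both flags come from the kernel command line: booting with "nosmep"
 * and/or "nosmap" makes setup_kup() below request that the
 * corresponding protection be left disabled.
 */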
void setup_kup(void)
{
	setup_kuep(disable_kuep);
	setup_kuap(disable_kuap);
}

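/*
 * kmem_cache constructors only receive the object address, so the
 * table size can't be passed in as an argument.  Instead, generate
 * one constructor per supported index size and have ctor() below
 * pick the right one.
 */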
#define CTOR(shift) static void ctor_##shift(void *addr) \
{							\
	memset(addr, 0, sizeof(void *) << (shift));	\
}

CTOR(0); CTOR(1); CTOR(2); CTOR(3); CTOR(4); CTOR(5); CTOR(6); CTOR(7);
CTOR(8); CTOR(9); CTOR(10); CTOR(11); CTOR(12); CTOR(13); CTOR(14); CTOR(15);

static inline void (*ctor(int shift))(void *)
{
	BUILD_BUG_ON(MAX_PGTABLE_INDEX_SIZE != 15);

	switch (shift) {
	case 0: return ctor_0;
	case 1: return ctor_1;
	case 2: return ctor_2;
	case 3: return ctor_3;
	case 4: return ctor_4;
	case 5: return ctor_5;
	case 6: return ctor_6;
	case 7: return ctor_7;
	case 8: return ctor_8;
	case 9: return ctor_9;
	case 10: return ctor_10;
	case 11: return ctor_11;
	case 12: return ctor_12;
	case 13: return ctor_13;
	case 14: return ctor_14;
	case 15: return ctor_15;
	}
	return NULL;
}

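/*
 * For illustration, CTOR(4) above expands to:
 *
 *	static void ctor_4(void *addr)
 *	{
 *		memset(addr, 0, sizeof(void *) << (4));
 *	}
 *
 * i.e. a constructor zeroing a 16-entry table (128 bytes with 64-bit
 * pointers).
 */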
struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE + 1];
EXPORT_SYMBOL_GPL(pgtable_cache);	/* used by kvm_hv module */

/*
 * Create a kmem_cache() for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned int shift)
{
	char *name;
	unsigned long table_size = sizeof(void *) << shift;
	unsigned long align = table_size;

	/* When batching pgtable pointers for RCU freeing, we store
	 * the index size in the low bits.  Table alignment must be
	 * big enough to fit it.
	 *
	 * Likewise, hugepage pagetable pointers contain a (different)
	 * shift value in the low bits.  All tables must be aligned so
	 * as to leave enough 0 bits in the address to contain it. */
	unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
				     HUGEPD_SHIFT_MASK + 1);
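	/*
	 * A rough sketch of that batching scheme (the real code lives
	 * in the pgtable freeing paths, e.g. pgtable_free_tlb()); the
	 * variable names here are illustrative:
	 *
	 *	pgf = (unsigned long)table | shift;	// stash index size
	 *	...
	 *	shift = pgf & MAX_PGTABLE_INDEX_SIZE;	// recover it
	 *	table = (void *)(pgf & ~(unsigned long)MAX_PGTABLE_INDEX_SIZE);
	 *
	 * This only works because every table is aligned to at least
	 * MAX_PGTABLE_INDEX_SIZE + 1 bytes, leaving the low bits zero.
	 */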
	struct kmem_cache *new;

	/* It would be nice if this was a BUILD_BUG_ON(), but at the
	 * moment, gcc doesn't seem to recognize is_power_of_2 as a
	 * constant expression, so so much for that. */
	BUG_ON(!is_power_of_2(minalign));
	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);

	if (PGT_CACHE(shift))
		return; /* Already have a cache of this size */

	align = max_t(unsigned long, align, minalign);
	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
	new = kmem_cache_create(name, table_size, align, 0, ctor(shift));
	if (!new)
		panic("Could not allocate pgtable cache for order %d", shift);

	kfree(name);
	pgtable_cache[shift] = new;

	pr_debug("Allocated pgtable cache for order %d\n", shift);
}
EXPORT_SYMBOL_GPL(pgtable_cache_add);	/* used by kvm_hv module */

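/*
 * A minimal usage sketch (hypothetical caller, assuming the shift was
 * previously registered with pgtable_cache_add()):
 *
 *	pgd_t *pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
 *				      GFP_KERNEL);
 *	...
 *	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
 *
 * The ctor() installed above zeroes each object when its backing slab
 * is created, and slab convention requires objects to be freed in that
 * state, so allocations come back as empty tables.
 */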
void pgtable_cache_init(void)
{
	pgtable_cache_add(PGD_INDEX_SIZE);

	if (PMD_CACHE_INDEX)
		pgtable_cache_add(PMD_CACHE_INDEX);
	/*
	 * In all current configs, when the PUD index exists it's the
	 * same size as either the pgd or pmd index except with THP enabled
	 * on book3s 64
	 */
	if (PUD_CACHE_INDEX)
		pgtable_cache_add(PUD_CACHE_INDEX);
}
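/*
 * pgtable_cache_init() is the arch hook called from generic mm setup
 * early in boot (via pgtable_init(), at the time of writing), so these
 * caches exist before any process page tables are allocated.
 */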