treewide: kzalloc() -> kcalloc()
[linux-2.6-block.git] arch/arm64/mm/context.c
/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

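/*
 * active_asids tracks the context.id currently installed on each CPU (zeroed
 * across a rollover); reserved_asids preserves the last ASID seen on a CPU so
 * a rollover doesn't lose track of a still-running task; tlb_flush_pending
 * marks CPUs that must flush their TLB before running with a freshly
 * allocated ASID.
 */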
static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)
#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)
#else
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION)
#define asid2idx(asid)		((asid) & ~ASID_MASK)
#define idx2asid(idx)		asid2idx(idx)
#endif
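
/*
 * An mm's context.id packs a rollover generation (the bits selected by
 * ASID_MASK) together with its hardware ASID (the low asid_bits).
 * With CONFIG_UNMAP_KERNEL_AT_EL0, ASIDs are handed out in even/odd pairs,
 * the bottom bit distinguishing the kpti user/kernel variants of the same
 * allocation, so only half of the hardware ASID space is usable and
 * asid2idx()/idx2asid() convert between an allocation-bitmap index and its
 * (even) ASID.
 */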

/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_ASID_SHIFT);

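	/*
	 * ID_AA64MMFR0_EL1.ASIDBits: 0b0000 means 8-bit ASIDs, 0b0010 means
	 * 16-bit ASIDs; anything else is unexpected.
	 */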
	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
			smp_processor_id(), fld);
		/* Fallthrough */
	case 0:
		asid = 8;
		break;
	case 2:
		asid = 16;
	}

	return asid;
}

/* Check if the current cpu's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if we
		 * support fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size(%u) than boot CPU (%u)\n",
				smp_processor_id(), asid, asid_bits);
		cpu_panic_kernel();
	}
}

static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid2idx(asid), asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch
	 */
	cpumask_setall(&tlb_flush_pending);
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(asid), asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes. We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context(cpu);

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return idx2asid(asid) | generation;
}

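/*
 * Pick up the mm's ASID (allocating a fresh one if its generation is stale)
 * and switch to its page tables; called on the context-switch path.
 */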
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	u64 asid, old_active_asid;

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asids is non-zero and the ASID matches the current
	 * generation, then we update the active_asids entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
	if (old_active_asid &&
	    !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) &&
	    atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
				     old_active_asid, asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:

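	/*
	 * The fast and slow paths converge here: apply branch predictor
	 * hardening (on CPUs that need it) before committing to the new
	 * user context.
	 */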
	arm64_apply_bp_hardening();

	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
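	/*
	 * ALTERNATIVE() leaves the NOPs in place unless the CPU is affected
	 * by Cavium erratum 27456, in which case the I-cache invalidation
	 * sequence is patched in at boot.
	 */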
	asm(ALTERNATIVE("nop; nop; nop",
			"ic iallu; dsb nsh; isb",
			ARM64_WORKAROUND_CAVIUM_27456,
			CONFIG_CAVIUM_ERRATUM_27456));
}

static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
	 */
	WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
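	/*
	 * One bit per user ASID, packed into BITS_TO_LONGS(NUM_USER_ASIDS)
	 * longs; kcalloc() (rather than kzalloc()) keeps the count * size
	 * calculation overflow-checked.
	 */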
	asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
	return 0;
}
early_initcall(asids_init);