/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *  Copyright (C) 2012 ARM Limited
 *
 *  Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
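
/*
 * mm->context.id packs both fields above into a 64-bit value: a
 * generation number in the bits selected by ASID_MASK and the hardware
 * ASID in the low ASID_BITS. As an illustration (assuming ASID_BITS is
 * 8), an id of 0x305 means hardware ASID 0x05 handed out in generation
 * 3. An id of 0 means "never allocated", which is why cpu_last_asid
 * starts at generation 1.
 */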
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t cpu_last_asid = ATOMIC64_INIT(ASID_FIRST_VERSION);

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
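
/*
 * active_asids holds the ASID currently installed on each CPU, and
 * reserved_asids snapshots those values when the allocator rolls over
 * so that tasks which were running at that moment keep a valid tag.
 * A CPU whose bit is set in tlb_flush_pending must invalidate its TLB
 * before running anything tagged with a new-generation ASID.
 */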

#ifdef CONFIG_ARM_LPAE
static void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbl = __pa(swapper_pg_dir);
	unsigned long ttbh = 0;

	/*
	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
	 * ASID is set to 0.
	 */
	asm volatile(
	"	mcrr	p15, 0, %0, %1, c2		@ set TTBR0\n"
	:
	: "r" (ttbl), "r" (ttbh));
	isb();
}
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/* Copy TTBR1 into TTBR0 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task) << ASID_BITS;
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif
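
/*
 * With CONFIG_PID_IN_CONTEXTIDR, the notifier above rewrites CONTEXTIDR
 * on every thread switch so that the task's PID occupies bits
 * [31:ASID_BITS] while the hardware ASID in the low bits is left
 * untouched, letting external debug and trace tools attribute activity
 * to a process.
 */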

static void flush_context(unsigned int cpu)
{
	int i;

	/* Update the list of reserved ASIDs. */
	for_each_possible_cpu(i)
		per_cpu(reserved_asids, i) =
			atomic64_xchg(&per_cpu(active_asids, i), 0);
	per_cpu(reserved_asids, cpu) = 0;

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	if (!tlb_ops_need_broadcast())
		cpumask_set_cpu(cpu, &tlb_flush_pending);
	else
		cpumask_setall(&tlb_flush_pending);

	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}
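
/*
 * A rollover invalidates every outstanding ASID at once; rather than
 * broadcasting an IPI, each CPU named in tlb_flush_pending performs
 * local_flush_tlb_all() lazily, the next time it switches context
 * under cpu_asid_lock.
 */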

static int is_reserved_asid(u64 asid, u64 mask)
{
	int cpu;
	for_each_possible_cpu(cpu)
		if ((per_cpu(reserved_asids, cpu) & mask) == (asid & mask))
			return 1;
	return 0;
}
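
/*
 * Callers pass ULLONG_MAX as the mask to demand an exact match on both
 * generation and hardware ASID, or ~ASID_MASK to compare only the
 * hardware ASID field when checking whether a candidate id would
 * collide with a reserved one.
 */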

static void new_context(struct mm_struct *mm, unsigned int cpu)
{
	u64 asid = mm->context.id;

	if (asid != 0 && is_reserved_asid(asid, ULLONG_MAX)) {
		/*
		 * Our current ASID was active during a rollover, we can
		 * continue to use it and this was just a false alarm.
		 */
		asid = (atomic64_read(&cpu_last_asid) & ASID_MASK) |
		       (asid & ~ASID_MASK);
	} else {
		/*
		 * Allocate a free ASID. If we can't find one, take a
		 * note of the currently active ASIDs and mark the TLBs
		 * as requiring flushes.
		 */
		do {
			asid = atomic64_inc_return(&cpu_last_asid);
			if ((asid & ~ASID_MASK) == 0)
				flush_context(cpu);
		} while (is_reserved_asid(asid, ~ASID_MASK));
		cpumask_clear(mm_cpumask(mm));
	}

	mm->context.id = asid;
}
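
/*
 * Worked example (assuming ASID_BITS is 8): a task holding id 0x1f3
 * fails the generation check once cpu_last_asid has advanced into the
 * 0x2xx range. If 0x1f3 was live on some CPU at rollover time it sits
 * in reserved_asids, so the first branch simply re-tags the task as
 * 0x2f3; otherwise the loop allocates the next free value, flushing
 * context state whenever the hardware ASID wraps to zero.
 */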

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();

	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
		__check_kvm_seq(mm);

	/*
	 * Required during context switch to avoid speculative page table
	 * walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

	if (!((mm->context.id ^ atomic64_read(&cpu_last_asid)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
		goto switch_mm_fastpath;
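
	/*
	 * Slow path: either mm's generation is stale or a concurrent
	 * flush_context() zeroed this CPU's active_asids entry, making
	 * the xchg above return 0. Retry the check under the lock.
	 */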
	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	if ((mm->context.id ^ atomic64_read(&cpu_last_asid)) >> ASID_BITS)
		new_context(mm, cpu);

	atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}