/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>

#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/pgtable.h>
#define MAX_ASID_BITS	16
extern unsigned int cpu_last_asid;

void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void __new_context(struct mm_struct *mm);

/*
 * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
 */
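/*
 * The zero page contains only zero (hence invalid) descriptors, so any
 * speculative table walk through TTBR0 while it is installed takes a
 * translation fault rather than fetching a stale mapping.
 */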
static inline void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbr = page_to_phys(empty_zero_page);

	asm(
	"	msr	ttbr0_el1, %0			// set TTBR0\n"
	"	isb"
	:
	: "r" (ttbr));
}
51 | ||
52 | static inline void switch_new_context(struct mm_struct *mm) | |
53 | { | |
54 | unsigned long flags; | |
55 | ||
56 | __new_context(mm); | |
57 | ||
58 | local_irq_save(flags); | |
59 | cpu_switch_mm(mm->pgd, mm); | |
60 | local_irq_restore(flags); | |
61 | } | |
62 | ||
63 | static inline void check_and_switch_context(struct mm_struct *mm, | |
64 | struct task_struct *tsk) | |
65 | { | |
66 | /* | |
67 | * Required during context switch to avoid speculative page table | |
68 | * walking with the wrong TTBR. | |
69 | */ | |
70 | cpu_set_reserved_ttbr0(); | |
71 | ||
72 | if (!((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) | |
73 | /* | |
74 | * The ASID is from the current generation, just switch to the | |
75 | * new pgd. This condition is only true for calls from | |
76 | * context_switch() and interrupts are already disabled. | |
77 | */ | |
78 | cpu_switch_mm(mm->pgd, mm); | |
79 | else if (irqs_disabled()) | |
80 | /* | |
81 | * Defer the new ASID allocation until after the context | |
82 | * switch critical region since __new_context() cannot be | |
83 | * called with interrupts disabled. | |
84 | */ | |
85 | set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM); | |
86 | else | |
87 | /* | |
88 | * That is a direct call to switch_mm() or activate_mm() with | |
89 | * interrupts enabled and a new context. | |
90 | */ | |
91 | switch_new_context(mm); | |
92 | } | |
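
/*
 * Worked example of the generation check above, with illustrative values:
 * if cpu_last_asid == 0x00030007 (generation 3, last allocated ASID 7), an
 * mm with context.id == 0x00030002 gives (0x00030002 ^ 0x00030007) >> 16
 * == 0, so its ASID is still current and only the pgd switch is needed;
 * context.id == 0x00020002 gives 1, so a new ASID must be allocated.
 */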
93 | ||
94 | #define init_new_context(tsk,mm) (__init_new_context(tsk,mm),0) | |
95 | #define destroy_context(mm) do { } while(0) | |
96 | ||
#define finish_arch_post_lock_switch \
	finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
		struct mm_struct *mm = current->mm;
		unsigned long flags;

		__new_context(mm);

		local_irq_save(flags);
		cpu_switch_mm(mm->pgd, mm);
		local_irq_restore(flags);
	}
}
112 | ||
113 | /* | |
114 | * This is called when "tsk" is about to enter lazy TLB mode. | |
115 | * | |
116 | * mm: describes the currently active mm context | |
117 | * tsk: task which is entering lazy tlb | |
118 | * cpu: cpu number which is entering lazy tlb | |
119 | * | |
120 | * tsk->mm will be NULL | |
121 | */ | |
122 | static inline void | |
123 | enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | |
124 | { | |
125 | } | |
126 | ||
127 | /* | |
128 | * This is the actual mm switch as far as the scheduler | |
129 | * is concerned. No registers are touched. We avoid | |
130 | * calling the CPU specific function when the mm hasn't | |
131 | * actually changed. | |
132 | */ | |
133 | static inline void | |
134 | switch_mm(struct mm_struct *prev, struct mm_struct *next, | |
135 | struct task_struct *tsk) | |
136 | { | |
137 | unsigned int cpu = smp_processor_id(); | |
138 | ||
139 | #ifdef CONFIG_SMP | |
140 | /* check for possible thread migration */ | |
141 | if (!cpumask_empty(mm_cpumask(next)) && | |
142 | !cpumask_test_cpu(cpu, mm_cpumask(next))) | |
143 | __flush_icache_all(); | |
144 | #endif | |
145 | if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) | |
146 | check_and_switch_context(next, tsk); | |
147 | } | |
148 | ||
149 | #define deactivate_mm(tsk,mm) do { } while (0) | |
150 | #define activate_mm(prev,next) switch_mm(prev, next, NULL) | |
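
/*
 * Note that activate_mm() passes a NULL task to switch_mm(). Per the
 * comments in check_and_switch_context(), direct activate_mm() calls only
 * happen with interrupts enabled, so the TIF_SWITCH_MM branch that
 * dereferences "tsk" is never reached with a NULL pointer.
 */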
151 | ||
152 | #endif |