/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/mmu_context.h
 *
 *  Copyright (C) 1996 Russell King.
 *
 *  Changelog:
 *   27-06-1996	RMK	Created
 */
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm-generic/mm_hooks.h>

void __check_vmalloc_seq(struct mm_struct *mm);

#ifdef CONFIG_MMU
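/*
 * Kernel mappings in the vmalloc/ioremap region may be created after a
 * process's page tables were copied from init_mm. Each mm snapshots
 * init_mm's vmalloc_seq counter; a mismatch means this mm's copy of the
 * kernel page tables is stale and __check_vmalloc_seq() must resync it.
 * (Not needed with LPAE, hence the IS_ENABLED() check below.)
 */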
static inline void check_vmalloc_seq(struct mm_struct *mm)
{
	if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
	    unlikely(atomic_read(&mm->context.vmalloc_seq) !=
		     atomic_read(&init_mm.context.vmalloc_seq)))
		__check_vmalloc_seq(mm);
}
#endif

#ifdef CONFIG_CPU_HAS_ASID

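/*
 * On CPUs with hardware ASIDs (ARMv6 and later), TLB entries are tagged
 * with an address-space identifier, so switching mms does not require a
 * TLB flush. check_and_switch_context() (arch/arm/mm/context.c) picks
 * or allocates the ASID for the incoming mm.
 */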
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);

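/*
 * ASID allocation is lazy: context.id starts at 0, which never matches
 * a live ASID generation, so the first check_and_switch_context() on
 * this mm will allocate a fresh ASID.
 */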
#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	return 0;
}

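/*
 * Cortex-A15 erratum 798181: broadcast TLB maintenance may not reach
 * other cores. This helper gathers the CPUs that may hold TLB entries
 * for the mm, so the TLB flushing code can target them with an explicit
 * IPI instead of relying on the broadcast.
 */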
#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask);
#else  /* !CONFIG_ARM_ERRATA_798181 */
static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
					   cpumask_t *mask)
{
}
#endif /* CONFIG_ARM_ERRATA_798181 */

#else /* !CONFIG_CPU_HAS_ASID */

#ifdef CONFIG_MMU

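/*
 * Without hardware ASIDs, every mm switch must go through
 * cpu_switch_mm(), which on VIVT caches also implies a cache flush
 * (see the irqs_disabled() comment below).
 */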
static inline void check_and_switch_context(struct mm_struct *mm,
					    struct task_struct *tsk)
{
	check_vmalloc_seq(mm);

	if (irqs_disabled())
		/*
		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
		 * high interrupt latencies, defer the call and continue
		 * running with the old mm. Since we only support UP systems
		 * on non-ASID CPUs, the old mm will remain valid until the
		 * finish_arch_post_lock_switch() call.
		 */
		mm->context.switch_pending = 1;
	else
		cpu_switch_mm(mm->pgd, mm);
}

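/*
 * Complete an mm switch that check_and_switch_context() deferred
 * because interrupts were off: the scheduler calls this hook once the
 * runqueue lock has been dropped and interrupts are enabled again.
 */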
#ifndef MODULE
#define finish_arch_post_lock_switch \
	finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct mm_struct *mm = current->mm;

	if (mm && mm->context.switch_pending) {
		/*
		 * Preemption must be disabled during cpu_switch_mm() as we
		 * have some stateful cache flush implementations. Check
		 * switch_pending again in case we were preempted and the
		 * switch to this mm was already done.
		 */
		preempt_disable();
		if (mm->context.switch_pending) {
			mm->context.switch_pending = 0;
			cpu_switch_mm(mm->pgd, mm);
		}
		preempt_enable_no_resched();
	}
}
#endif /* !MODULE */

#endif	/* CONFIG_MMU */

#endif	/* CONFIG_CPU_HAS_ASID */

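/*
 * Activating a new mm (e.g. at exec time) is just a switch with no
 * task: switch_mm() tolerates tsk == NULL.
 */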
#define activate_mm(prev,next)	switch_mm(prev, next, NULL)

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
#ifdef CONFIG_MMU
	unsigned int cpu = smp_processor_id();

	/*
	 * __sync_icache_dcache doesn't broadcast the I-cache invalidation,
	 * so check for possible thread migration and invalidate the I-cache
	 * if we're new to this CPU.
	 */
	if (cache_ops_need_broadcast() &&
	    !cpumask_empty(mm_cpumask(next)) &&
	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
		__flush_icache_all();

	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
		check_and_switch_context(next, tsk);
		if (cache_is_vivt())
			cpumask_clear_cpu(cpu, mm_cpumask(prev));
	}
#endif
}

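/*
 * With CONFIG_VMAP_STACK, task stacks live in the vmalloc area, so even
 * a lazy-TLB switch (a kernel thread borrowing this mm) must make sure
 * the mm's vmalloc mappings are current before the stack is touched.
 */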
#ifdef CONFIG_VMAP_STACK
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (mm != &init_mm)
		check_vmalloc_seq(mm);
}
#define enter_lazy_tlb enter_lazy_tlb
#endif

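/*
 * asm-generic/mmu_context.h supplies default (mostly no-op)
 * implementations for any hooks not defined above, keyed off the
 * init_new_context/enter_lazy_tlb/... macros.
 */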
#include <asm-generic/mmu_context.h>

#endif /* __ASM_ARM_MMU_CONTEXT_H */