/*
 * arch/sh/mm/tlb-flush_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
 * Copyright (C) 2003 - 2012  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/signal.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

3eeffb32 | 31 | void local_flush_tlb_one(unsigned long asid, unsigned long page) |
1da177e4 LT |
32 | { |
33 | unsigned long long match, pteh=0, lpage; | |
34 | unsigned long tlb; | |
1da177e4 LT |
35 | |
36 | /* | |
37 | * Sign-extend based on neff. | |
38 | */ | |
c7914834 | 39 | lpage = neff_sign_extend(page); |
3eeffb32 | 40 | match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID; |
1da177e4 LT |
41 | match |= lpage; |
42 | ||
3eeffb32 PM |
43 | for_each_itlb_entry(tlb) { |
44 | asm volatile ("getcfg %1, 0, %0" | |
45 | : "=r" (pteh) | |
46 | : "r" (tlb) ); | |
1da177e4 | 47 | |
3eeffb32 PM |
48 | if (pteh == match) { |
49 | __flush_tlb_slot(tlb); | |
50 | break; | |
1da177e4 LT |
51 | } |
52 | } | |
53 | ||
1da177e4 LT |
54 | for_each_dtlb_entry(tlb) { |
55 | asm volatile ("getcfg %1, 0, %0" | |
56 | : "=r" (pteh) | |
57 | : "r" (tlb) ); | |
58 | ||
59 | if (pteh == match) { | |
60 | __flush_tlb_slot(tlb); | |
61 | break; | |
62 | } | |
63 | ||
64 | } | |
65 | } | |
66 | ||
3eeffb32 | 67 | void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) |
1da177e4 LT |
68 | { |
69 | unsigned long flags; | |
70 | ||
1da177e4 LT |
71 | if (vma->vm_mm) { |
72 | page &= PAGE_MASK; | |
73 | local_irq_save(flags); | |
3eeffb32 | 74 | local_flush_tlb_one(get_asid(), page); |
1da177e4 LT |
75 | local_irq_restore(flags); |
76 | } | |
77 | } | |
78 | ||
3eeffb32 PM |
79 | void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, |
80 | unsigned long end) | |
1da177e4 LT |
81 | { |
82 | unsigned long flags; | |
83 | unsigned long long match, pteh=0, pteh_epn, pteh_low; | |
84 | unsigned long tlb; | |
3eeffb32 | 85 | unsigned int cpu = smp_processor_id(); |
1da177e4 LT |
86 | struct mm_struct *mm; |
87 | ||
88 | mm = vma->vm_mm; | |
3eeffb32 | 89 | if (cpu_context(cpu, mm) == NO_CONTEXT) |
1da177e4 LT |
90 | return; |
91 | ||
92 | local_irq_save(flags); | |
93 | ||
94 | start &= PAGE_MASK; | |
95 | end &= PAGE_MASK; | |
96 | ||
3eeffb32 | 97 | match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID; |
1da177e4 LT |
98 | |
99 | /* Flush ITLB */ | |
100 | for_each_itlb_entry(tlb) { | |
101 | asm volatile ("getcfg %1, 0, %0" | |
102 | : "=r" (pteh) | |
103 | : "r" (tlb) ); | |
104 | ||
105 | pteh_epn = pteh & PAGE_MASK; | |
106 | pteh_low = pteh & ~PAGE_MASK; | |
107 | ||
108 | if (pteh_low == match && pteh_epn >= start && pteh_epn <= end) | |
109 | __flush_tlb_slot(tlb); | |
110 | } | |
111 | ||
112 | /* Flush DTLB */ | |
113 | for_each_dtlb_entry(tlb) { | |
114 | asm volatile ("getcfg %1, 0, %0" | |
115 | : "=r" (pteh) | |
116 | : "r" (tlb) ); | |
117 | ||
118 | pteh_epn = pteh & PAGE_MASK; | |
119 | pteh_low = pteh & ~PAGE_MASK; | |
120 | ||
121 | if (pteh_low == match && pteh_epn >= start && pteh_epn <= end) | |
122 | __flush_tlb_slot(tlb); | |
123 | } | |
124 | ||
125 | local_irq_restore(flags); | |
126 | } | |
127 | ||
3eeffb32 | 128 | void local_flush_tlb_mm(struct mm_struct *mm) |
1da177e4 LT |
129 | { |
130 | unsigned long flags; | |
3eeffb32 | 131 | unsigned int cpu = smp_processor_id(); |
1da177e4 | 132 | |
3eeffb32 | 133 | if (cpu_context(cpu, mm) == NO_CONTEXT) |
1da177e4 LT |
134 | return; |
135 | ||
136 | local_irq_save(flags); | |
137 | ||
3eeffb32 PM |
138 | cpu_context(cpu, mm) = NO_CONTEXT; |
139 | if (mm == current->mm) | |
140 | activate_context(mm, cpu); | |
1da177e4 LT |
141 | |
142 | local_irq_restore(flags); | |
1da177e4 LT |
143 | } |
144 | ||
3eeffb32 | 145 | void local_flush_tlb_all(void) |
1da177e4 LT |
146 | { |
147 | /* Invalidate all, including shared pages, excluding fixed TLBs */ | |
1da177e4 LT |
148 | unsigned long flags, tlb; |
149 | ||
1da177e4 LT |
150 | local_irq_save(flags); |
151 | ||
152 | /* Flush each ITLB entry */ | |
3eeffb32 | 153 | for_each_itlb_entry(tlb) |
1da177e4 | 154 | __flush_tlb_slot(tlb); |
1da177e4 LT |
155 | |
156 | /* Flush each DTLB entry */ | |
3eeffb32 | 157 | for_each_dtlb_entry(tlb) |
1da177e4 | 158 | __flush_tlb_slot(tlb); |
1da177e4 LT |
159 | |
160 | local_irq_restore(flags); | |
161 | } | |
162 | ||
/*
 * Flush kernel-range translations.  No range-limited variant is
 * implemented yet, so fall back to a full flush.
 * FIXME: Optimize this later..
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_all();
}
9cef7492 | 168 | |
59615ecd PM |
169 | void __flush_tlb_global(void) |
170 | { | |
171 | flush_tlb_all(); | |
172 | } |