Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _ASM_M32R_TLBFLUSH_H |
2 | #define _ASM_M32R_TLBFLUSH_H | |
3 | ||
1da177e4 LT |
4 | #include <asm/m32r.h> |
5 | ||
6 | /* | |
7 | * TLB flushing: | |
8 | * | |
9 | * - flush_tlb() flushes the current mm struct TLBs | |
10 | * - flush_tlb_all() flushes all processes TLBs | |
11 | * - flush_tlb_mm(mm) flushes the specified mm context TLB's | |
12 | * - flush_tlb_page(vma, vmaddr) flushes one page | |
13 | * - flush_tlb_range(vma, start, end) flushes a range of pages | |
14 | * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages | |
15 | * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables | |
16 | */ | |
17 | ||
18 | extern void local_flush_tlb_all(void); | |
19 | extern void local_flush_tlb_mm(struct mm_struct *); | |
20 | extern void local_flush_tlb_page(struct vm_area_struct *, unsigned long); | |
21 | extern void local_flush_tlb_range(struct vm_area_struct *, unsigned long, | |
22 | unsigned long); | |
23 | ||
/*
 * Map the generic flush_tlb_* API onto the local (UP) or smp_
 * (SMP) implementations; with no MMU configured they are no-ops.
 */
#ifndef CONFIG_SMP
#ifdef CONFIG_MMU
#define flush_tlb_all()			local_flush_tlb_all()
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
#define flush_tlb_range(vma, start, end)	\
	local_flush_tlb_range(vma, start, end)
/* No per-range kernel flush; fall back to a full TLB flush. */
#define flush_tlb_kernel_range(start, end)	local_flush_tlb_all()
#else	/* CONFIG_MMU */
#define flush_tlb_all()			do { } while (0)
#define flush_tlb_mm(mm)		do { } while (0)
#define flush_tlb_page(vma, vmaddr)	do { } while (0)
#define flush_tlb_range(vma, start, end)	do { } while (0)
#endif	/* CONFIG_MMU */
#else	/* CONFIG_SMP */
/* Cross-CPU variants, defined in the arch SMP code. */
extern void smp_flush_tlb_all(void);
extern void smp_flush_tlb_mm(struct mm_struct *);
extern void smp_flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void smp_flush_tlb_range(struct vm_area_struct *, unsigned long,
	unsigned long);

#define flush_tlb_all()			smp_flush_tlb_all()
#define flush_tlb_mm(mm)		smp_flush_tlb_mm(mm)
#define flush_tlb_page(vma, page)	smp_flush_tlb_page(vma, page)
#define flush_tlb_range(vma, start, end)	\
	smp_flush_tlb_range(vma, start, end)
/* No per-range kernel flush; fall back to a full TLB flush. */
#define flush_tlb_kernel_range(start, end)	smp_flush_tlb_all()
#endif	/* CONFIG_SMP */
52 | ||
53 | static __inline__ void __flush_tlb_page(unsigned long page) | |
54 | { | |
55 | unsigned int tmpreg0, tmpreg1, tmpreg2; | |
56 | ||
57 | __asm__ __volatile__ ( | |
58 | "seth %0, #high(%4) \n\t" | |
59 | "st %3, @(%5, %0) \n\t" | |
60 | "ldi %1, #1 \n\t" | |
61 | "st %1, @(%6, %0) \n\t" | |
62 | "add3 %1, %0, %7 \n\t" | |
63 | ".fillinsn \n" | |
64 | "1: \n\t" | |
65 | "ld %2, @(%6, %0) \n\t" | |
66 | "bnez %2, 1b \n\t" | |
67 | "ld %0, @%1+ \n\t" | |
68 | "ld %1, @%1 \n\t" | |
69 | "st %2, @+%0 \n\t" | |
70 | "st %2, @+%1 \n\t" | |
71 | : "=&r" (tmpreg0), "=&r" (tmpreg1), "=&r" (tmpreg2) | |
72 | : "r" (page), "i" (MMU_REG_BASE), "i" (MSVA_offset), | |
73 | "i" (MTOP_offset), "i" (MIDXI_offset) | |
74 | : "memory" | |
75 | ); | |
76 | } | |
77 | ||
78 | static __inline__ void __flush_tlb_all(void) | |
79 | { | |
80 | unsigned int tmpreg0, tmpreg1; | |
81 | ||
82 | __asm__ __volatile__ ( | |
83 | "seth %0, #high(%2) \n\t" | |
84 | "or3 %0, %0, #low(%2) \n\t" | |
85 | "ldi %1, #0xc \n\t" | |
86 | "st %1, @%0 \n\t" | |
87 | ".fillinsn \n" | |
88 | "1: \n\t" | |
89 | "ld %1, @%0 \n\t" | |
90 | "bnez %1, 1b \n\t" | |
91 | : "=&r" (tmpreg0), "=&r" (tmpreg1) | |
92 | : "i" (MTOP) : "memory" | |
93 | ); | |
94 | } | |
95 | ||
96 | #define flush_tlb_pgtables(mm, start, end) do { } while (0) | |
97 | ||
98 | extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); | |
99 | ||
100 | #endif /* _ASM_M32R_TLBFLUSH_H */ | |
101 |