Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _PARISC_TLBFLUSH_H |
2 | #define _PARISC_TLBFLUSH_H | |
3 | ||
4 | /* TLB flushing routines.... */ | |
5 | ||
1da177e4 LT |
6 | #include <linux/mm.h> |
7 | #include <asm/mmu_context.h> | |
8 | ||

/* This is for the serialisation of PxTLB broadcasts. At least on the
 * N class systems, only one PxTLB inter processor broadcast can be
 * active at any one time on the Merced bus. This tlb purge
 * synchronisation is fairly lightweight and harmless so we activate
 * it on all SMP systems not just the N class. We also need to have
 * preemption disabled on uniprocessor machines, and spin_lock does that
 * nicely.
 */
extern spinlock_t pa_tlb_lock;

/* Bracket every PxTLB purge sequence with these.  The argument is
 * ignored; it is kept only for call-site compatibility.  The lock
 * serialises purges across CPUs (and disables preemption on UP). */
#define purge_tlb_start(x) spin_lock(&pa_tlb_lock)
#define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)

/* Flush the whole TLB on all CPUs / on this CPU only (the void *
 * argument suits the SMP cross-call signature; implemented elsewhere). */
extern void flush_tlb_all(void);
extern void flush_tlb_all_local(void *);
1da177e4 LT |
25 | |
26 | /* | |
27 | * flush_tlb_mm() | |
28 | * | |
29 | * XXX This code is NOT valid for HP-UX compatibility processes, | |
30 | * (although it will probably work 99% of the time). HP-UX | |
31 | * processes are free to play with the space id's and save them | |
32 | * over long periods of time, etc. so we have to preserve the | |
33 | * space and just flush the entire tlb. We need to check the | |
34 | * personality in order to do that, but the personality is not | |
35 | * currently being set correctly. | |
36 | * | |
37 | * Of course, Linux processes could do the same thing, but | |
38 | * we don't support that (and the compilers, dynamic linker, | |
39 | * etc. do not do that). | |
40 | */ | |
41 | ||
592ac93a RC |
42 | static inline void flush_tlb_mm(struct mm_struct *mm) |
43 | { | |
04532c4f KM |
44 | BUG_ON(mm == &init_mm); /* Should never happen */ |
45 | ||
46 | #ifdef CONFIG_SMP | |
47 | flush_tlb_all(); | |
48 | #else | |
49 | if (mm) { | |
50 | if (mm->context != 0) | |
51 | free_sid(mm->context); | |
52 | mm->context = alloc_sid(); | |
53 | if (mm == current->active_mm) | |
54 | load_context(mm->context); | |
55 | } | |
56 | #endif | |
1da177e4 LT |
57 | } |
58 | ||
/*
 * flush_tlb_pgtables() - no-op on parisc; nothing is cached for
 * intermediate page-table levels, so there is nothing to purge.
 *
 * Was `extern __inline__`: under C99/C11 inline semantics that emits
 * an external definition in every translation unit including this
 * header, causing multiple-definition link errors.  `static inline`
 * is the header-safe idiom and matches flush_tlb_page() below.
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
}
62 | ||
/*
 * flush_tlb_page() - purge the TLB entries for one page of @vma.
 * Ordering matters here: the memory barrier must precede the space-id
 * load, and both purges must sit inside the pa_tlb_lock-protected
 * region (see purge_tlb_start/purge_tlb_end above), so do not reorder.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	/* For one page, it's not worth testing the split_tlb variable */

	mb();			/* order prior PTE updates before the purge */
	mtsp(vma->vm_mm->context,1);	/* load the mm's space id — presumably into sr1; confirm against mtsp() */
	purge_tlb_start();	/* serialise the PxTLB broadcast */
	pdtlb(addr);		/* purge data TLB entry */
	pitlb(addr);		/* purge instruction TLB entry */
	purge_tlb_end();
}
75 | ||
/* Purge all TLB entries for [start, end) under space id @sid;
 * implemented elsewhere (arch/parisc). */
void __flush_tlb_range(unsigned long sid,
	unsigned long start, unsigned long end);

/* Flush a user range (space id taken from the vma's mm) and a kernel
 * range (kernel space id 0).  All macro arguments are parenthesized —
 * the original left start/end bare, inconsistent with (vma). */
#define flush_tlb_range(vma, start, end) \
	__flush_tlb_range((vma)->vm_mm->context, (start), (end))

#define flush_tlb_kernel_range(start, end) \
	__flush_tlb_range(0, (start), (end))
1da177e4 LT |
82 | |
83 | #endif |