Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _SPARC64_TLB_H |
2 | #define _SPARC64_TLB_H | |
3 | ||
1da177e4 LT |
4 | #include <linux/swap.h> |
5 | #include <asm/pgalloc.h> | |
6 | #include <asm/tlbflush.h> | |
7 | #include <asm/mmu_context.h> | |
8 | ||
9 | #define TLB_BATCH_NR 192 | |
10 | ||
/*
 * For UP we don't need to worry about TLB flush
 * and page free order so much..
 */
#ifdef CONFIG_SMP
/* SMP: batch up to 506 pages before freeing them. */
#define FREE_PTE_NR	506
/* Fast mode is flagged by the pages_nr == ~0U sentinel (set at gather time
 * when only one cpu is online): pages are freed immediately, not batched. */
#define tlb_fast_mode(bp) ((bp)->pages_nr == ~0U)
#else
/* UP: no batching needed — a one-entry array keeps the struct small. */
#define FREE_PTE_NR	1
#define tlb_fast_mode(bp) 1
#endif
22 | ||
/* Per-cpu batching state for page-table teardown: pages whose freeing is
 * deferred and virtual addresses whose TLB entries need flushing. */
struct mmu_gather {
	struct mm_struct *mm;		/* address space being torn down */
	unsigned int pages_nr;		/* pages queued in pages[]; ~0U = fast mode */
	unsigned int need_flush;	/* nonzero when pages[] holds entries to free */
	unsigned int fullmm;		/* whole-mm teardown: skip flush_tlb_pending() */
	unsigned int tlb_nr;		/* entries in vaddrs[]; presumably filled by the
					 * pte-update paths elsewhere — must be 0 at
					 * gather time (BUG_ON below) */
	unsigned long vaddrs[TLB_BATCH_NR];	/* vaddrs pending TLB flush */
	struct page *pages[FREE_PTE_NR];	/* pages pending free */
};
32 | ||
/* One gather structure per cpu; claimed via get_cpu_var() in tlb_gather_mmu(). */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

#ifdef CONFIG_SMP
extern void smp_flush_tlb_pending(struct mm_struct *,
				  unsigned long, unsigned long *);
#endif

/* Flush a batch of virtual addresses for a given context (asm helper). */
extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
/* Drain this cpu's pending TLB-flush batch. */
extern void flush_tlb_pending(void);
42 | ||
43 | static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) | |
44 | { | |
15a23ffa | 45 | struct mmu_gather *mp = &get_cpu_var(mmu_gathers); |
1da177e4 LT |
46 | |
47 | BUG_ON(mp->tlb_nr); | |
48 | ||
49 | mp->mm = mm; | |
50 | mp->pages_nr = num_online_cpus() > 1 ? 0U : ~0U; | |
4d6ddfa9 | 51 | mp->fullmm = full_mm_flush; |
1da177e4 LT |
52 | |
53 | return mp; | |
54 | } | |
55 | ||
56 | ||
57 | static inline void tlb_flush_mmu(struct mmu_gather *mp) | |
58 | { | |
59 | if (mp->need_flush) { | |
59871bcd HD |
60 | free_pages_and_swap_cache(mp->pages, mp->pages_nr); |
61 | mp->pages_nr = 0; | |
1da177e4 | 62 | mp->need_flush = 0; |
1da177e4 LT |
63 | } |
64 | ||
65 | } | |
66 | ||
#ifdef CONFIG_SMP
extern void smp_flush_tlb_mm(struct mm_struct *mm);
/* SMP: cross-call so every cpu drops this mm's TLB entries. */
#define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
#else
/* UP: flush the mm's hardware context directly on this cpu. */
#define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT)
#endif
73 | ||
74 | static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, unsigned long end) | |
75 | { | |
1da177e4 LT |
76 | tlb_flush_mmu(mp); |
77 | ||
62dbec78 | 78 | if (mp->fullmm) |
4d6ddfa9 | 79 | mp->fullmm = 0; |
62dbec78 | 80 | else |
1da177e4 LT |
81 | flush_tlb_pending(); |
82 | ||
83 | /* keep the page table cache within bounds */ | |
84 | check_pgt_cache(); | |
15a23ffa HD |
85 | |
86 | put_cpu_var(mmu_gathers); | |
1da177e4 LT |
87 | } |
88 | ||
1da177e4 LT |
89 | static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page) |
90 | { | |
1da177e4 LT |
91 | if (tlb_fast_mode(mp)) { |
92 | free_page_and_swap_cache(page); | |
93 | return; | |
94 | } | |
59871bcd | 95 | mp->need_flush = 1; |
1da177e4 LT |
96 | mp->pages[mp->pages_nr++] = page; |
97 | if (mp->pages_nr >= FREE_PTE_NR) | |
98 | tlb_flush_mmu(mp); | |
99 | } | |
100 | ||
/* Per-pte TLB bookkeeping is a no-op here; presumably handled by the
 * pte-update paths that fill vaddrs[] — confirm against the arch code. */
#define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0)
/* Page-table pages are freed directly, not batched through the gather. */
#define pte_free_tlb(mp,ptepage) pte_free(ptepage)
#define pmd_free_tlb(mp,pmdp) pmd_free(pmdp)
#define pud_free_tlb(tlb,pudp) __pud_free_tlb(tlb,pudp)

/* No extra work needed at these points on this architecture. */
#define tlb_migrate_finish(mm) do { } while (0)
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)

#endif /* _SPARC64_TLB_H */