/*
 *  linux/include/asm-arm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
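/*
 * The generic mm code drives this interface roughly as follows when
 * unmapping (a sketch of the calling convention, not a verbatim caller):
 *
 *	tlb = tlb_gather_mmu(mm, full_mm_flush);
 *	for each vma:
 *		tlb_start_vma(tlb, vma);
 *		for each page being unmapped:
 *			tlb_remove_tlb_entry(tlb, ptep, addr);
 *			tlb_remove_page(tlb, page);
 *		tlb_end_vma(tlb, vma);
 *	tlb_finish_mmu(tlb, start, end);
 */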
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		freed;
	unsigned int		fullmm;

	unsigned int		flushes;
	unsigned int		avoided_flushes;
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

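/*
 * Grab this CPU's mmu_gather and prime it for the given mm.
 * full_mm_flush is non-zero when the entire address space is being
 * torn down (e.g. on exit), in which case the per-vma flushes below
 * are skipped in favour of a single flush_tlb_mm() at
 * tlb_finish_mmu() time.  The caller is expected to hold the mm's
 * page_table_lock, which keeps us from migrating off this CPU.
 */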
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
	int cpu = smp_processor_id();
	struct mmu_gather *tlb = &per_cpu(mmu_gathers, cpu);

	tlb->mm = mm;
	tlb->freed = 0;
	tlb->fullmm = full_mm_flush;

	return tlb;
}

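/*
 * Finish the gather: fold the freed-page count back into the mm's
 * rss counter (clamped so the counter cannot go negative), perform
 * the deferred whole-mm TLB flush if this was a full-mm teardown,
 * and trim the arch's page table cache via check_pgt_cache().
 */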
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = tlb->mm;
	unsigned long freed = tlb->freed;
	int rss = get_mm_counter(mm, rss);

	if (rss < freed)
		freed = rss;
	add_mm_counter(mm, rss, -freed);

	if (tlb->fullmm)
		flush_tlb_mm(mm);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}

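/*
 * Report whether this gather is tearing down the entire address
 * space, as set up by tlb_gather_mmu().
 */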
static inline unsigned int tlb_is_full_mm(struct mmu_gather *tlb)
{
	return tlb->fullmm;
}

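/*
 * No per-PTE TLB invalidation: per the StrongARM observation in the
 * header comment above, this implementation relies on the range and
 * whole-mm flushes done in tlb_end_vma() and tlb_finish_mmu() instead.
 */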
#define tlb_remove_tlb_entry(tlb,ptep,address)	do { } while (0)

/*
 * In the case of tlb vma handling, we can optimise these away when
 * we're doing a full MM flush.  When we're doing a munmap, the vmas
 * are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		flush_tlb_range(vma, vma->vm_start, vma->vm_end);
}

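/*
 * Pages are freed immediately rather than batched, and page table
 * pages go straight back to the allocator; only the TLB flushing
 * itself is deferred on ARM.
 */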
#define tlb_remove_page(tlb,page)	free_page_and_swap_cache(page)
#define pte_free_tlb(tlb,ptep)		pte_free(ptep)
#define pmd_free_tlb(tlb,pmdp)		pmd_free(pmdp)

#define tlb_migrate_finish(mm)		do { } while (0)

#endif