/*
 * Copyright (C) 2014 Davidlohr Bueso.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmacache.h>

/*
 * Flush vma caches for threads that share a given mm.
 *
 * The operation is safe because the caller holds the mmap_sem
 * exclusively and other threads accessing the vma cache will
 * have mmap_sem held at least for read, so no extra locking
 * is required to maintain the vma cache.
 */
void vmacache_flush_all(struct mm_struct *mm)
{
        struct task_struct *g, *p;

        count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);

        /*
         * Single threaded tasks need not iterate the entire
         * list of processes. We can avoid the flushing as well,
         * since the mm's seqnum was already increased and we
         * don't have to worry about other threads' seqnums.
         * Current's flush will occur upon the next lookup.
         */
        if (atomic_read(&mm->mm_users) == 1)
                return;

        rcu_read_lock();
        for_each_process_thread(g, p) {
                /*
                 * Only flush the vmacache pointers as the
                 * mm seqnum is already set and curr's will
                 * be set upon invalidation when the next
                 * lookup is done.
                 */
                if (mm == p->mm)
                        vmacache_flush(p);
        }
        rcu_read_unlock();
}
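
/*
 * For reference, a minimal sketch of what the vmacache_flush() helper
 * used above is assumed to do (the real inline lives in
 * <linux/vmacache.h>; vmacache_flush_sketch is a hypothetical name,
 * chosen only to avoid clashing with it): drop every task-local cache
 * slot so subsequent lookups repopulate them lazily.
 */
static inline void vmacache_flush_sketch(struct task_struct *tsk)
{
        /* Clear all cached vma pointers for @tsk. */
        memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
}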

/*
 * This task may be accessing a foreign mm via (for example)
 * get_user_pages()->find_vma(). The vmacache is task-local and this
 * task's vmacache pertains to a different mm (ie, its own). There is
 * nothing we can do here.
 *
 * Also handle the case where a kernel thread has adopted this mm via use_mm().
 * That kernel thread's vmacache is not applicable to this mm.
 */
static inline bool vmacache_valid_mm(struct mm_struct *mm)
{
        return current->mm == mm && !(current->flags & PF_KTHREAD);
}

void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{
        if (vmacache_valid_mm(newvma->vm_mm))
                current->vmacache[VMACACHE_HASH(addr)] = newvma;
}
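
/*
 * For reference, an assumed sketch of the indexing scheme behind
 * VMACACHE_HASH() above (the real macro and the VMACACHE_* constants
 * are expected to come from <linux/vmacache.h> and <linux/mm_types.h>;
 * VMACACHE_HASH_SKETCH is a hypothetical name used only for
 * illustration): hash on the page number so that neighbouring pages
 * spread across the VMACACHE_SIZE slots of the per-task array.
 */
#define VMACACHE_HASH_SKETCH(addr) (((addr) >> PAGE_SHIFT) & VMACACHE_MASK)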

static bool vmacache_valid(struct mm_struct *mm)
{
        struct task_struct *curr;

        if (!vmacache_valid_mm(mm))
                return false;

        curr = current;
        if (mm->vmacache_seqnum != curr->vmacache_seqnum) {
                /*
                 * First attempt will always be invalid; initialize
                 * the new cache for this task here.
                 */
                curr->vmacache_seqnum = mm->vmacache_seqnum;
                vmacache_flush(curr);
                return false;
        }
        return true;
}
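
/*
 * The seqnum comparison above pairs with the invalidation side. A
 * minimal sketch of that helper is given below under the hypothetical
 * name vmacache_invalidate_sketch (the real inline is assumed to live
 * in <linux/vmacache.h>): writers bump the mm-wide sequence number so
 * every thread's cache goes stale lazily, and only the wrap-around to
 * zero needs the full flush done by vmacache_flush_all() above.
 */
static inline void vmacache_invalidate_sketch(struct mm_struct *mm)
{
        mm->vmacache_seqnum++;

        /* On overflow, all per-thread seqnums must be resynchronized. */
        if (unlikely(mm->vmacache_seqnum == 0))
                vmacache_flush_all(mm);
}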

struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
{
        int i;

        count_vm_vmacache_event(VMACACHE_FIND_CALLS);

        if (!vmacache_valid(mm))
                return NULL;

        for (i = 0; i < VMACACHE_SIZE; i++) {
                struct vm_area_struct *vma = current->vmacache[i];

                if (!vma)
                        continue;
                if (WARN_ON_ONCE(vma->vm_mm != mm))
                        break;
                if (vma->vm_start <= addr && vma->vm_end > addr) {
                        count_vm_vmacache_event(VMACACHE_FIND_HITS);
                        return vma;
                }
        }

        return NULL;
}
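
/*
 * Typical caller pattern, sketched: a find_vma()-style lookup tries the
 * per-task cache first and repopulates it after a slow-path search.
 * Everything below is a hypothetical illustration, not the kernel's
 * actual find_vma(); lookup_vma_slowpath() is an assumed stand-in for
 * whatever structure (e.g. the mm's rbtree) tracks the vmas.
 */
struct vm_area_struct *lookup_vma_slowpath(struct mm_struct *mm,
                                           unsigned long addr);

struct vm_area_struct *find_vma_cached_sketch(struct mm_struct *mm,
                                              unsigned long addr)
{
        struct vm_area_struct *vma;

        /* Fast path: task-local cache, no locking beyond mmap_sem. */
        vma = vmacache_find(mm, addr);
        if (vma)
                return vma;

        /* Slow path: full lookup, then cache the result for next time. */
        vma = lookup_vma_slowpath(mm, addr);
        if (vma)
                vmacache_update(addr, vma);

        return vma;
}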

#ifndef CONFIG_MMU
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
                                           unsigned long start,
                                           unsigned long end)
{
        int i;

        count_vm_vmacache_event(VMACACHE_FIND_CALLS);

        if (!vmacache_valid(mm))
                return NULL;

        for (i = 0; i < VMACACHE_SIZE; i++) {
                struct vm_area_struct *vma = current->vmacache[i];

                if (vma && vma->vm_start == start && vma->vm_end == end) {
                        count_vm_vmacache_event(VMACACHE_FIND_HITS);
                        return vma;
                }
        }

        return NULL;
}
#endif