Commit | Line | Data |
---|---|---|
06e0ffa6 MH |
1 | /* |
2 | * Copyright(c) 2016 Intel Corporation. | |
3 | * | |
4 | * This file is provided under a dual BSD/GPLv2 license. When using or | |
5 | * redistributing this file, you may do so under either license. | |
6 | * | |
7 | * GPL LICENSE SUMMARY | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of version 2 of the GNU General Public License as | |
11 | * published by the Free Software Foundation. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, but | |
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
16 | * General Public License for more details. | |
17 | * | |
18 | * BSD LICENSE | |
19 | * | |
20 | * Redistribution and use in source and binary forms, with or without | |
21 | * modification, are permitted provided that the following conditions | |
22 | * are met: | |
23 | * | |
24 | * - Redistributions of source code must retain the above copyright | |
25 | * notice, this list of conditions and the following disclaimer. | |
26 | * - Redistributions in binary form must reproduce the above copyright | |
27 | * notice, this list of conditions and the following disclaimer in | |
28 | * the documentation and/or other materials provided with the | |
29 | * distribution. | |
30 | * - Neither the name of Intel Corporation nor the names of its | |
31 | * contributors may be used to endorse or promote products derived | |
32 | * from this software without specific prior written permission. | |
33 | * | |
34 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
35 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
36 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
37 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
38 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
39 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
40 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
41 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
42 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
43 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
44 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
45 | * | |
46 | */ | |
47 | #include <linux/list.h> | |
48 | #include <linux/mmu_notifier.h> | |
df5a00f8 | 49 | #include <linux/interval_tree_generic.h> |
06e0ffa6 MH |
50 | |
51 | #include "mmu_rb.h" | |
52 | #include "trace.h" | |
53 | ||
/*
 * Per-root state: ties one RB tree root to the MMU notifier that
 * invalidates it and the caller-supplied ops used to manage nodes.
 */
struct mmu_rb_handler {
	struct list_head list;	/* entry on the global mmu_rb_handlers list */
	struct mmu_notifier mn;	/* notifier registered against current->mm */
	struct rb_root *root;	/* interval tree of mmu_rb_nodes */
	spinlock_t lock; /* protect the RB tree */
	struct mmu_rb_ops *ops;	/* insert/remove/invalidate/filter callbacks */
};

/* Global registry of handlers, looked up by RB root. */
static LIST_HEAD(mmu_rb_handlers);
static DEFINE_SPINLOCK(mmu_rb_lock); /* protect mmu_rb_handlers list */
64 | ||
df5a00f8 MH |
65 | static unsigned long mmu_node_start(struct mmu_rb_node *); |
66 | static unsigned long mmu_node_last(struct mmu_rb_node *); | |
06e0ffa6 MH |
67 | static struct mmu_rb_handler *find_mmu_handler(struct rb_root *); |
68 | static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *, | |
69 | unsigned long); | |
70 | static inline void mmu_notifier_range_start(struct mmu_notifier *, | |
71 | struct mm_struct *, | |
72 | unsigned long, unsigned long); | |
73 | static void mmu_notifier_mem_invalidate(struct mmu_notifier *, | |
f19bd643 | 74 | struct mm_struct *, |
06e0ffa6 MH |
75 | unsigned long, unsigned long); |
76 | static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *, | |
77 | unsigned long, unsigned long); | |
78 | ||
79 | static struct mmu_notifier_ops mn_opts = { | |
80 | .invalidate_page = mmu_notifier_page, | |
81 | .invalidate_range_start = mmu_notifier_range_start, | |
82 | }; | |
83 | ||
df5a00f8 MH |
84 | INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last, |
85 | mmu_node_start, mmu_node_last, static, __mmu_int_rb); | |
86 | ||
/* Interval-tree start key: page-aligned base of the node's range. */
static unsigned long mmu_node_start(struct mmu_rb_node *node)
{
	return node->addr & PAGE_MASK;
}
91 | ||
/*
 * Interval-tree last key: inclusive final byte of the node's range,
 * rounded up to a page boundary so a partial trailing page is covered.
 */
static unsigned long mmu_node_last(struct mmu_rb_node *node)
{
	return PAGE_ALIGN((node->addr & PAGE_MASK) + node->len) - 1;
}
96 | ||
06e0ffa6 MH |
97 | int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops) |
98 | { | |
99 | struct mmu_rb_handler *handlr; | |
c81e1f64 | 100 | unsigned long flags; |
06e0ffa6 | 101 | |
b8718e2e | 102 | if (!ops->invalidate) |
06e0ffa6 MH |
103 | return -EINVAL; |
104 | ||
105 | handlr = kmalloc(sizeof(*handlr), GFP_KERNEL); | |
106 | if (!handlr) | |
107 | return -ENOMEM; | |
108 | ||
109 | handlr->root = root; | |
110 | handlr->ops = ops; | |
111 | INIT_HLIST_NODE(&handlr->mn.hlist); | |
112 | spin_lock_init(&handlr->lock); | |
113 | handlr->mn.ops = &mn_opts; | |
c81e1f64 | 114 | spin_lock_irqsave(&mmu_rb_lock, flags); |
06e0ffa6 | 115 | list_add_tail(&handlr->list, &mmu_rb_handlers); |
c81e1f64 | 116 | spin_unlock_irqrestore(&mmu_rb_lock, flags); |
06e0ffa6 MH |
117 | |
118 | return mmu_notifier_register(&handlr->mn, current->mm); | |
119 | } | |
120 | ||
/*
 * Tear down the handler for @root: drop it from the global list, erase
 * every remaining node (running the optional remove callback on each),
 * then unregister the MMU notifier and free the handler.
 *
 * Silently returns if @root was never registered.
 */
void hfi1_mmu_rb_unregister(struct rb_root *root)
{
	struct mmu_rb_handler *handler = find_mmu_handler(root);
	unsigned long flags;

	if (!handler)
		return;

	/* Unpublish first so new lookups cannot find this handler. */
	spin_lock_irqsave(&mmu_rb_lock, flags);
	list_del(&handler->list);
	spin_unlock_irqrestore(&mmu_rb_lock, flags);

	/* Drain the tree; each node is erased before ops->remove() runs. */
	if (!RB_EMPTY_ROOT(root)) {
		struct rb_node *node;
		struct mmu_rb_node *rbnode;

		while ((node = rb_first(root))) {
			rbnode = rb_entry(node, struct mmu_rb_node, node);
			rb_erase(node, root);
			if (handler->ops->remove)
				handler->ops->remove(root, rbnode, NULL);
		}
	}

	/*
	 * NOTE(review): the notifier is still registered while the tree is
	 * drained above, and is only unregistered when current->mm is
	 * non-NULL — confirm invalidations cannot race with this teardown
	 * and that unregister paths without a live mm are handled elsewhere.
	 */
	if (current->mm)
		mmu_notifier_unregister(&handler->mn, current->mm);
	kfree(handler);
}
149 | ||
150 | int hfi1_mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode) | |
151 | { | |
06e0ffa6 | 152 | struct mmu_rb_handler *handler = find_mmu_handler(root); |
df5a00f8 | 153 | struct mmu_rb_node *node; |
c81e1f64 | 154 | unsigned long flags; |
df5a00f8 | 155 | int ret = 0; |
06e0ffa6 MH |
156 | |
157 | if (!handler) | |
158 | return -EINVAL; | |
159 | ||
c81e1f64 | 160 | spin_lock_irqsave(&handler->lock, flags); |
353b71c7 MH |
161 | hfi1_cdbg(MMU, "Inserting node addr 0x%llx, len %u", mnode->addr, |
162 | mnode->len); | |
df5a00f8 MH |
163 | node = __mmu_rb_search(handler, mnode->addr, mnode->len); |
164 | if (node) { | |
165 | ret = -EINVAL; | |
166 | goto unlock; | |
06e0ffa6 | 167 | } |
df5a00f8 | 168 | __mmu_int_rb_insert(mnode, root); |
06e0ffa6 MH |
169 | |
170 | if (handler->ops->insert) { | |
171 | ret = handler->ops->insert(root, mnode); | |
172 | if (ret) | |
df5a00f8 | 173 | __mmu_int_rb_remove(mnode, root); |
06e0ffa6 | 174 | } |
06e0ffa6 | 175 | unlock: |
c81e1f64 | 176 | spin_unlock_irqrestore(&handler->lock, flags); |
06e0ffa6 MH |
177 | return ret; |
178 | } | |
179 | ||
de82bdff | 180 | /* Caller must hold handler lock */ |
06e0ffa6 MH |
181 | static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, |
182 | unsigned long addr, | |
183 | unsigned long len) | |
184 | { | |
0f310a00 | 185 | struct mmu_rb_node *node = NULL; |
df5a00f8 | 186 | |
353b71c7 | 187 | hfi1_cdbg(MMU, "Searching for addr 0x%llx, len %u", addr, len); |
0f310a00 MH |
188 | if (!handler->ops->filter) { |
189 | node = __mmu_int_rb_iter_first(handler->root, addr, | |
190 | (addr + len) - 1); | |
191 | } else { | |
192 | for (node = __mmu_int_rb_iter_first(handler->root, addr, | |
193 | (addr + len) - 1); | |
194 | node; | |
195 | node = __mmu_int_rb_iter_next(node, addr, | |
196 | (addr + len) - 1)) { | |
197 | if (handler->ops->filter(node, addr, len)) | |
198 | return node; | |
199 | } | |
200 | } | |
df5a00f8 | 201 | return node; |
06e0ffa6 MH |
202 | } |
203 | ||
de82bdff | 204 | /* Caller must *not* hold handler lock. */ |
06e0ffa6 | 205 | static void __mmu_rb_remove(struct mmu_rb_handler *handler, |
f19bd643 | 206 | struct mmu_rb_node *node, struct mm_struct *mm) |
06e0ffa6 | 207 | { |
de82bdff MH |
208 | unsigned long flags; |
209 | ||
06e0ffa6 | 210 | /* Validity of handler and node pointers has been checked by caller. */ |
353b71c7 MH |
211 | hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr, |
212 | node->len); | |
de82bdff | 213 | spin_lock_irqsave(&handler->lock, flags); |
df5a00f8 | 214 | __mmu_int_rb_remove(node, handler->root); |
de82bdff MH |
215 | spin_unlock_irqrestore(&handler->lock, flags); |
216 | ||
06e0ffa6 | 217 | if (handler->ops->remove) |
f19bd643 | 218 | handler->ops->remove(handler->root, node, mm); |
06e0ffa6 MH |
219 | } |
220 | ||
221 | struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr, | |
222 | unsigned long len) | |
223 | { | |
224 | struct mmu_rb_handler *handler = find_mmu_handler(root); | |
225 | struct mmu_rb_node *node; | |
c81e1f64 | 226 | unsigned long flags; |
06e0ffa6 MH |
227 | |
228 | if (!handler) | |
229 | return ERR_PTR(-EINVAL); | |
230 | ||
c81e1f64 | 231 | spin_lock_irqsave(&handler->lock, flags); |
06e0ffa6 | 232 | node = __mmu_rb_search(handler, addr, len); |
c81e1f64 | 233 | spin_unlock_irqrestore(&handler->lock, flags); |
06e0ffa6 MH |
234 | |
235 | return node; | |
236 | } | |
237 | ||
238 | void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node) | |
239 | { | |
240 | struct mmu_rb_handler *handler = find_mmu_handler(root); | |
241 | ||
242 | if (!handler || !node) | |
243 | return; | |
244 | ||
f19bd643 | 245 | __mmu_rb_remove(handler, node, NULL); |
06e0ffa6 MH |
246 | } |
247 | ||
248 | static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root) | |
249 | { | |
250 | struct mmu_rb_handler *handler; | |
c81e1f64 | 251 | unsigned long flags; |
06e0ffa6 | 252 | |
c81e1f64 | 253 | spin_lock_irqsave(&mmu_rb_lock, flags); |
06e0ffa6 MH |
254 | list_for_each_entry(handler, &mmu_rb_handlers, list) { |
255 | if (handler->root == root) | |
256 | goto unlock; | |
257 | } | |
258 | handler = NULL; | |
259 | unlock: | |
c81e1f64 | 260 | spin_unlock_irqrestore(&mmu_rb_lock, flags); |
06e0ffa6 MH |
261 | return handler; |
262 | } | |
263 | ||
/*
 * MMU notifier callback: a single page at @addr is being invalidated.
 * Forwarded as a one-page range to the common invalidation path.
 */
static inline void mmu_notifier_page(struct mmu_notifier *mn,
				     struct mm_struct *mm, unsigned long addr)
{
	mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE);
}
269 | ||
/*
 * MMU notifier callback: the range [start, end) is about to be
 * invalidated. Forwarded to the common invalidation path.
 */
static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
					    struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
{
	mmu_notifier_mem_invalidate(mn, mm, start, end);
}
277 | ||
/*
 * Core invalidation: walk every node overlapping [start, end) and, for
 * each one the handler's invalidate callback claims, remove it from the
 * tree and run the remove callback via __mmu_rb_remove().
 */
static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	struct mmu_rb_handler *handler =
		container_of(mn, struct mmu_rb_handler, mn);
	struct rb_root *root = handler->root;
	struct mmu_rb_node *node, *ptr = NULL;
	unsigned long flags;

	spin_lock_irqsave(&handler->lock, flags);
	for (node = __mmu_int_rb_iter_first(root, start, end - 1);
	     node; node = ptr) {
		/* Guard against node removal. */
		ptr = __mmu_int_rb_iter_next(node, start, end - 1);
		hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u",
			  node->addr, node->len);
		if (handler->ops->invalidate(root, node)) {
			/*
			 * __mmu_rb_remove() takes handler->lock itself, so
			 * it must be called with the lock dropped.
			 * NOTE(review): while the lock is released, the
			 * prefetched "ptr" could itself be removed by a
			 * concurrent path — confirm callers cannot race
			 * here.
			 */
			spin_unlock_irqrestore(&handler->lock, flags);
			__mmu_rb_remove(handler, node, mm);
			spin_lock_irqsave(&handler->lock, flags);
		}
	}
	spin_unlock_irqrestore(&handler->lock, flags);
}