/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "radeon.h"

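/*
 * One context exists per (device, mm) pair. It ties an MMU notifier for
 * the process address space to an interval tree of the userptr BO ranges
 * registered against that address space.
 */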
struct radeon_mn {
	/* constant after initialisation */
	struct radeon_device	*rdev;
	struct mm_struct	*mm;
	struct mmu_notifier	mn;

	/* only used on destruction */
	struct work_struct	work;

	/* protected by rdev->mn_lock */
	struct hlist_node	node;

	/* objects protected by lock */
	struct mutex		lock;
	struct rb_root		objects;
};

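/*
 * One node in the interval tree; BOs whose userptr ranges overlap are
 * merged into a single node and linked on the bos list.
 */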
struct radeon_mn_node {
	struct interval_tree_node	it;
	struct list_head		bos;
};

/**
 * radeon_mn_destroy - destroy the rmn
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item.
 */
static void radeon_mn_destroy(struct work_struct *work)
{
	struct radeon_mn *rmn = container_of(work, struct radeon_mn, work);
	struct radeon_device *rdev = rmn->rdev;
	struct radeon_mn_node *node, *next_node;
	struct radeon_bo *bo, *next_bo;

	mutex_lock(&rdev->mn_lock);
	mutex_lock(&rmn->lock);
	hash_del(&rmn->node);
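	/*
	 * Postorder traversal visits children before their parent, so each
	 * node can be freed while the whole tree is being torn down.
	 */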
	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
					     it.rb) {

		interval_tree_remove(&node->it, &rmn->objects);
		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
			bo->mn = NULL;
			list_del_init(&bo->mn_list);
		}
		kfree(node);
	}
	mutex_unlock(&rmn->lock);
	mutex_unlock(&rdev->mn_lock);
	mmu_notifier_unregister(&rmn->mn, rmn->mm);
	kfree(rmn);
}

/**
 * radeon_mn_release - callback to notify about mm destruction
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 *
 * Schedules a work item to lazily destroy our notifier.
 */
static void radeon_mn_release(struct mmu_notifier *mn,
			      struct mm_struct *mm)
{
	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
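
	/*
	 * Tear down from a worker: calling mmu_notifier_unregister() from
	 * inside this release callback would deadlock.
	 */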
	INIT_WORK(&rmn->work, radeon_mn_destroy);
	schedule_work(&rmn->work);
}

/**
 * radeon_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 * We block for all BOs between start and end to become idle and
 * unmap them by moving them into the system domain again.
 */
static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	mutex_lock(&rmn->lock);

	it = interval_tree_iter_first(&rmn->objects, start, end);
	while (it) {
		struct radeon_mn_node *node;
		struct radeon_bo *bo;
		long r;

		node = container_of(it, struct radeon_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

		list_for_each_entry(bo, &node->bos, mn_list) {

			r = radeon_bo_reserve(bo, true);
			if (r) {
				DRM_ERROR("(%ld) failed to reserve user bo\n", r);
				continue;
			}

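			/* wait for all fences, shared and exclusive, so the BO is idle */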
			r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
				true, false, MAX_SCHEDULE_TIMEOUT);
			if (r <= 0)
				DRM_ERROR("(%ld) failed to wait for user bo\n", r);

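			/* move the BO into the CPU domain so its GPU mapping is dropped */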
			radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
			if (r)
				DRM_ERROR("(%ld) failed to validate user bo\n", r);

			radeon_bo_unreserve(bo);
		}
	}

	mutex_unlock(&rmn->lock);
}

static const struct mmu_notifier_ops radeon_mn_ops = {
	.release = radeon_mn_release,
	.invalidate_range_start = radeon_mn_invalidate_range_start,
};

/**
 * radeon_mn_get - create notifier context
 *
 * @rdev: radeon device pointer
 *
 * Creates a notifier context for current->mm, or returns the existing
 * one for this mm. Returns an ERR_PTR on failure.
 */
static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev)
{
	struct mm_struct *mm = current->mm;
	struct radeon_mn *rmn;
	int r;

	down_write(&mm->mmap_sem);
	mutex_lock(&rdev->mn_lock);

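	/* reuse an existing notifier context for this mm if there is one */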
	hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm)
		if (rmn->mm == mm)
			goto release_locks;

	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
	if (!rmn) {
		rmn = ERR_PTR(-ENOMEM);
		goto release_locks;
	}

	rmn->rdev = rdev;
	rmn->mm = mm;
	rmn->mn.ops = &radeon_mn_ops;
	mutex_init(&rmn->lock);
	rmn->objects = RB_ROOT;

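	/* the __ variant expects mmap_sem to already be held for writing */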
	r = __mmu_notifier_register(&rmn->mn, mm);
	if (r)
		goto free_rmn;

	hash_add(rdev->mn_hash, &rmn->node, (unsigned long)mm);

release_locks:
	mutex_unlock(&rdev->mn_lock);
	up_write(&mm->mmap_sem);

	return rmn;

free_rmn:
	mutex_unlock(&rdev->mn_lock);
	up_write(&mm->mmap_sem);
	kfree(rmn);

	return ERR_PTR(r);
}

/**
 * radeon_mn_register - register a BO for notifier updates
 *
 * @bo: radeon buffer object
 * @addr: userptr addr we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
{
	unsigned long end = addr + radeon_bo_size(bo) - 1;
	struct radeon_device *rdev = bo->rdev;
	struct radeon_mn *rmn;
	struct radeon_mn_node *node = NULL;
	struct list_head bos;
	struct interval_tree_node *it;

	rmn = radeon_mn_get(rdev);
	if (IS_ERR(rmn))
		return PTR_ERR(rmn);

	INIT_LIST_HEAD(&bos);

	mutex_lock(&rmn->lock);

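	/*
	 * Merge all ranges overlapping [addr, end] into a single node; the
	 * last node removed is reused below, any earlier ones are freed.
	 */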
	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
		kfree(node);
		node = container_of(it, struct radeon_mn_node, it);
		interval_tree_remove(&node->it, &rmn->objects);
		addr = min(it->start, addr);
		end = max(it->last, end);
		list_splice(&node->bos, &bos);
	}

	if (!node) {
		node = kmalloc(sizeof(struct radeon_mn_node), GFP_KERNEL);
		if (!node) {
			mutex_unlock(&rmn->lock);
			return -ENOMEM;
		}
	}

	bo->mn = rmn;

	node->it.start = addr;
	node->it.last = end;
	INIT_LIST_HEAD(&node->bos);
	list_splice(&bos, &node->bos);
	list_add(&bo->mn_list, &node->bos);

	interval_tree_insert(&node->it, &rmn->objects);

	mutex_unlock(&rmn->lock);

	return 0;
}

/**
 * radeon_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: radeon buffer object
 *
 * Removes any registration of MMU notifier updates from the buffer object.
 */
void radeon_mn_unregister(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_mn *rmn;
	struct list_head *head;

	mutex_lock(&rdev->mn_lock);
	rmn = bo->mn;
	if (rmn == NULL) {
		mutex_unlock(&rdev->mn_lock);
		return;
	}

	mutex_lock(&rmn->lock);
	/* save the next list entry for later */
	head = bo->mn_list.next;

	bo->mn = NULL;
	list_del(&bo->mn_list);

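	/*
	 * If this was the last BO on the node, head now points back at
	 * node->bos itself, so the node can be removed and freed.
	 */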
	if (list_empty(head)) {
		struct radeon_mn_node *node;
		node = container_of(head, struct radeon_mn_node, bos);
		interval_tree_remove(&node->it, &rmn->objects);
		kfree(node);
	}

	mutex_unlock(&rmn->lock);
	mutex_unlock(&rdev->mn_lock);
}