/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "radeon.h"

struct radeon_mn {
	/* constant after initialisation */
	struct radeon_device	*rdev;
	struct mm_struct	*mm;
	struct mmu_notifier	mn;

	/* only used on destruction */
	struct work_struct	work;

	/* protected by rdev->mn_lock */
	struct hlist_node	node;

	/* objects protected by lock */
	struct mutex		lock;
	struct rb_root		objects;
};
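
/*
 * Lock ordering: whenever both locks are held, rdev->mn_lock is taken
 * before rmn->lock (see radeon_mn_destroy() and radeon_mn_unregister()
 * below); acquiring them in the opposite order would risk deadlock.
 */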

/**
 * radeon_mn_destroy - destroy the rmn
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item
 */
static void radeon_mn_destroy(struct work_struct *work)
{
	struct radeon_mn *rmn = container_of(work, struct radeon_mn, work);
	struct radeon_device *rdev = rmn->rdev;
	struct radeon_bo *bo, *next;

	mutex_lock(&rdev->mn_lock);
	mutex_lock(&rmn->lock);
	hash_del(&rmn->node);
	rbtree_postorder_for_each_entry_safe(bo, next, &rmn->objects, mn_it.rb) {
		interval_tree_remove(&bo->mn_it, &rmn->objects);
		bo->mn = NULL;
	}
	mutex_unlock(&rmn->lock);
	mutex_unlock(&rdev->mn_lock);
	mmu_notifier_unregister(&rmn->mn, rmn->mm);
	kfree(rmn);
}

/**
 * radeon_mn_release - callback to notify about mm destruction
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 *
 * Schedule a work item to lazily destroy our notifier.
 */
static void radeon_mn_release(struct mmu_notifier *mn,
			      struct mm_struct *mm)
{
	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
	INIT_WORK(&rmn->work, radeon_mn_destroy);
	schedule_work(&rmn->work);
}

/**
 * radeon_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 * We block until all BOs between start and end are idle, then unmap
 * them by moving them back into the system domain.
 */
static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	mutex_lock(&rmn->lock);

	it = interval_tree_iter_first(&rmn->objects, start, end);
	while (it) {
		struct radeon_bo *bo;
		struct fence *fence;
		int r;

		bo = container_of(it, struct radeon_bo, mn_it);
		it = interval_tree_iter_next(it, start, end);

		r = radeon_bo_reserve(bo, true);
		if (r) {
			DRM_ERROR("(%d) failed to reserve user bo\n", r);
			continue;
		}

		fence = reservation_object_get_excl(bo->tbo.resv);
		if (fence) {
			r = radeon_fence_wait((struct radeon_fence *)fence, false);
			if (r)
				DRM_ERROR("(%d) failed to wait for user bo\n", r);
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
		if (r)
			DRM_ERROR("(%d) failed to validate user bo\n", r);

		radeon_bo_unreserve(bo);
	}

	mutex_unlock(&rmn->lock);
}

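/*
 * The interval tree stores inclusive [start, last] ranges while the mmu
 * notifier passes an exclusive end, hence the "end -= 1" above: an
 * invalidation of the page range [0x1000, 0x2000) is matched against
 * registered BOs as the inclusive interval [0x1000, 0x1fff].
 */
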
static const struct mmu_notifier_ops radeon_mn_ops = {
	.release = radeon_mn_release,
	.invalidate_range_start = radeon_mn_invalidate_range_start,
};

/**
 * radeon_mn_get - create notifier context
 *
 * @rdev: radeon device pointer
 *
 * Creates a notifier context for current->mm, or returns the already
 * existing one for this mm.
 */
static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev)
{
	struct mm_struct *mm = current->mm;
	struct radeon_mn *rmn;
	int r;

	down_write(&mm->mmap_sem);
	mutex_lock(&rdev->mn_lock);

	hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm)
		if (rmn->mm == mm)
			goto release_locks;

	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
	if (!rmn) {
		rmn = ERR_PTR(-ENOMEM);
		goto release_locks;
	}

	rmn->rdev = rdev;
	rmn->mm = mm;
	rmn->mn.ops = &radeon_mn_ops;
	mutex_init(&rmn->lock);
	rmn->objects = RB_ROOT;

	r = __mmu_notifier_register(&rmn->mn, mm);
	if (r)
		goto free_rmn;

	hash_add(rdev->mn_hash, &rmn->node, (unsigned long)mm);

release_locks:
	mutex_unlock(&rdev->mn_lock);
	up_write(&mm->mmap_sem);

	return rmn;

free_rmn:
	mutex_unlock(&rdev->mn_lock);
	up_write(&mm->mmap_sem);
	kfree(rmn);

	return ERR_PTR(r);
}

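/*
 * Note that notifier contexts are deduplicated per process: the hash
 * above is keyed on the mm_struct pointer, so all BOs registered from
 * the same mm share one radeon_mn and one interval tree.
 */
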
/**
 * radeon_mn_register - register a BO for notifier updates
 *
 * @bo: radeon buffer object
 * @addr: userptr address we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
{
	unsigned long end = addr + radeon_bo_size(bo) - 1;
	struct radeon_device *rdev = bo->rdev;
	struct radeon_mn *rmn;
	struct interval_tree_node *it;

	rmn = radeon_mn_get(rdev);
	if (IS_ERR(rmn))
		return PTR_ERR(rmn);

	mutex_lock(&rmn->lock);

	it = interval_tree_iter_first(&rmn->objects, addr, end);
	if (it) {
		mutex_unlock(&rmn->lock);
		return -EEXIST;
	}

	bo->mn = rmn;
	bo->mn_it.start = addr;
	bo->mn_it.last = end;
	interval_tree_insert(&bo->mn_it, &rmn->objects);

	mutex_unlock(&rmn->lock);

	return 0;
}

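/*
 * Minimal usage sketch (illustrative only; the surrounding ioctl code
 * and the "args" variable are assumptions, not taken from this file).
 * A userptr path would register the BO right after creating it, so
 * that later CPU-side mapping changes trigger
 * radeon_mn_invalidate_range_start() above:
 *
 *	struct radeon_bo *bo;
 *	int r;
 *
 *	// ... create bo covering the user address range ...
 *	r = radeon_mn_register(bo, args->addr);
 *	if (r)
 *		return r;	// -EEXIST if the range is already tracked
 */
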
/**
 * radeon_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: radeon buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void radeon_mn_unregister(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_mn *rmn;

	mutex_lock(&rdev->mn_lock);
	rmn = bo->mn;
	if (rmn == NULL) {
		mutex_unlock(&rdev->mn_lock);
		return;
	}

	mutex_lock(&rmn->lock);
	interval_tree_remove(&bo->mn_it, &rmn->objects);
	bo->mn = NULL;
	mutex_unlock(&rmn->lock);
	mutex_unlock(&rdev->mn_lock);
}
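
/*
 * Teardown sketch (an illustrative assumption about the call site, not
 * taken from this file): radeon_mn_unregister() would be called from
 * the BO destruction path before the backing pages are released:
 *
 *	radeon_mn_unregister(bo);	// safe even if bo->mn is NULL
 *	// ... release backing pages, then free the bo itself ...
 */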