/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
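/*
 * One i915_mm_struct is kept per client mm_struct. It holds an mm_count
 * reference on the mm (not mm_users, so the address space itself may still
 * exit), owns the optional MMU notifier below, and is shared via its kref
 * by every userptr object that process creates. Release is deferred to a
 * worker; see i915_gem_userptr_init__mm_struct() for the lock-recursion
 * reasoning.
 */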
struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_i915_private *i915;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};
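/*
 * When CONFIG_MMU_NOTIFIER is available, each userptr object inserts an
 * interval-tree node covering its user range. When the core mm invalidates
 * any part of that range (munmap, fork, migration, ...), the notifier lets
 * us revoke the GPU's access before the pages are reused.
 */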
#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root objects;
	struct workqueue_struct *wq;
};

struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct drm_i915_gem_object *obj;
	struct interval_tree_node it;
	struct list_head link;
	struct work_struct work;
	bool attached;
};
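/*
 * Workqueue callback queued by the invalidation notifier for each affected
 * object: wait for the GPU to finish, unbind the object and drop its page
 * references under struct_mutex, then release the extra object reference
 * taken when the work was queued.
 */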
static void cancel_userptr(struct work_struct *work)
{
	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
	struct drm_i915_gem_object *obj = mo->obj;
	struct drm_device *dev = obj->base.dev;

	i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);

	mutex_lock(&dev->struct_mutex);
	/* Cancel any active worker and force us to re-evaluate gup */
	obj->userptr.work = NULL;

	if (obj->pages != NULL) {
		/* We are inside a kthread context and can't be interrupted */
		WARN_ON(i915_gem_object_unbind(obj));
		WARN_ON(i915_gem_object_put_pages(obj));
	}

	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);
}
static void add_object(struct i915_mmu_object *mo)
{
	if (mo->attached)
		return;

	interval_tree_insert(&mo->it, &mo->mn->objects);
	mo->attached = true;
}

static void del_object(struct i915_mmu_object *mo)
{
	if (!mo->attached)
		return;

	interval_tree_remove(&mo->it, &mo->mn->objects);
	mo->attached = false;
}
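/*
 * Invoked at the start of a range invalidation. Every object whose interval
 * overlaps [start, end) gets a cancel_userptr() worker queued and is removed
 * from the tree; we then flush the workqueue so that the invalidation only
 * proceeds once the GPU has relinquished the pages.
 */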
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct i915_mmu_object *mo;
	struct interval_tree_node *it;
	LIST_HEAD(cancelled);

	if (RB_EMPTY_ROOT(&mn->objects))
		return;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end--;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, start, end);
	while (it) {
		/* The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		mo = container_of(it, struct i915_mmu_object, it);
		if (kref_get_unless_zero(&mo->obj->base.refcount))
			queue_work(mn->wq, &mo->work);

		list_add(&mo->link, &cancelled);
		it = interval_tree_iter_next(it, start, end);
	}
	list_for_each_entry(mo, &cancelled, link)
		del_object(mo);
	spin_unlock(&mn->lock);

	flush_workqueue(mn->wq);
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};
static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int ret;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT;
	mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
	if (mn->wq == NULL) {
		kfree(mn);
		return ERR_PTR(-ENOMEM);
	}

	/* Protected by mmap_sem (write-lock) */
	ret = __mmu_notifier_register(&mn->mn, mm);
	if (ret) {
		destroy_workqueue(mn->wq);
		kfree(mn);
		return ERR_PTR(ret);
	}

	return mn;
}
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = obj->userptr.mmu_object;
	if (mo == NULL)
		return;

	spin_lock(&mo->mn->lock);
	del_object(mo);
	spin_unlock(&mo->mn->lock);
	kfree(mo);

	obj->userptr.mmu_object = NULL;
}
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn = mm->mn;

	if (mn)
		return mn;

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&mm->i915->mm_lock);
	if ((mn = mm->mn) == NULL) {
		mn = i915_mmu_notifier_create(mm->mm);
		if (!IS_ERR(mn))
			mm->mn = mn;
	}
	mutex_unlock(&mm->i915->mm_lock);
	up_write(&mm->mm->mmap_sem);

	return mn;
}
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (mo == NULL)
		return -ENOMEM;

	mo->mn = mn;
	mo->obj = obj;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
	INIT_WORK(&mo->work, cancel_userptr);

	obj->userptr.mmu_object = mo;
	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	destroy_workqueue(mn->wq);
	kfree(mn);
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif
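/* Find the i915_mm_struct already tracking this mm_struct, if any. */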
static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the clean
	 * up.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->i915 = to_i915(obj->base.dev);

		mm->mm = current->mm;
		atomic_inc(&current->mm->mm_count);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&mm->i915->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	schedule_work(&mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}
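/*
 * State for the deferred get_user_pages worker; see
 * __i915_gem_userptr_get_pages_schedule() for why page pinning must be
 * pushed out from under the struct_mutex into a workqueue.
 */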
struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif
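/*
 * Wrap the pinned pages in a sg_table. When swiotlb is active we keep one
 * page per segment, presumably so each segment stays within what swiotlb
 * can bounce; otherwise contiguous pages are coalesced into longer
 * segments.
 */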
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
	struct scatterlist *sg;
	int ret, n;

	*st = kmalloc(sizeof(**st), GFP_KERNEL);
	if (*st == NULL)
		return -ENOMEM;

	if (swiotlb_active()) {
		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
		if (ret)
			goto err;

		for_each_sg((*st)->sgl, sg, num_pages, n)
			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
	} else {
		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
						0, num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(*st);
	*st = NULL;
	return ret;
}

static int
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
			     struct page **pvec, int num_pages)
{
	int ret;

	ret = st_set_pages(&obj->pages, pvec, num_pages);
	if (ret)
		return ret;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret) {
		sg_free_table(obj->pages);
		kfree(obj->pages);
		obj->pages = NULL;
	}

	return ret;
}
static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
			      bool value)
{
	int ret = 0;

	/* During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmapping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held. To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
#if defined(CONFIG_MMU_NOTIFIER)
	if (obj->userptr.mmu_object == NULL)
		return 0;

	spin_lock(&obj->userptr.mmu_object->mn->lock);
	/* In order to serialise get_pages with an outstanding
	 * cancel_userptr, we must drop the struct_mutex and try again.
	 */
	if (!value)
		del_object(obj->userptr.mmu_object);
	else if (!work_pending(&obj->userptr.mmu_object->work))
		add_object(obj->userptr.mmu_object);
	else
		ret = -EAGAIN;
	spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

	return ret;
}
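/*
 * Worker that performs the actual page pinning on behalf of
 * i915_gem_userptr_get_pages(): it may take mmap_sem (which nests outside
 * struct_mutex) and block in get_user_pages_remote(), then retakes
 * struct_mutex to publish the result, unless the work was cancelled or
 * superseded in the meantime.
 */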
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	struct drm_device *dev = obj->base.dev;
	const int npages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;
		unsigned int flags = 0;

		if (!obj->userptr.read_only)
			flags |= FOLL_WRITE;

		ret = -EFAULT;
		if (atomic_inc_not_zero(&mm->mm_users)) {
			down_read(&mm->mmap_sem);
			while (pinned < npages) {
				ret = get_user_pages_remote
					(work->task, mm,
					 obj->userptr.ptr + pinned * PAGE_SIZE,
					 npages - pinned,
					 flags,
					 pvec + pinned, NULL);
				if (ret < 0)
					break;

				pinned += ret;
			}
			up_read(&mm->mmap_sem);
			mmput(mm);
		}
	}

	mutex_lock(&dev->struct_mutex);
	if (obj->userptr.work == &work->work) {
		if (pinned == npages) {
			ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
			if (ret == 0) {
				list_add_tail(&obj->global_list,
					      &to_i915(dev)->mm.unbound_list);
				obj->get_page.sg_pos = obj->pages->sgl;
				obj->get_page.sg_idx = 0;
				pinned = 0;
			}
		}
		obj->userptr.work = ERR_PTR(ret);
	}

	obj->userptr.workers--;
	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	put_task_struct(work->task);
	kfree(work);
}
static int
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
				      bool *active)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
		return -EAGAIN;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	obj->userptr.work = &work->work;
	obj->userptr.workers++;

	work->obj = i915_gem_object_get(obj);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	schedule_work(&work->work);

	*active = true;
	return -EAGAIN;
}
static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;
	bool active;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */

	if (obj->userptr.work) {
		/* active flag should still be held for the pending work */
		if (IS_ERR(obj->userptr.work))
			return PTR_ERR(obj->userptr.work);
		else
			return -EAGAIN;
	}

	/* Let the mmu-notifier know that we have begun and need cancellation */
	ret = __i915_gem_userptr_set_active(obj, true);
	if (ret)
		return ret;

	pvec = NULL;
	pinned = 0;
	if (obj->userptr.mm->mm == current->mm) {
		pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
				      GFP_TEMPORARY);
		if (pvec == NULL) {
			__i915_gem_userptr_set_active(obj, false);
			return -ENOMEM;
		}

		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
					       !obj->userptr.read_only, pvec);
	}

	active = false;
	if (pinned < 0)
		ret = pinned, pinned = 0;
	else if (pinned < num_pages)
		ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
	else
		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
	if (ret) {
		__i915_gem_userptr_set_active(obj, active);
		release_pages(pvec, pinned, 0);
	}
	drm_free_large(pvec);
	return ret;
}
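/*
 * Drop our references on the pinned pages, propagating any GPU writes back
 * to the process by dirtying the pages first.
 */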
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	BUG_ON(obj->userptr.work != NULL);
	__i915_gem_userptr_set_active(obj, false);

	if (obj->madv != I915_MADV_WILLNEED)
		obj->dirty = 0;

	i915_gem_gtt_finish_object(obj);

	for_each_sgt_page(page, sgt_iter, obj->pages) {
		if (obj->dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}
static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.release = i915_gem_userptr_release,
};
/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw, we cannot tell the GPU that a
		 * page is readonly, so this is just a placeholder in the uAPI.
		 */
		return -ENODEV;
	}

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->cache_level = I915_CACHE_LLC;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}
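/*
 * Example of driving the ioctl from userspace via libdrm, an illustrative
 * sketch only (error handling omitted; fd is an open DRM device node, and
 * ptr/size must be page aligned):
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (__u64)(uintptr_t)ptr,
 *		.user_size = size,
 *		.flags = 0,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		handle = arg.handle;	// GEM handle wrapping ptr
 */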
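/* One-time setup of the per-device userptr tracking, called during driver
 * initialisation.
 */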
void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);
}