/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_i915_private *i915;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root objects;
	struct workqueue_struct *wq;
};

struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct drm_i915_gem_object *obj;
	struct interval_tree_node it;
	struct list_head link;
	struct work_struct work;
	bool attached;
};

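/*
 * Editor's sketch of how these structures relate (not from the original
 * source): one i915_mm_struct is kept per process mm_struct that uses
 * userptr, each owning at most one i915_mmu_notifier; every userptr GEM
 * object gets an i915_mmu_object indexed by its user address range in
 * mn->objects, so an invalidation can be matched against overlapping
 * objects by interval lookup.
 */
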
static void cancel_userptr(struct work_struct *work)
{
	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
	struct drm_i915_gem_object *obj = mo->obj;
	struct drm_device *dev = obj->base.dev;

	i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);

	mutex_lock(&dev->struct_mutex);
	/* Cancel any active worker and force us to re-evaluate gup */
	obj->userptr.work = NULL;

	/* We are inside a kthread context and can't be interrupted */
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
	WARN_ONCE(obj->mm.pages,
		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
		  obj->bind_count,
		  atomic_read(&obj->mm.pages_pin_count),
		  obj->pin_display);

	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);
}

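/*
 * Editor's note: the object wait above deliberately runs before taking
 * struct_mutex, so the mutex is not held across a potentially long GPU
 * wait. Since this work item cannot return an error, a failure to
 * release the pages is reported via WARN_ONCE rather than propagated.
 */
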
static void add_object(struct i915_mmu_object *mo)
{
	if (mo->attached)
		return;

	interval_tree_insert(&mo->it, &mo->mn->objects);
	mo->attached = true;
}

static void del_object(struct i915_mmu_object *mo)
{
	if (!mo->attached)
		return;

	interval_tree_remove(&mo->it, &mo->mn->objects);
	mo->attached = false;
}

static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct i915_mmu_object *mo;
	struct interval_tree_node *it;
	LIST_HEAD(cancelled);

	if (RB_EMPTY_ROOT(&mn->objects))
		return;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end--;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, start, end);
	while (it) {
		/* The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		mo = container_of(it, struct i915_mmu_object, it);
		if (kref_get_unless_zero(&mo->obj->base.refcount))
			queue_work(mn->wq, &mo->work);

		list_add(&mo->link, &cancelled);
		it = interval_tree_iter_next(it, start, end);
	}
	list_for_each_entry(mo, &cancelled, link)
		del_object(mo);
	spin_unlock(&mn->lock);

	flush_workqueue(mn->wq);
}

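/*
 * Editor's sketch of the race the kref_get_unless_zero() above guards
 * against (drawn from the in-code comment, not original text):
 *
 *	CPU0 (invalidate)			CPU1 (object free)
 *	spin_lock(&mn->lock)			kref_put() drops to zero
 *	kref_get_unless_zero() fails		... object teardown begins
 *	-> skip queueing cancel work		del_object() under mn->lock
 *
 * Only objects whose refcount can still be raised are queued for
 * cancellation; dying objects are left to their own release path.
 */
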
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int ret;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT;
	mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
	if (mn->wq == NULL) {
		kfree(mn);
		return ERR_PTR(-ENOMEM);
	}

	/* Protected by mmap_sem (write-lock) */
	ret = __mmu_notifier_register(&mn->mn, mm);
	if (ret) {
		destroy_workqueue(mn->wq);
		kfree(mn);
		return ERR_PTR(ret);
	}

	return mn;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = obj->userptr.mmu_object;
	if (mo == NULL)
		return;

	spin_lock(&mo->mn->lock);
	del_object(mo);
	spin_unlock(&mo->mn->lock);
	kfree(mo);

	obj->userptr.mmu_object = NULL;
}

static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn = mm->mn;

	if (mn)
		return mn;

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&mm->i915->mm_lock);
	if ((mn = mm->mn) == NULL) {
		mn = i915_mmu_notifier_create(mm->mm);
		if (!IS_ERR(mn))
			mm->mn = mn;
	}
	mutex_unlock(&mm->i915->mm_lock);
	up_write(&mm->mm->mmap_sem);

	return mn;
}

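/*
 * Editor's note on the locking above: the unlocked read of mm->mn is a
 * fast path; on a miss the value is re-checked under dev_priv->mm_lock
 * (classic double-checked locking). mmap_sem is taken for write outside
 * the mutex because __mmu_notifier_register() expects it to be held for
 * write during registration.
 */
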
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (mo == NULL)
		return -ENOMEM;

	mo->mn = mn;
	mo->obj = obj;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
	INIT_WORK(&mo->work, cancel_userptr);

	obj->userptr.mmu_object = mo;
	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	destroy_workqueue(mn->wq);
	kfree(mn);
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif

static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the clean
	 * up.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->i915 = to_i915(obj->base.dev);

		mm->mm = current->mm;
		mmgrab(current->mm);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&mm->i915->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	schedule_work(&mm->work);
}

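/*
 * Editor's note: kref_put_mutex() below acquires dev_priv->mm_lock before
 * invoking __i915_mm_struct_free() above, which is why that callback ends
 * with a bare mutex_unlock() after hash_del().
 */
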
static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}

struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif

static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
	struct scatterlist *sg;
	int ret, n;

	*st = kmalloc(sizeof(**st), GFP_KERNEL);
	if (*st == NULL)
		return -ENOMEM;

	if (swiotlb_active()) {
		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
		if (ret)
			goto err;

		for_each_sg((*st)->sgl, sg, num_pages, n)
			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
	} else {
		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
						0, num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(*st);
	*st = NULL;
	return ret;
}

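/*
 * Editor's note on the two paths above (an assumption drawn from the
 * swiotlb check, not original text): sg_alloc_table_from_pages() coalesces
 * physically contiguous pages into larger segments, which the swiotlb
 * bounce-buffering path cannot cope with, so with swiotlb active we build
 * one single-page segment per page instead.
 */
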
static struct sg_table *
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
			     struct page **pvec, int num_pages)
{
	struct sg_table *pages;
	int ret;

	ret = st_set_pages(&pages, pvec, num_pages);
	if (ret)
		return ERR_PTR(ret);

	ret = i915_gem_gtt_prepare_pages(obj, pages);
	if (ret) {
		sg_free_table(pages);
		kfree(pages);
		return ERR_PTR(ret);
	}

	return pages;
}

static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
			      bool value)
{
	int ret = 0;

	/* During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmapping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held. To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
#if defined(CONFIG_MMU_NOTIFIER)
	if (obj->userptr.mmu_object == NULL)
		return 0;

	spin_lock(&obj->userptr.mmu_object->mn->lock);
	/* In order to serialise get_pages with an outstanding
	 * cancel_userptr, we must drop the struct_mutex and try again.
	 */
	if (!value)
		del_object(obj->userptr.mmu_object);
	else if (!work_pending(&obj->userptr.mmu_object->work))
		add_object(obj->userptr.mmu_object);
	else
		ret = -EAGAIN;
	spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

	return ret;
}

static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	const int npages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;
		unsigned int flags = 0;

		if (!obj->userptr.read_only)
			flags |= FOLL_WRITE;

		ret = -EFAULT;
		if (mmget_not_zero(mm)) {
			down_read(&mm->mmap_sem);
			while (pinned < npages) {
				ret = get_user_pages_remote
					(work->task, mm,
					 obj->userptr.ptr + pinned * PAGE_SIZE,
					 npages - pinned,
					 flags,
					 pvec + pinned, NULL, NULL);
				if (ret < 0)
					break;

				pinned += ret;
			}
			up_read(&mm->mmap_sem);
			mmput(mm);
		}
	}

	mutex_lock(&obj->mm.lock);
	if (obj->userptr.work == &work->work) {
		struct sg_table *pages = ERR_PTR(ret);

		if (pinned == npages) {
			pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
			if (!IS_ERR(pages)) {
				__i915_gem_object_set_pages(obj, pages);
				pinned = 0;
				pages = NULL;
			}
		}

		obj->userptr.work = ERR_CAST(pages);
	}
	mutex_unlock(&obj->mm.lock);

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	i915_gem_object_put(obj);
	put_task_struct(work->task);
	kfree(work);
}

static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
				      bool *active)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return ERR_PTR(-ENOMEM);

	obj->userptr.work = &work->work;

	work->obj = i915_gem_object_get(obj);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	schedule_work(&work->work);

	*active = true;
	return ERR_PTR(-EAGAIN);
}

static struct sg_table *
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	struct sg_table *pages;
	int pinned, ret;
	bool active;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */

	if (obj->userptr.work) {
		/* active flag should still be held for the pending work */
		if (IS_ERR(obj->userptr.work))
			return ERR_CAST(obj->userptr.work);
		else
			return ERR_PTR(-EAGAIN);
	}

	/* Let the mmu-notifier know that we have begun and need cancellation */
	ret = __i915_gem_userptr_set_active(obj, true);
	if (ret)
		return ERR_PTR(ret);

	pvec = NULL;
	pinned = 0;
	if (obj->userptr.mm->mm == current->mm) {
		pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
				      GFP_TEMPORARY);
		if (pvec == NULL) {
			__i915_gem_userptr_set_active(obj, false);
			return ERR_PTR(-ENOMEM);
		}

		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
					       !obj->userptr.read_only, pvec);
	}

	active = false;
	if (pinned < 0)
		pages = ERR_PTR(pinned), pinned = 0;
	else if (pinned < num_pages)
		pages = __i915_gem_userptr_get_pages_schedule(obj, &active);
	else
		pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
	if (IS_ERR(pages)) {
		__i915_gem_userptr_set_active(obj, active);
		release_pages(pvec, pinned, 0);
	}
	drm_free_large(pvec);
	return pages;
}

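/*
 * Editor's summary of the three outcomes above: a negative pin count
 * propagates the gup error; a partial pin hands off to the worker and
 * reports -EAGAIN so the caller retries; a complete fast pin builds the
 * sg_table immediately on this path.
 */
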
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	BUG_ON(obj->userptr.work != NULL);
	__i915_gem_userptr_set_active(obj, false);

	if (obj->mm.madv != I915_MADV_WILLNEED)
		obj->mm.dirty = false;

	i915_gem_gtt_finish_pages(obj, pages);

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.release = i915_gem_userptr_release,
};

/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw, we cannot tell the GPU that a
		 * page is readonly, so this is just a placeholder in the uAPI.
		 */
		return -ENODEV;
	}

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->cache_level = I915_CACHE_LLC;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}

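/*
 * Editor's sketch of userspace usage (illustrative, not part of the
 * driver; the DRM fd, buffer, and error handling are assumptions):
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (__u64)(uintptr_t)ptr,	// page-aligned buffer
 *		.user_size = size,			// multiple of page size
 *		.flags = 0,
 *	};
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		handle = arg.handle;	// GEM handle usable with execbuf
 */
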
void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);
}