/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2012-2014 Intel Corporation
 *
 * Based on amdgpu_mn, which bears the following notice:
 *
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/mmu_context.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_userptr.h"
#include "i915_scatterlist.h"

#ifdef CONFIG_MMU_NOTIFIER

/**
 * i915_gem_userptr_invalidate - callback to notify about mm change
 *
 * @mni: the interval notifier for the range (mm) about to be updated
 * @range: details on the invalidation
 * @cur_seq: Value to pass to mmu_interval_set_seq()
 *
 * Block for operations on BOs to finish and mark pages as accessed and
 * potentially dirty.
 */
static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
					const struct mmu_notifier_range *range,
					unsigned long cur_seq)
{
	struct drm_i915_gem_object *obj = container_of(mni, struct drm_i915_gem_object, userptr.notifier);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	long r;

	if (!mmu_notifier_range_blockable(range))
		return false;

	write_lock(&i915->mm.notifier_lock);

	mmu_interval_set_seq(mni, cur_seq);

	write_unlock(&i915->mm.notifier_lock);

	/*
	 * We don't wait when the process is exiting. This is valid
	 * because the object will be cleaned up anyway.
	 *
	 * This is also temporarily required as a hack, because we
	 * cannot currently force non-consistent batch buffers to preempt
	 * and reschedule by waiting on them, which hangs processes on exit.
	 */
	if (current->flags & PF_EXITING)
		return true;

	/* We will unbind on next submission; we still hold userptr pins. */
	r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
				  MAX_SCHEDULE_TIMEOUT);
	if (r <= 0)
		drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);

	return true;
}

static const struct mmu_interval_notifier_ops i915_gem_userptr_notifier_ops = {
	.invalidate = i915_gem_userptr_invalidate,
};

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj)
{
	return mmu_interval_notifier_insert(&obj->userptr.notifier, current->mm,
					    obj->userptr.ptr, obj->base.size,
					    &i915_gem_userptr_notifier_ops);
}

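/*
 * Drop one reference on the userptr page array. When the last reference is
 * released, the pinned pages are unpinned and the array itself is freed.
 * Callers must hold the object lock (shared is sufficient).
 */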
static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
{
	struct page **pvec = NULL;

	assert_object_held_shared(obj);

	if (!--obj->userptr.page_ref) {
		pvec = obj->userptr.pvec;
		obj->userptr.pvec = NULL;
	}
	GEM_BUG_ON(obj->userptr.page_ref < 0);

	if (pvec) {
		const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(pvec, num_pages);
		kvfree(pvec);
	}
}

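/*
 * Build an sg_table from the pages pinned by submit_init and hand it to the
 * object. If DMA mapping fails with large segments, retry with single-page
 * segments. Expects a live page_ref; otherwise returns -EAGAIN so the caller
 * repeats the submit_init dance.
 */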
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	unsigned int max_segment = i915_sg_segment_size(obj->base.dev->dev);
	struct sg_table *st;
	struct page **pvec;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	if (!obj->userptr.page_ref) {
		ret = -EAGAIN;
		goto err_free;
	}

	obj->userptr.page_ref++;
	pvec = obj->userptr.pvec;

alloc_table:
	ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
						num_pages << PAGE_SHIFT,
						max_segment, GFP_KERNEL);
	if (ret)
		goto err;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		sg_free_table(st);

		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}

		goto err;
	}

	WARN_ON_ONCE(!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE));
	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st);

	return 0;

err:
	i915_gem_object_userptr_drop_ref(obj);
err_free:
	kfree(st);
	return ret;
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	if (!pages)
		return;

	__i915_gem_object_release_shmem(obj, pages, true);
	i915_gem_gtt_finish_pages(obj, pages);

	/*
	 * We always mark objects as dirty when they are used by the GPU,
	 * just in case. However, if we set the vma as being read-only we know
	 * that the object will never have been written to.
	 */
	if (i915_gem_object_is_readonly(obj))
		obj->mm.dirty = false;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty && trylock_page(page)) {
			/*
			 * As this may not be anonymous memory (e.g. shmem)
			 * but exist on a real mapping, we have to lock
			 * the page in order to dirty it -- holding
			 * the page reference is not sufficient to
			 * prevent the inode from being truncated.
			 * Play safe and take the lock.
			 *
			 * However...!
			 *
			 * The mmu-notifier can be invalidated for a
			 * migrate_folio that is already holding the lock
			 * on the folio. Such a try_to_unmap() will result
			 * in us calling put_pages() and so recursively try
			 * to lock the page. We avoid that deadlock with
			 * a trylock_page() and in exchange we risk missing
			 * some page dirtying.
			 */
			set_page_dirty(page);
			unlock_page(page);
		}

		mark_page_accessed(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);

	i915_gem_object_userptr_drop_ref(obj);
}

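/*
 * Evict the object from the GTT and release its current page array. Called
 * with the object lock held when submit_init finds that the cached pages are
 * stale, i.e. the notifier sequence number has moved on.
 */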
static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	if (GEM_WARN_ON(i915_gem_object_has_pinned_pages(obj)))
		return -EBUSY;

	assert_object_held(obj);

	pages = __i915_gem_object_unset_pages(obj);
	if (!IS_ERR_OR_NULL(pages))
		i915_gem_userptr_put_pages(obj, pages);

	return err;
}

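/*
 * Prepare a userptr object for execbuf submission: sample the notifier
 * sequence with mmu_interval_read_begin(), pin the user pages with
 * pin_user_pages_fast(), and install them unless the notifier fired in the
 * meantime, in which case we return -EAGAIN and the caller restarts.
 */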
int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	unsigned int gup_flags = 0;
	unsigned long notifier_seq;
	int pinned, ret;

	if (obj->userptr.notifier.mm != current->mm)
		return -EFAULT;

	notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	if (notifier_seq == obj->userptr.notifier_seq && obj->userptr.pvec) {
		i915_gem_object_unlock(obj);
		return 0;
	}

	ret = i915_gem_object_userptr_unbind(obj);
	i915_gem_object_unlock(obj);
	if (ret)
		return ret;

	pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	if (!i915_gem_object_is_readonly(obj))
		gup_flags |= FOLL_WRITE;

	pinned = 0;
	while (pinned < num_pages) {
		ret = pin_user_pages_fast(obj->userptr.ptr + pinned * PAGE_SIZE,
					  num_pages - pinned, gup_flags,
					  &pvec[pinned]);
		if (ret < 0)
			goto out;

		pinned += ret;
	}

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		goto out;

	if (mmu_interval_read_retry(&obj->userptr.notifier,
				    !obj->userptr.page_ref ? notifier_seq :
				    obj->userptr.notifier_seq)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	if (!obj->userptr.page_ref++) {
		obj->userptr.pvec = pvec;
		obj->userptr.notifier_seq = notifier_seq;
		pvec = NULL;
		ret = ____i915_gem_object_get_pages(obj);
	}

	obj->userptr.page_ref--;

out_unlock:
	i915_gem_object_unlock(obj);

out:
	if (pvec) {
		unpin_user_pages(pvec, pinned);
		kvfree(pvec);
	}

	return ret;
}

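/*
 * Second half of the begin/retry pattern: after the request is queued, verify
 * that no invalidation raced with submit_init. A collision means the pinned
 * pages may be stale, so ask the caller to restart with -EAGAIN.
 */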
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj)
{
	if (mmu_interval_read_retry(&obj->userptr.notifier,
				    obj->userptr.notifier_seq)) {
		/* We collided with the mmu notifier, need to retry */
		return -EAGAIN;
	}

	return 0;
}

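/*
 * Check that the user range backing the object can still be pinned and
 * mapped, without keeping the pages around: pin them and immediately unpin.
 */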
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
{
	int err;

	err = i915_gem_object_userptr_submit_init(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (!err) {
		/*
		 * Since we only check validity, not use the pages,
		 * it doesn't matter if we collide with the mmu notifier,
		 * and -EAGAIN handling is not required.
		 */
		err = i915_gem_object_pin_pages(obj);
		if (!err)
			i915_gem_object_unpin_pages(obj);

		i915_gem_object_unlock(obj);
	}

	return err;
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	GEM_WARN_ON(obj->userptr.page_ref);

	mmu_interval_notifier_remove(&obj->userptr.notifier);
	obj->userptr.notifier.mm = NULL;
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	drm_dbg(obj->base.dev, "Exporting userptr no longer allowed\n");

	return -EINVAL;
}

static int
i915_gem_userptr_pwrite(struct drm_i915_gem_object *obj,
			const struct drm_i915_gem_pwrite *args)
{
	drm_dbg(obj->base.dev, "pwrite to userptr no longer allowed\n");

	return -EINVAL;
}

static int
i915_gem_userptr_pread(struct drm_i915_gem_object *obj,
		       const struct drm_i915_gem_pread *args)
{
	drm_dbg(obj->base.dev, "pread from userptr no longer allowed\n");

	return -EINVAL;
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.name = "i915_gem_object_userptr",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
		 I915_GEM_OBJECT_NO_MMAP |
		 I915_GEM_OBJECT_IS_PROXY,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.pwrite = i915_gem_userptr_pwrite,
	.pread = i915_gem_userptr_pread,
	.release = i915_gem_userptr_release,
};

#endif

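/*
 * Walk the VMAs covering [addr, addr + len) and fail with -EFAULT if the
 * range contains a hole or any PFN/mixed mapping, i.e. anything that is not
 * backed by regular struct pages.
 */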
static int
probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
{
	VMA_ITERATOR(vmi, mm, addr);
	struct vm_area_struct *vma;
	unsigned long end = addr + len;

	mmap_read_lock(mm);
	for_each_vma_range(vmi, vma, end) {
		/* Check for holes, note that we also update the addr below */
		if (vma->vm_start > addr)
			break;

		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			break;

		addr = vma->vm_end;
	}
	mmap_read_unlock(mm);

	if (vma || addr < end)
		return -EFAULT;
	return 0;
}

/*
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned to the system,
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
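/*
 * For illustration, a minimal userspace call sequence (hypothetical
 * variables, error handling omitted):
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (__u64)(uintptr_t)ptr,	// page-aligned
 *		.user_size = size,			// page-aligned
 *		.flags = 0,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg);
 *	// on success, arg.handle names the new bo
 */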
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	static struct lock_class_key __maybe_unused lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object __maybe_unused *obj;
	int __maybe_unused ret;
	u32 __maybe_unused handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/*
		 * We cannot support coherent userptr objects on hardware
		 * without LLC and with broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED |
			    I915_USERPTR_PROBE))
		return -EINVAL;

	if (i915_gem_object_size_2big(args->user_size))
		return -E2BIG;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_UNSYNCHRONIZED)
		return -ENODEV;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/*
		 * On almost all of the older hw, we cannot tell the GPU that
		 * a page is readonly.
		 */
		if (!to_gt(dev_priv)->vm->has_read_only)
			return -ENODEV;
	}

	if (args->flags & I915_USERPTR_PROBE) {
		/*
		 * Check that the range pointed to represents real struct
		 * pages and not iomappings (at this moment in time!)
		 */
		ret = probe_range(current->mm, args->user_ptr, args->user_size);
		if (ret)
			return ret;
	}

#ifdef CONFIG_MMU_NOTIFIER
	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class,
			     I915_BO_ALLOC_USER);
	obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.notifier_seq = ULONG_MAX;
	if (args->flags & I915_USERPTR_READ_ONLY)
		i915_gem_object_set_readonly(obj);

	/*
	 * And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mmu_notifier(obj);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
#else
	return -ENODEV;
#endif
}

int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_MMU_NOTIFIER
	rwlock_init(&dev_priv->mm.notifier_lock);
#endif

	return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
}