/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/anon_inodes.h>
#include <linux/mman.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_mman.h"
#include "i915_mm.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_gem_ttm.h"
#include "i915_vma.h"

static inline bool
__vma_matches(struct vm_area_struct *vma, struct file *filp,
	      unsigned long addr, unsigned long size)
{
	if (vma->vm_file != filp)
		return false;

	return vma->vm_start == addr &&
	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}

/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 * it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on;
 * hiding the mmap call in a driver-private ioctl will break that. The i915
 * driver only does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	/*
	 * mmap ioctl is disallowed for all discrete platforms,
	 * and for all platforms with GRAPHICS_VER > 12.
	 */
	if (IS_DGFX(i915) || GRAPHICS_VER(i915) > 12)
		return -EOPNOTSUPP;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !pat_enabled())
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		addr = -ENXIO;
		goto err;
	}

	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
		addr = -EINVAL;
		goto err;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (IS_ERR_VALUE(addr))
		goto err;

	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (mmap_write_lock_killable(mm)) {
			addr = -EINTR;
			goto err;
		}
		vma = find_vma(mm, addr);
		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		mmap_write_unlock(mm);
		if (IS_ERR_VALUE(addr))
			goto err;
	}
	i915_gem_object_put(obj);

	args->addr_ptr = (u64)addr;
	return 0;

err:
	i915_gem_object_put(obj);
	return addr;
}
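
/*
 * Illustrative userspace sketch (not code from this file) of the modern
 * flow recommended in the comment above: ask the kernel for a fake offset
 * via DRM_IOCTL_I915_GEM_MMAP_OFFSET, then mmap the DRM fd itself. "fd",
 * "handle" and "size" are assumed to be supplied by the caller; error
 * handling is elided.
 *
 *	struct drm_i915_gem_mmap_offset arg = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WC,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, arg.offset);
 */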

static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}

/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * 2 - Recognise WC as a separate cache domain so that we can flush the
 *     delayed writes via GTT before performing direct access via WC.
 *
 * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
 *     pagefault; swapin remains transparent.
 *
 * 4 - Support multiple fault handlers per object depending on object's
 *     backing storage (a.k.a. MMAP_OFFSET).
 *
 * Restrictions:
 *
 * * snoopable objects cannot be accessed via the GTT. It can cause machine
 *   hangs on some architectures, corruption on others. An attempt to service
 *   a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 * * the object must be able to fit into RAM (physical memory, though not
 *   limited to the mappable aperture).
 *
 * Caveats:
 *
 * * a new GTT page fault will synchronize rendering from the GPU and flush
 *   all data to system memory. Subsequent access will not be synchronized.
 *
 * * all mappings are revoked on runtime device suspend.
 *
 * * there are only 8, 16 or 32 fence registers to share between all users
 *   (older machines require fence register for display and blitter access
 *   as well). Contention of the fence registers will cause the previous users
 *   to be unmapped and any new access will generate new page faults.
 *
 * * running out of memory while servicing a fault may generate a SIGBUS,
 *   rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 4;
}
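
/*
 * Hedged usage sketch (an assumption about typical userspace, not code
 * from this file): the version above is reported to userspace through
 * I915_PARAM_MMAP_GTT_VERSION, e.g.
 *
 *	int value = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_MMAP_GTT_VERSION,
 *		.value = &value,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 *
 * where "fd" is an open DRM fd provided by the caller.
 */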

static inline struct i915_ggtt_view
compute_partial_view(const struct drm_i915_gem_object *obj,
		     pgoff_t page_offset,
		     unsigned int chunk)
{
	struct i915_ggtt_view view;

	if (i915_gem_object_is_tiled(obj))
		chunk = roundup(chunk, tile_row_pages(obj) ?: 1);

	view.type = I915_GGTT_VIEW_PARTIAL;
	view.partial.offset = rounddown(page_offset, chunk);
	view.partial.size =
		min_t(unsigned int, chunk,
		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);

	/* If the partial covers the entire object, just create a normal VMA. */
	if (chunk >= obj->base.size >> PAGE_SHIFT)
		view.type = I915_GGTT_VIEW_NORMAL;

	return view;
}
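
/*
 * Worked example (illustrative numbers only): with chunk = MIN_CHUNK_PAGES
 * (SZ_1M >> PAGE_SHIFT = 256 pages for 4K pages) and a fault at
 * page_offset = 700 into an untiled 1024-page object:
 *
 *	view.partial.offset = rounddown(700, 256) = 512
 *	view.partial.size   = min(256, 1024 - 512) = 256
 *
 * i.e. the view spans pages [512, 768). For a tiled object, chunk is first
 * rounded up to a whole number of tile rows so that the view remains
 * suitable for fenced access.
 */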

static vm_fault_t i915_error_to_vmf_fault(int err)
{
	switch (err) {
	default:
		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
		fallthrough;
	case -EIO: /* shmemfs failure from swap device */
	case -EFAULT: /* purged object */
	case -ENODEV: /* bad object, how did you get here! */
	case -ENXIO: /* unable to access backing store (on device) */
		return VM_FAULT_SIGBUS;

	case -ENOMEM: /* our allocation failure */
		return VM_FAULT_OOM;

	case 0:
	case -EAGAIN:
	case -ENOSPC: /* transient failure to evict? */
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	}
}

static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
{
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	resource_size_t iomap;
	int err;

	/* Sanity check that we allow writing into this object */
	if (unlikely(i915_gem_object_is_readonly(obj) &&
		     area->vm_flags & VM_WRITE))
		return VM_FAULT_SIGBUS;

	if (i915_gem_object_lock_interruptible(obj, NULL))
		return VM_FAULT_NOPAGE;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;

	iomap = -1;
	if (!i915_gem_object_has_struct_page(obj)) {
		iomap = obj->mm.region->iomap.base;
		iomap -= obj->mm.region->region.start;
	}

	/* PTEs are revoked in obj->ops->put_pages() */
	err = remap_io_sg(area,
			  area->vm_start, area->vm_end - area->vm_start,
			  obj->mm.pages->sgl, iomap);

	if (area->vm_flags & VM_WRITE) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);

out:
	i915_gem_object_unlock(obj);
	return i915_error_to_vmf_fault(err);
}

static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	bool write = area->vm_flags & VM_WRITE;
	struct i915_gem_ww_ctx ww;
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	pgoff_t page_offset;
	int srcu;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	wakeref = intel_runtime_pm_get(rpm);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (ret)
		goto err_rpm;

	/* Sanity check that we allow writing into this object */
	if (i915_gem_object_is_readonly(obj) && write) {
		ret = -EFAULT;
		goto err_rpm;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_rpm;

	ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
	if (ret)
		goto err_pages;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
					  PIN_MAPPABLE |
					  PIN_NONBLOCK /* NOWARN */ |
					  PIN_NOEVICT);
	if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
		/* Use a partial view if it is bigger than available space */
		struct i915_ggtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
		unsigned int flags;

		flags = PIN_MAPPABLE | PIN_NOSEARCH;
		if (view.type == I915_GGTT_VIEW_NORMAL)
			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */

		/*
		 * Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */

		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
			flags = PIN_MAPPABLE;
			view.type = I915_GGTT_VIEW_PARTIAL;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}

		/*
		 * The entire mappable GGTT is pinned? Unexpected!
		 * Try to evict the object we locked too, as normally we skip it
		 * due to lack of short term pinning inside execbuf.
		 */
		if (vma == ERR_PTR(-ENOSPC)) {
			ret = mutex_lock_interruptible(&ggtt->vm.mutex);
			if (!ret) {
				ret = i915_gem_evict_vm(&ggtt->vm, &ww);
				mutex_unlock(&ggtt->vm.mutex);
			}
			if (ret)
				goto err_reset;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_reset;
	}

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
		ret = -EFAULT;
		goto err_unpin;
	}

	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->iomap);
	if (ret)
		goto err_fence;

	assert_rpm_wakelock_held(rpm);

	/* Mark as being mmapped into userspace for later revocation */
	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &to_gt(i915)->ggtt->userfault_list);
	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);

	/* Track the mmo associated with the fenced vma */
	vma->mmo = mmo;

	if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
		intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref,
				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

	if (write) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		i915_vma_set_ggtt_write(vma);
		obj->mm.dirty = true;
	}

err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_reset:
	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_pages:
	i915_gem_object_unpin_pages(obj);
err_rpm:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_runtime_pm_put(rpm, wakeref);
	return i915_error_to_vmf_fault(ret);
}

static int
vm_access(struct vm_area_struct *area, unsigned long addr,
	  void *buf, int len, int write)
{
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct i915_gem_ww_ctx ww;
	void *vaddr;
	int err = 0;

	if (i915_gem_object_is_readonly(obj) && write)
		return -EACCES;

	addr -= area->vm_start;
	if (addr >= obj->base.size)
		return -EINVAL;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (err)
		goto out;

	/* As this is primarily for debugging, let's focus on simplicity */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out;
	}

	if (write) {
		memcpy(vaddr + addr, buf, len);
		__i915_gem_object_flush_map(obj, addr, len);
	} else {
		memcpy(buf, vaddr + addr, len);
	}

	i915_gem_object_unpin_map(obj);
out:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		return err;

	return len;
}
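
/*
 * Note: ->access() above is the fallback used by access_remote_vm() when
 * get_user_pages() cannot pin pages from a VM_IO | VM_PFNMAP mapping; it
 * is what allows a debugger to peek and poke these mappings, e.g. via
 * ptrace() or /proc/<pid>/mem.
 */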

void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!obj->userfault_count);

	for_each_ggtt_vma(vma, obj)
		i915_vma_revoke_mmap(vma);

	GEM_BUG_ON(obj->userfault_count);
}

/*
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by vm_fault_gtt().
 */
void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	intel_wakeref_t wakeref;

	/*
	 * Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates this somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
	 */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);

	if (!obj->userfault_count)
		goto out;

	__i915_gem_object_release_mmap_gtt(obj);

	/*
	 * Ensure that the CPU's PTEs are revoked and there are no outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied by changing the PTEs above *should* be
	 * sufficient; an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();

out:
	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct i915_mmap_offset *mmo, *mn;

	if (obj->ops->unmap_virtual)
		obj->ops->unmap_virtual(obj);

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn,
					     &obj->mmo.offsets, offset) {
		/*
		 * vma_node_unmap for GTT mmaps handled already in
		 * __i915_gem_object_release_mmap_gtt
		 */
		if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
			continue;

		spin_unlock(&obj->mmo.lock);
		drm_vma_node_unmap(&mmo->vma_node,
				   obj->base.dev->anon_inode->i_mapping);
		spin_lock(&obj->mmo.lock);
	}
	spin_unlock(&obj->mmo.lock);
}

static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj,
	   enum i915_mmap_type mmap_type)
{
	struct rb_node *rb;

	spin_lock(&obj->mmo.lock);
	rb = obj->mmo.offsets.rb_node;
	while (rb) {
		struct i915_mmap_offset *mmo =
			rb_entry(rb, typeof(*mmo), offset);

		if (mmo->mmap_type == mmap_type) {
			spin_unlock(&obj->mmo.lock);
			return mmo;
		}

		if (mmo->mmap_type < mmap_type)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}
	spin_unlock(&obj->mmo.lock);

	return NULL;
}

static struct i915_mmap_offset *
insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
{
	struct rb_node *rb, **p;

	spin_lock(&obj->mmo.lock);
	rb = NULL;
	p = &obj->mmo.offsets.rb_node;
	while (*p) {
		struct i915_mmap_offset *pos;

		rb = *p;
		pos = rb_entry(rb, typeof(*pos), offset);

		if (pos->mmap_type == mmo->mmap_type) {
			spin_unlock(&obj->mmo.lock);
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
			return pos;
		}

		if (pos->mmap_type < mmo->mmap_type)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&mmo->offset, rb, p);
	rb_insert_color(&mmo->offset, &obj->mmo.offsets);
	spin_unlock(&obj->mmo.lock);

	return mmo;
}
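
/*
 * The mmo rbtree above is ordered by mmap_type alone, so each object
 * carries at most one offset node per mmap type; insert_mmo() returns the
 * existing node (and frees the newly allocated one) when a concurrent
 * attach won the race.
 */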

static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
		   enum i915_mmap_type mmap_type,
		   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_mmap_offset *mmo;
	int err;

	GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops);

	mmo = lookup_mmo(obj, mmap_type);
	if (mmo)
		goto out;

	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
	if (!mmo)
		return ERR_PTR(-ENOMEM);

	mmo->obj = obj;
	mmo->mmap_type = mmap_type;
	drm_vma_node_reset(&mmo->vma_node);

	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (likely(!err))
		goto insert;

	/* Attempt to reap some mmap space from dead objects */
	err = intel_gt_retire_requests_timeout(to_gt(i915), MAX_SCHEDULE_TIMEOUT,
					       NULL);
	if (err)
		goto err;

	i915_gem_drain_freed_objects(i915);
	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (err)
		goto err;

insert:
	mmo = insert_mmo(obj, mmo);
	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
	if (file)
		drm_vma_node_allow(&mmo->vma_node, file);
	return mmo;

err:
	kfree(mmo);
	return ERR_PTR(err);
}

static int
__assign_mmap_offset(struct drm_i915_gem_object *obj,
		     enum i915_mmap_type mmap_type,
		     u64 *offset, struct drm_file *file)
{
	struct i915_mmap_offset *mmo;

	if (i915_gem_object_never_mmap(obj))
		return -ENODEV;

	if (obj->ops->mmap_offset) {
		if (mmap_type != I915_MMAP_TYPE_FIXED)
			return -ENODEV;

		*offset = obj->ops->mmap_offset(obj);
		return 0;
	}

	if (mmap_type == I915_MMAP_TYPE_FIXED)
		return -ENODEV;

	if (mmap_type != I915_MMAP_TYPE_GTT &&
	    !i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_has_iomem(obj))
		return -ENODEV;

	mmo = mmap_offset_attach(obj, mmap_type, file);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
	return 0;
}

static int
__assign_mmap_offset_handle(struct drm_file *file,
			    u32 handle,
			    enum i915_mmap_type mmap_type,
			    u64 *offset)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out_put;
	err = __assign_mmap_offset(obj, mmap_type, offset, file);
	i915_gem_object_unlock(obj);
out_put:
	i915_gem_object_put(obj);
	return err;
}

int
i915_gem_dumb_mmap_offset(struct drm_file *file,
			  struct drm_device *dev,
			  u32 handle,
			  u64 *offset)
{
	struct drm_i915_private *i915 = to_i915(dev);
	enum i915_mmap_type mmap_type;

	if (HAS_LMEM(i915))
		mmap_type = I915_MMAP_TYPE_FIXED;
	else if (pat_enabled())
		mmap_type = I915_MMAP_TYPE_WC;
	else if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return -ENODEV;
	else
		mmap_type = I915_MMAP_TYPE_GTT;

	return __assign_mmap_offset_handle(file, handle, mmap_type, offset);
}

/**
 * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap_offset *args = data;
	enum i915_mmap_type type;
	int err;

	/*
	 * Historically we failed to check args.pad and args.offset
	 * and so we cannot use those fields for user input and we cannot
	 * add -EINVAL for them as the ABI is fixed, i.e. old userspace
	 * may be feeding in garbage in those fields.
	 *
	 * if (args->pad) return -EINVAL; is verboten!
	 */

	err = i915_user_extensions(u64_to_user_ptr(args->extensions),
				   NULL, 0, NULL);
	if (err)
		return err;

	switch (args->flags) {
	case I915_MMAP_OFFSET_GTT:
		if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
			return -ENODEV;
		type = I915_MMAP_TYPE_GTT;
		break;

	case I915_MMAP_OFFSET_WC:
		if (!pat_enabled())
			return -ENODEV;
		type = I915_MMAP_TYPE_WC;
		break;

	case I915_MMAP_OFFSET_WB:
		type = I915_MMAP_TYPE_WB;
		break;

	case I915_MMAP_OFFSET_UC:
		if (!pat_enabled())
			return -ENODEV;
		type = I915_MMAP_TYPE_UC;
		break;

	case I915_MMAP_OFFSET_FIXED:
		type = I915_MMAP_TYPE_FIXED;
		break;

	default:
		return -EINVAL;
	}

	return __assign_mmap_offset_handle(file, args->handle, type, &args->offset);
}

static void vm_open(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_get(obj);
}

static void vm_close(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_gtt = {
	.fault = vm_fault_gtt,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static const struct vm_operations_struct vm_ops_cpu = {
	.fault = vm_fault_cpu,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static int singleton_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = file->private_data;

	cmpxchg(&i915->gem.mmap_singleton, file, NULL);
	drm_dev_put(&i915->drm);

	return 0;
}

static const struct file_operations singleton_fops = {
	.owner = THIS_MODULE,
	.release = singleton_release,
};

static struct file *mmap_singleton(struct drm_i915_private *i915)
{
	struct file *file;

	rcu_read_lock();
	file = READ_ONCE(i915->gem.mmap_singleton);
	if (file && !get_file_rcu(file))
		file = NULL;
	rcu_read_unlock();
	if (file)
		return file;

	file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
	if (IS_ERR(file))
		return file;

	/* Everyone shares a single global address space */
	file->f_mapping = i915->drm.anon_inode->i_mapping;

	smp_store_mb(i915->gem.mmap_singleton, file);
	drm_dev_get(&i915->drm);

	return file;
}
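
/*
 * Lifetime note on the singleton above: mmap_singleton() takes a reference
 * under RCU via get_file_rcu(), while singleton_release() clears the cached
 * pointer with cmpxchg(); a racing lookup therefore either observes NULL or
 * fails to acquire a reference on the dying file and allocates a fresh one.
 */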

/*
 * This overcomes the limitation in drm_gem_mmap's assignment of a
 * drm_gem_object as the vma->vm_private_data, since we need to be able
 * to resolve multiple mmap offsets which could be tied to a single gem
 * object.
 */
int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_vma_offset_node *node;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_mmap_offset *mmo = NULL;
	struct file *anon;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	rcu_read_lock();
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (node && drm_vma_node_is_allowed(node, priv)) {
		/*
		 * Skip 0-refcnted objects as they are in the process of being
		 * destroyed and will be invalid when the vma manager lock
		 * is released.
		 */
		if (!node->driver_private) {
			mmo = container_of(node, struct i915_mmap_offset, vma_node);
			obj = i915_gem_object_get_rcu(mmo->obj);

			GEM_BUG_ON(obj && obj->ops->mmap_ops);
		} else {
			obj = i915_gem_object_get_rcu
				(container_of(node, struct drm_i915_gem_object,
					      base.vma_node));

			GEM_BUG_ON(obj && !obj->ops->mmap_ops);
		}
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
	rcu_read_unlock();
	if (!obj)
		return node ? -EACCES : -EINVAL;

	if (i915_gem_object_is_readonly(obj)) {
		if (vma->vm_flags & VM_WRITE) {
			i915_gem_object_put(obj);
			return -EINVAL;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
	}

	anon = mmap_singleton(to_i915(dev));
	if (IS_ERR(anon)) {
		i915_gem_object_put(obj);
		return PTR_ERR(anon);
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;

	/*
	 * We keep the ref on mmo->obj, not vm_file, but we require
	 * vma->vm_file->f_mapping, see vma_link(), for later revocation.
	 * Our userspace is accustomed to having per-file resource cleanup
	 * (i.e. contexts, objects and requests) on their close(fd), which
	 * requires avoiding extraneous references to their filp, hence why
	 * we prefer to use an anonymous file for their mmaps.
	 */
	vma_set_file(vma, anon);
	/* Drop the initial creation reference, the vma is now holding one. */
	fput(anon);

	if (obj->ops->mmap_ops) {
		vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = obj->ops->mmap_ops;
		vma->vm_private_data = node->driver_private;
		return 0;
	}

	vma->vm_private_data = mmo;

	switch (mmo->mmap_type) {
	case I915_MMAP_TYPE_WC:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_FIXED:
		GEM_WARN_ON(1);
		fallthrough;
	case I915_MMAP_TYPE_WB:
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_UC:
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_GTT:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_gtt;
		break;
	}
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_mman.c"
#endif