/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drm_vma_manager.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_workarounds.h"

#include "i915_trace.h"
#include "i915_vgpu.h"
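
/*
 * insert_mappable_node() and remove_mappable_node() reserve and release a
 * temporary node in the CPU-mappable part of the GGTT. The pread/pwrite slow
 * paths use such a page-sized node as a window through which object pages are
 * mapped one at a time when the whole object cannot be pinned into the
 * aperture.
 */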
static int
insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
{
        int err;

        err = mutex_lock_interruptible(&ggtt->vm.mutex);
        if (err)
                return err;

        memset(node, 0, sizeof(*node));
        err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
                                          size, 0, I915_COLOR_UNEVICTABLE,
                                          0, ggtt->mappable_end,
                                          DRM_MM_INSERT_LOW);
        mutex_unlock(&ggtt->vm.mutex);

        return err;
}

static void
remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
{
        mutex_lock(&ggtt->vm.mutex);
        drm_mm_remove_node(node);
        mutex_unlock(&ggtt->vm.mutex);
}
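
/*
 * i915_gem_get_aperture_ioctl() reports the total size of the global GTT and
 * how much of it is still available, i.e. not reserved or pinned.
 */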
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        struct drm_i915_gem_get_aperture *args = data;
        struct i915_vma *vma;
        u64 pinned;

        if (mutex_lock_interruptible(&ggtt->vm.mutex))
                return -EINTR;

        pinned = ggtt->vm.reserved;
        list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
                if (i915_vma_is_pinned(vma))
                        pinned += vma->node.size;

        mutex_unlock(&ggtt->vm.mutex);

        args->aper_size = ggtt->vm.total;
        args->aper_available_size = args->aper_size - pinned;

        return 0;
}
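
/*
 * Unbind all VMAs of @obj, as selected by @flags. A runtime-pm wakeref is
 * taken up front (see the note about ACPI resume below), and obj->vma.lock is
 * dropped around each individual unbind so that the vm mutex can be taken
 * without inverting the locking order.
 */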
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
                           unsigned long flags)
{
        struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
        LIST_HEAD(still_in_list);
        intel_wakeref_t wakeref;
        struct i915_vma *vma;
        int ret;

        assert_object_held(obj);

        if (list_empty(&obj->vma.list))
                return 0;

        /*
         * As some machines use ACPI to handle runtime-resume callbacks, and
         * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
         * as they are required by the shrinker. Ergo, we wake the device up
         * first just in case.
         */
        wakeref = intel_runtime_pm_get(rpm);

try_again:
        ret = 0;
        spin_lock(&obj->vma.lock);
        while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
                                                       struct i915_vma,
                                                       obj_link))) {
                struct i915_address_space *vm = vma->vm;

                list_move_tail(&vma->obj_link, &still_in_list);
                if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
                        continue;

                if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
                        ret = -EBUSY;
                        break;
                }

                ret = -EAGAIN;
                if (!i915_vm_tryopen(vm))
                        break;

                /* Prevent vma being freed by i915_vma_parked as we unbind */
                vma = __i915_vma_get(vma);
                spin_unlock(&obj->vma.lock);

                if (vma) {
                        bool vm_trylock = !!(flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK);

                        ret = -EBUSY;
                        if (flags & I915_GEM_OBJECT_UNBIND_ASYNC) {
                                assert_object_held(vma->obj);
                                ret = i915_vma_unbind_async(vma, vm_trylock);
                        }

                        if (ret == -EBUSY && (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
                                              !i915_vma_is_active(vma))) {
                                if (vm_trylock) {
                                        if (mutex_trylock(&vma->vm->mutex)) {
                                                ret = __i915_vma_unbind(vma);
                                                mutex_unlock(&vma->vm->mutex);
                                        } else {
                                                ret = -EBUSY;
                                        }
                                } else {
                                        ret = i915_vma_unbind(vma);
                                }
                        }

                        __i915_vma_put(vma);
                }

                i915_vm_close(vm);
                spin_lock(&obj->vma.lock);
        }
        list_splice_init(&still_in_list, &obj->vma.list);
        spin_unlock(&obj->vma.lock);

        if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
                rcu_barrier(); /* flush the i915_vm_release() */
                goto try_again;
        }

        intel_runtime_pm_put(rpm, wakeref);

        return ret;
}
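
/*
 * Per-page copy helper for the shmem pread path: the source range is
 * clflushed first when the caller requires it, then copied out to userspace.
 */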
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
            bool needs_clflush)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);

        if (needs_clflush)
                drm_clflush_virt_range(vaddr + offset, len);

        ret = __copy_to_user(user_data, vaddr + offset, len);

        kunmap(page);

        return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pread *args)
{
        unsigned int needs_clflush;
        unsigned int idx, offset;
        char __user *user_data;
        u64 remain;
        int ret;

        ret = i915_gem_object_lock_interruptible(obj, NULL);
        if (ret)
                return ret;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err_unlock;

        ret = i915_gem_object_prepare_read(obj, &needs_clflush);
        if (ret)
                goto err_unpin;

        i915_gem_object_finish_access(obj);
        i915_gem_object_unlock(obj);

        remain = args->size;
        user_data = u64_to_user_ptr(args->data_ptr);
        offset = offset_in_page(args->offset);
        for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
                struct page *page = i915_gem_object_get_page(obj, idx);
                unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

                ret = shmem_pread(page, offset, length, user_data,
                                  needs_clflush);
                if (ret)
                        break;

                remain -= length;
                user_data += length;
                offset = 0;
        }

        i915_gem_object_unpin_pages(obj);
        return ret;

err_unpin:
        i915_gem_object_unpin_pages(obj);
err_unlock:
        i915_gem_object_unlock(obj);
        return ret;
}

static inline bool
gtt_user_read(struct io_mapping *mapping,
              loff_t base, int offset,
              char __user *user_data, int length)
{
        void __iomem *vaddr;
        unsigned long unwritten;

        /* We can use the cpu mem copy function because this is X86. */
        vaddr = io_mapping_map_atomic_wc(mapping, base);
        unwritten = __copy_to_user_inatomic(user_data,
                                            (void __force *)vaddr + offset,
                                            length);
        io_mapping_unmap_atomic(vaddr);
        if (unwritten) {
                vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
                unwritten = copy_to_user(user_data,
                                         (void __force *)vaddr + offset,
                                         length);
                io_mapping_unmap(vaddr);
        }

        return unwritten;
}
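
/*
 * i915_gem_gtt_prepare() and i915_gem_gtt_cleanup() bracket the GGTT
 * pread/pwrite slow paths. Under a ww transaction (retried on -EDEADLK) the
 * object is moved to the GTT domain and, if untiled, pinned into the mappable
 * aperture; if that pin fails, a single-page node is reserved instead and
 * object pages are mapped through it one at a time with insert_page().
 */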
static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
                                             struct drm_mm_node *node,
                                             bool write)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        struct i915_vma *vma;
        struct i915_gem_ww_ctx ww;
        int ret;

        i915_gem_ww_ctx_init(&ww, true);
retry:
        vma = ERR_PTR(-ENODEV);
        ret = i915_gem_object_lock(obj, &ww);
        if (ret)
                goto err_ww;

        ret = i915_gem_object_set_to_gtt_domain(obj, write);
        if (ret)
                goto err_ww;

        if (!i915_gem_object_is_tiled(obj))
                vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
                                                  PIN_MAPPABLE |
                                                  PIN_NONBLOCK /* NOWARN */ |
                                                  PIN_NOEVICT);
        if (vma == ERR_PTR(-EDEADLK)) {
                ret = -EDEADLK;
                goto err_ww;
        } else if (!IS_ERR(vma)) {
                node->start = i915_ggtt_offset(vma);
                node->flags = 0;
        } else {
                ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
                if (ret)
                        goto err_ww;
                GEM_BUG_ON(!drm_mm_node_allocated(node));
                vma = NULL;
        }

        ret = i915_gem_object_pin_pages(obj);
        if (ret) {
                if (drm_mm_node_allocated(node)) {
                        ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
                        remove_mappable_node(ggtt, node);
                } else {
                        i915_vma_unpin(vma);
                }
        }

err_ww:
        if (ret == -EDEADLK) {
                ret = i915_gem_ww_ctx_backoff(&ww);
                if (!ret)
                        goto retry;
        }
        i915_gem_ww_ctx_fini(&ww);

        return ret ? ERR_PTR(ret) : vma;
}

static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
                                 struct drm_mm_node *node,
                                 struct i915_vma *vma)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

        i915_gem_object_unpin_pages(obj);
        if (drm_mm_node_allocated(node)) {
                ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
                remove_mappable_node(ggtt, node);
        } else {
                i915_vma_unpin(vma);
        }
}
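
/*
 * Slow path for pread: read through the CPU-visible GGTT aperture, one page
 * at a time. Used when the shmem path is unavailable (no struct pages) or has
 * faulted.
 */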
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
                   const struct drm_i915_gem_pread *args)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        intel_wakeref_t wakeref;
        struct drm_mm_node node;
        void __user *user_data;
        struct i915_vma *vma;
        u64 remain, offset;
        int ret = 0;

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        vma = i915_gem_gtt_prepare(obj, &node, false);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out_rpm;
        }

        user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
        offset = args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                u32 page_base = node.start;
                unsigned page_offset = offset_in_page(offset);
                unsigned page_length = PAGE_SIZE - page_offset;
                page_length = remain < page_length ? remain : page_length;
                if (drm_mm_node_allocated(&node)) {
                        ggtt->vm.insert_page(&ggtt->vm,
                                             i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
                                             node.start, I915_CACHE_NONE, 0);
                } else {
                        page_base += offset & PAGE_MASK;
                }

                if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
                                  user_data, page_length)) {
                        ret = -EFAULT;
                        break;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

        i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_pread *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        /* PREAD is disallowed for all platforms after TGL-LP. This also
         * covers all platforms with local memory.
         */
        if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
                return -EOPNOTSUPP;

        if (args->size == 0)
                return 0;

        if (!access_ok(u64_to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /* Bounds check source. */
        if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
                ret = -EINVAL;
                goto out;
        }

        trace_i915_gem_object_pread(obj, args->offset, args->size);
        ret = -ENODEV;
        if (obj->ops->pread)
                ret = obj->ops->pread(obj, args);
        if (ret != -ENODEV)
                goto out;

        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT);
        if (ret)
                goto out;

        ret = i915_gem_shmem_pread(obj, args);
        if (ret == -EFAULT || ret == -ENODEV)
                ret = i915_gem_gtt_pread(obj, args);

out:
        i915_gem_object_put(obj);
        return ret;
}
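
/*
 * Illustrative only (not part of the driver): a minimal userspace sketch of
 * exercising this ioctl through libdrm. "fd" is assumed to be an open i915
 * render node and "handle" a valid GEM handle of at least "len" bytes.
 *
 *	struct drm_i915_gem_pread pread = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = len,
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread))
 *		perror("I915_GEM_PREAD");
 *
 * Note that newer platforms reject pread/pwrite entirely, as checked at the
 * top of this function, so portable userspace needs a mmap-based fallback.
 */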
/* This is the fast write path which cannot handle
 * page faults in the source data
 */
static inline bool
ggtt_write(struct io_mapping *mapping,
           loff_t base, int offset,
           char __user *user_data, int length)
{
        void __iomem *vaddr;
        unsigned long unwritten;

        /* We can use the cpu mem copy function because this is X86. */
        vaddr = io_mapping_map_atomic_wc(mapping, base);
        unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr);
        if (unwritten) {
                vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
                unwritten = copy_from_user((void __force *)vaddr + offset,
                                           user_data, length);
                io_mapping_unmap(vaddr);
        }

        return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                         const struct drm_i915_gem_pwrite *args)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        struct intel_runtime_pm *rpm = &i915->runtime_pm;
        intel_wakeref_t wakeref;
        struct drm_mm_node node;
        struct i915_vma *vma;
        u64 remain, offset;
        void __user *user_data;
        int ret = 0;

        if (i915_gem_object_has_struct_page(obj)) {
                /*
                 * Avoid waking the device up if we can fallback, as
                 * waking/resuming is very slow (worst-case 10-100 ms
                 * depending on PCI sleeps and our own resume time).
                 * This easily dwarfs any performance advantage from
                 * using the cache bypass of indirect GGTT access.
                 */
                wakeref = intel_runtime_pm_get_if_in_use(rpm);
                if (!wakeref)
                        return -EFAULT;
        } else {
                /* No backing pages, no fallback, we must force GGTT access */
                wakeref = intel_runtime_pm_get(rpm);
        }

        vma = i915_gem_gtt_prepare(obj, &node, true);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out_rpm;
        }

        i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

        user_data = u64_to_user_ptr(args->data_ptr);
        offset = args->offset;
        remain = args->size;
        while (remain) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                u32 page_base = node.start;
                unsigned int page_offset = offset_in_page(offset);
                unsigned int page_length = PAGE_SIZE - page_offset;
                page_length = remain < page_length ? remain : page_length;
                if (drm_mm_node_allocated(&node)) {
                        /* flush the write before we modify the GGTT */
                        intel_gt_flush_ggtt_writes(ggtt->vm.gt);
                        ggtt->vm.insert_page(&ggtt->vm,
                                             i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
                                             node.start, I915_CACHE_NONE, 0);
                        wmb(); /* flush modifications to the GGTT (insert_page) */
                } else {
                        page_base += offset & PAGE_MASK;
                }
                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available. Return the error and we'll
                 * retry in the slow path.
                 * If the object is non-shmem backed, we retry again with the
                 * path that handles page fault.
                 */
                if (ggtt_write(&ggtt->iomap, page_base, page_offset,
                               user_data, page_length)) {
                        ret = -EFAULT;
                        break;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

        intel_gt_flush_ggtt_writes(ggtt->vm.gt);
        i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

        i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
        intel_runtime_pm_put(rpm, wakeref);
        return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush_after is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
             bool needs_clflush_before,
             bool needs_clflush_after)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);

        if (needs_clflush_before)
                drm_clflush_virt_range(vaddr + offset, len);

        ret = __copy_from_user(vaddr + offset, user_data, len);
        if (!ret && needs_clflush_after)
                drm_clflush_virt_range(vaddr + offset, len);

        kunmap(page);

        return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
                      const struct drm_i915_gem_pwrite *args)
{
        unsigned int partial_cacheline_write;
        unsigned int needs_clflush;
        unsigned int offset, idx;
        void __user *user_data;
        u64 remain;
        int ret;

        ret = i915_gem_object_lock_interruptible(obj, NULL);
        if (ret)
                return ret;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err_unlock;

        ret = i915_gem_object_prepare_write(obj, &needs_clflush);
        if (ret)
                goto err_unpin;

        i915_gem_object_finish_access(obj);
        i915_gem_object_unlock(obj);

        /* If we don't overwrite a cacheline completely we need to be
         * careful to have up-to-date data by first clflushing. Don't
         * overcomplicate things and flush the entire page.
         */
        partial_cacheline_write = 0;
        if (needs_clflush & CLFLUSH_BEFORE)
                partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
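
        /*
         * Worked example: with 64-byte cachelines the mask is 0x3f, so
         * (offset | length) & 0x3f is non-zero whenever a copy does not both
         * start and end on a cacheline boundary. An offset of 16 or a length
         * of 100 therefore forces a clflush before the partial write, while
         * offset 0 with length 128 does not.
         */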
        user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
        offset = offset_in_page(args->offset);
        for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
                struct page *page = i915_gem_object_get_page(obj, idx);
                unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

                ret = shmem_pwrite(page, offset, length, user_data,
                                   (offset | length) & partial_cacheline_write,
                                   needs_clflush & CLFLUSH_AFTER);
                if (ret)
                        break;

                remain -= length;
                user_data += length;
                offset = 0;
        }

        i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

        i915_gem_object_unpin_pages(obj);
        return ret;

err_unpin:
        i915_gem_object_unpin_pages(obj);
err_unlock:
        i915_gem_object_unlock(obj);
        return ret;
}

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_pwrite *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        /* PWRITE is disallowed for all platforms after TGL-LP. This also
         * covers all platforms with local memory.
         */
        if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
                return -EOPNOTSUPP;

        if (args->size == 0)
                return 0;

        if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
                return -EFAULT;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /* Bounds check destination. */
        if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
                ret = -EINVAL;
                goto err;
        }

        /* Writes not allowed into this read-only object */
        if (i915_gem_object_is_readonly(obj)) {
                ret = -EINVAL;
                goto err;
        }

        trace_i915_gem_object_pwrite(obj, args->offset, args->size);

        ret = -ENODEV;
        if (obj->ops->pwrite)
                ret = obj->ops->pwrite(obj, args);
        if (ret != -ENODEV)
                goto err;

        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_ALL,
                                   MAX_SCHEDULE_TIMEOUT);
        if (ret)
                goto err;

        ret = -EFAULT;
        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
        if (!i915_gem_object_has_struct_page(obj) ||
            i915_gem_cpu_write_needs_clflush(obj))
                /* Note that the gtt paths might fail with non-page-backed user
                 * pointers (e.g. gtt mappings when moving data between
                 * textures). Fallback to the shmem path in that case.
                 */
                ret = i915_gem_gtt_pwrite_fast(obj, args);

        if (ret == -EFAULT || ret == -ENOSPC) {
                if (i915_gem_object_has_struct_page(obj))
                        ret = i915_gem_shmem_pwrite(obj, args);
        }

err:
        i915_gem_object_put(obj);
        return ret;
}

/**
 * Called when user space has done writes to this buffer
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file)
{
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_i915_gem_object *obj;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /*
         * Proxy objects are barred from CPU access, so there is no
         * need to ban sw_finish as it is a nop.
         */

        /* Pinned buffers may be scanout, so flush the cache */
        i915_gem_object_flush_if_display(obj);
        i915_gem_object_put(obj);

        return 0;
}

void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
        struct drm_i915_gem_object *obj, *on;
        int i;

        /*
         * Only called during RPM suspend. All users of the userfault_list
         * must be holding an RPM wakeref to ensure that this can not
         * run concurrently with themselves (and use the struct_mutex for
         * protection between themselves).
         */

        list_for_each_entry_safe(obj, on,
                                 &to_gt(i915)->ggtt->userfault_list, userfault_link)
                __i915_gem_object_release_mmap_gtt(obj);

        /*
         * The fences will be lost when the device powers down. If any were
         * in use by hardware (i.e. they are pinned), we should not be powering
         * down! All other fences will be reacquired by the user upon waking.
         */
        for (i = 0; i < to_gt(i915)->ggtt->num_fences; i++) {
                struct i915_fence_reg *reg = &to_gt(i915)->ggtt->fence_regs[i];

                /*
                 * Ideally we want to assert that the fence register is not
                 * live at this point (i.e. that no piece of code will be
                 * trying to write through fence + GTT, as that both violates
                 * our tracking of activity and associated locking/barriers,
                 * but also is illegal given that the hw is powered down).
                 *
                 * Previously we used reg->pin_count as a "liveness" indicator.
                 * That is not sufficient, and we need a more fine-grained
                 * tool if we want to have a sanity check here.
                 */

                if (!reg->vma)
                        continue;

                GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
                reg->dirty = true;
        }
}

static void discard_ggtt_vma(struct i915_vma *vma)
{
        struct drm_i915_gem_object *obj = vma->obj;

        spin_lock(&obj->vma.lock);
        if (!RB_EMPTY_NODE(&vma->obj_node)) {
                rb_erase(&vma->obj_node, &obj->vma.tree);
                RB_CLEAR_NODE(&vma->obj_node);
        }
        spin_unlock(&obj->vma.lock);
}
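
/*
 * Pin @obj into the global GTT under an existing ww transaction. With
 * PIN_MAPPABLE the object must fit within the CPU-visible aperture, and with
 * PIN_NONBLOCK the caller must be prepared for an -ENOSPC fallback instead of
 * evicting other objects (see the half-the-aperture heuristic below).
 */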
struct i915_vma *
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
                            struct i915_gem_ww_ctx *ww,
                            const struct i915_ggtt_view *view,
                            u64 size, u64 alignment, u64 flags)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        struct i915_vma *vma;
        int ret;

        GEM_WARN_ON(!ww);

        if (flags & PIN_MAPPABLE &&
            (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
                /*
                 * If the required space is larger than the available
                 * aperture, we will not be able to find a slot for the
                 * object and unbinding the object now will be in
                 * vain. Worse, doing so may cause us to ping-pong
                 * the object in and out of the Global GTT and
                 * waste a lot of cycles under the mutex.
                 */
                if (obj->base.size > ggtt->mappable_end)
                        return ERR_PTR(-E2BIG);

                /*
                 * If NONBLOCK is set the caller is optimistically
                 * trying to cache the full object within the mappable
                 * aperture, and *must* have a fallback in place for
                 * situations where we cannot bind the object. We
                 * can be a little more lax here and use the fallback
                 * more often to avoid costly migrations of ourselves
                 * and other objects within the aperture.
                 *
                 * Half-the-aperture is used as a simple heuristic.
                 * More interesting would be to do a search for a free
                 * block prior to making the commitment to unbind.
                 * That caters for the self-harm case, and with a
                 * little more heuristics (e.g. NOFAULT, NOEVICT)
                 * we could try to minimise harm to others.
                 */
                if (flags & PIN_NONBLOCK &&
                    obj->base.size > ggtt->mappable_end / 2)
                        return ERR_PTR(-ENOSPC);
        }

new_vma:
        vma = i915_vma_instance(obj, &ggtt->vm, view);
        if (IS_ERR(vma))
                return ERR_CAST(vma);

        if (i915_vma_misplaced(vma, size, alignment, flags)) {
                if (flags & PIN_NONBLOCK) {
                        if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
                                return ERR_PTR(-ENOSPC);

                        if (flags & PIN_MAPPABLE &&
                            vma->fence_size > ggtt->mappable_end / 2)
                                return ERR_PTR(-ENOSPC);
                }

                if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
                        discard_ggtt_vma(vma);
                        goto new_vma;
                }

                ret = i915_vma_unbind(vma);
                if (ret)
                        return ERR_PTR(ret);
        }

        ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
        if (ret)
                return ERR_PTR(ret);

        if (vma->fence && !i915_gem_object_is_tiled(obj)) {
                mutex_lock(&ggtt->vm.mutex);
                i915_vma_revoke_fence(vma);
                mutex_unlock(&ggtt->vm.mutex);
        }

        ret = i915_vma_wait_for_bind(vma);
        if (ret) {
                i915_vma_unpin(vma);
                return ERR_PTR(ret);
        }

        return vma;
}

struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
                         const struct i915_ggtt_view *view,
                         u64 size, u64 alignment, u64 flags)
{
        struct i915_gem_ww_ctx ww;
        struct i915_vma *ret;
        int err;

        for_i915_gem_ww(&ww, err, true) {
                err = i915_gem_object_lock(obj, &ww);
                if (err)
                        continue;

                ret = i915_gem_object_ggtt_pin_ww(obj, &ww, view, size,
                                                  alignment, flags);
                if (IS_ERR(ret))
                        err = PTR_ERR(ret);
        }

        return err ? ERR_PTR(err) : ret;
}
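
/*
 * I915_GEM_MADVISE lets userspace mark a buffer I915_MADV_DONTNEED so that
 * the shrinker may purge its backing storage under memory pressure, or
 * I915_MADV_WILLNEED to require it again. args->retained reports whether the
 * backing storage had not already been purged at the time of the call.
 */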
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_madvise *args = data;
        struct drm_i915_gem_object *obj;
        int err;

        switch (args->madv) {
        case I915_MADV_DONTNEED:
        case I915_MADV_WILLNEED:
                break;
        default:
                return -EINVAL;
        }

        obj = i915_gem_object_lookup(file_priv, args->handle);
        if (!obj)
                return -ENOENT;

        err = i915_gem_object_lock_interruptible(obj, NULL);
        if (err)
                goto out;

        if (i915_gem_object_has_pages(obj) &&
            i915_gem_object_is_tiled(obj) &&
            i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                if (obj->mm.madv == I915_MADV_WILLNEED) {
                        GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
                        i915_gem_object_clear_tiling_quirk(obj);
                        i915_gem_object_make_shrinkable(obj);
                }
                if (args->madv == I915_MADV_WILLNEED) {
                        GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
                        i915_gem_object_make_unshrinkable(obj);
                        i915_gem_object_set_tiling_quirk(obj);
                }
        }

        if (obj->mm.madv != __I915_MADV_PURGED) {
                obj->mm.madv = args->madv;
                if (obj->ops->adjust_lru)
                        obj->ops->adjust_lru(obj);
        }

        if (i915_gem_object_has_pages(obj) ||
            i915_gem_object_has_self_managed_shrink_list(obj)) {
                unsigned long flags;

                spin_lock_irqsave(&i915->mm.obj_lock, flags);
                if (!list_empty(&obj->mm.link)) {
                        struct list_head *list;

                        if (obj->mm.madv != I915_MADV_WILLNEED)
                                list = &i915->mm.purge_list;
                        else
                                list = &i915->mm.shrink_list;
                        list_move_tail(&obj->mm.link, list);
                }
                spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
        }

        /* if the object is no longer attached, discard its backing storage */
        if (obj->mm.madv == I915_MADV_DONTNEED &&
            !i915_gem_object_has_pages(obj))
                i915_gem_object_truncate(obj);

        args->retained = obj->mm.madv != __I915_MADV_PURGED;

        i915_gem_object_unlock(obj);
out:
        i915_gem_object_put(obj);
        return err;
}

int i915_gem_init(struct drm_i915_private *dev_priv)
{
        int ret;

        /* We need to fall back to 4K pages if the host doesn't support huge gtt. */
        if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
                mkwrite_device_info(dev_priv)->page_sizes =
                        I915_GTT_PAGE_SIZE_4K;

        ret = i915_gem_init_userptr(dev_priv);
        if (ret)
                return ret;

        intel_uc_fetch_firmwares(&to_gt(dev_priv)->uc);
        intel_wopcm_init(&dev_priv->wopcm);

        ret = i915_init_ggtt(dev_priv);
        if (ret) {
                GEM_BUG_ON(ret == -EIO);
                goto err_unlock;
        }

        /*
         * Despite its name, intel_init_clock_gating applies both display
         * clock gating workarounds and GT mmio workarounds, plus the
         * occasional GT power context workaround. Worse, sometimes it includes
         * a context register workaround which we need to apply before we
         * record the default HW state for all contexts.
         *
         * FIXME: break up the workarounds and apply them at the right time!
         */
        intel_init_clock_gating(dev_priv);

        ret = intel_gt_init(to_gt(dev_priv));
        if (ret)
                goto err_unlock;

        return 0;

        /*
         * Unwinding is complicated by the fact that we want to handle -EIO
         * to mean disable GPU submission but keep KMS alive. We want to mark
         * the HW as irreversibly wedged, but keep enough state around that the
         * driver doesn't explode during runtime.
         */
err_unlock:
        i915_gem_drain_workqueue(dev_priv);

        if (ret != -EIO)
                intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);

        if (ret == -EIO) {
                /*
                 * Allow engines or uC initialisation to fail by marking the GPU
                 * as wedged. But we only want to do this when the GPU is angry,
                 * for all other failure, such as an allocation failure, bail.
                 */
                if (!intel_gt_is_wedged(to_gt(dev_priv))) {
                        i915_probe_error(dev_priv,
                                         "Failed to initialize GPU, declaring it wedged!\n");
                        intel_gt_set_wedged(to_gt(dev_priv));
                }

                /* Minimal basic recovery for KMS */
                ret = i915_ggtt_enable_hw(dev_priv);
                i915_ggtt_resume(to_gt(dev_priv)->ggtt);
                intel_init_clock_gating(dev_priv);
        }

        i915_gem_drain_freed_objects(dev_priv);

        return ret;
}

void i915_gem_driver_register(struct drm_i915_private *i915)
{
        i915_gem_driver_register__shrinker(i915);

        intel_engines_driver_register(i915);
}

void i915_gem_driver_unregister(struct drm_i915_private *i915)
{
        i915_gem_driver_unregister__shrinker(i915);
}

void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
        intel_wakeref_auto_fini(&to_gt(dev_priv)->ggtt->userfault_wakeref);

        i915_gem_suspend_late(dev_priv);
        intel_gt_driver_remove(to_gt(dev_priv));
        dev_priv->uabi_engines = RB_ROOT;

        /* Flush any outstanding unpin_work. */
        i915_gem_drain_workqueue(dev_priv);

        i915_gem_drain_freed_objects(dev_priv);
}

void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
        intel_gt_driver_release(to_gt(dev_priv));

        intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);

        i915_gem_drain_freed_objects(dev_priv);

        drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
}

static void i915_gem_init__mm(struct drm_i915_private *i915)
{
        spin_lock_init(&i915->mm.obj_lock);

        init_llist_head(&i915->mm.free_list);

        INIT_LIST_HEAD(&i915->mm.purge_list);
        INIT_LIST_HEAD(&i915->mm.shrink_list);

        i915_gem_init__objects(i915);
}

void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
        i915_gem_init__mm(dev_priv);
        i915_gem_init__contexts(dev_priv);

        spin_lock_init(&dev_priv->fb_tracking.lock);
}

void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
        i915_gem_drain_freed_objects(dev_priv);
        GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
        GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
        drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
}
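
/*
 * Per-file GEM state, allocated on open: it carries the client's default
 * context, the legacy BSD engine selection and the hang timestamp used when
 * deciding whether to ban a misbehaving client.
 */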
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv;
        int ret;

        file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
        if (!file_priv)
                return -ENOMEM;

        file->driver_priv = file_priv;
        file_priv->dev_priv = i915;
        file_priv->file = file;

        file_priv->bsd_engine = -1;
        file_priv->hang_timestamp = jiffies;

        ret = i915_gem_context_open(i915, file);
        if (ret)
                kfree(file_priv);

        return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
#endif