/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_dmabuf.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_mocs.h"
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}
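
/*
 * Example (a sketch, not from the original source): on a non-LLC platform
 * with an uncached object, a CPU write must be followed by a clflush and a
 * chipset flush, which is exactly what the pwrite paths below do:
 *
 *	if (cpu_write_needs_clflush(obj))
 *		drm_clflush_virt_range(vaddr, length);
 *	i915_gem_chipset_flush(to_i915(obj->base.dev));
 *
 * where vaddr/length (assumed names) describe the range just written.
 */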
static int
insert_mappable_node(struct drm_i915_private *i915,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
						   size, 0, 0, 0,
						   i915->ggtt.mappable_end,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}
static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}
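
/*
 * Usage sketch (illustrative; locals are assumed): the two helpers above
 * bracket a temporary window in the mappable aperture,
 *
 *	struct drm_mm_node node;
 *	int ret = insert_mappable_node(i915, &node, PAGE_SIZE);
 *	if (ret == 0) {
 *		... access pages via PTEs pointed at node.start ...
 *		remove_mappable_node(&node);
 *	}
 *
 * which is the pattern the GTT pread/pwrite fallback paths use below.
 */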
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}
static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	if (!i915_reset_in_progress(error))
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_in_progress(error),
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}

	return 0;
}
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}
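
/*
 * Caller sketch (not from the original file; this is the pattern every
 * ioctl below follows):
 *
 *	ret = i915_mutex_lock_interruptible(dev);
 *	if (ret)
 *		return ret;
 *	... touch GEM state under dev->struct_mutex ...
 *	mutex_unlock(&dev->struct_mutex);
 */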
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}
static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	char *vaddr = obj->phys_handle->vaddr;
	struct sg_table *st;
	struct scatterlist *sg;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->pages = st;
	return 0;
}
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	if (obj->dirty) {
		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->dirty = 0;
	}

	sg_free_table(obj->pages);
	kfree(obj->pages);
}
static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};
int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret = 0;

	/* The vma will only be freed if it is marked as closed, and if we wait
	 * upon rendering to the vma, we may unbind anything in the list.
	 */
	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}
int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
	int ret;

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
			return -EBUSY;

		return 0;
	}

	if (obj->madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

	ret = i915_gem_object_unbind(obj);
	if (ret)
		return ret;

	ret = i915_gem_object_put_pages(obj);
	if (ret)
		return ret;

	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	obj->phys_handle = phys;
	obj->ops = &i915_gem_phys_ops;

	return i915_gem_object_get_pages(obj);
}
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int ret;

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
	}

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(dev));

out:
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}
void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	kmem_cache_free(dev_priv->objects, obj);
}
static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put_unlocked(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
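
/*
 * Worked example (numbers are illustrative only): a 1920x1080 dumb buffer
 * at 32 bpp gives
 *
 *	pitch = ALIGN(1920 * DIV_ROUND_UP(32, 8), 64)
 *	      = ALIGN(7680, 64) = 7680 bytes,
 *	size  = 7680 * 1080 = 8294400 bytes,
 *
 * which i915_gem_create() then rounds up to a whole number of pages.
 */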
/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}
static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}
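
/*
 * Illustrative arithmetic (not from the original file): with bit-17
 * swizzling each 128-byte block has its two 64-byte halves swapped, so
 *
 *	gpu_offset = 0   ->  swizzled_gpu_offset = 64
 *	gpu_offset = 64  ->  swizzled_gpu_offset = 0
 *	gpu_offset = 128 ->  swizzled_gpu_offset = 192
 *
 * i.e. "gpu_offset ^ 64" flips which cacheline of the pair is accessed,
 * while this_length is clamped so a copy never crosses a 64-byte cacheline.
 */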
/* Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
		return -EINVAL;

	ret = i915_gem_object_wait_rendering(obj, true);
	if (ret)
		return ret;

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens.
		 */
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	return ret;
}
/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set.
 */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all.
		 */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions.
 */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}
static inline unsigned long
slow_user_access(struct io_mapping *mapping,
		 uint64_t page_base, int page_offset,
		 char __user *user_data,
		 unsigned long length, bool pwrite)
{
	void __iomem *ioaddr;
	void *vaddr;
	unsigned long unwritten;

	ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)ioaddr + page_offset;
	if (pwrite)
		unwritten = __copy_from_user(vaddr, user_data, length);
	else
		unwritten = __copy_to_user(user_data, vaddr, length);

	io_mapping_unmap(ioaddr);
	return unwritten;
}
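
/*
 * Caller sketch (assumed values, mirroring the GTT pread loop below):
 *
 *	if (slow_user_access(ggtt->mappable, page_base, page_offset,
 *			     user_data, page_length, false))
 *		ret = -EFAULT;
 *
 * The caller must drop struct_mutex first, since the copy may fault on the
 * user pages.
 */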
static int
i915_gem_gtt_pread(struct drm_device *dev,
		   struct drm_i915_gem_object *obj, uint64_t size,
		   uint64_t data_offset, uint64_t data_ptr)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_mm_node node;
	char __user *user_data;
	uint64_t remain;
	uint64_t offset;
	int ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
	if (ret) {
		ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
		if (ret)
			goto out;

		ret = i915_gem_object_get_pages(obj);
		if (ret) {
			remove_mappable_node(&node);
			goto out;
		}

		i915_gem_object_pin_pages(obj);
	} else {
		node.start = i915_gem_obj_ggtt_offset(obj);
		node.allocated = false;
		ret = i915_gem_object_put_fence(obj);
		if (ret)
			goto out_unpin;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret)
		goto out_unpin;

	user_data = u64_to_user_ptr(data_ptr);
	remain = size;
	offset = data_offset;

	mutex_unlock(&dev->struct_mutex);
	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_writeable(user_data, remain);
		if (ret) {
			mutex_lock(&dev->struct_mutex);
			goto out_unpin;
		}
	}

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start,
					       I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* This is a slow read/write as it tries to read from
		 * and write to user memory which may result in page
		 * faults, and so we cannot perform this under struct_mutex.
		 */
		if (slow_user_access(ggtt->mappable, page_base,
				     page_offset, user_data,
				     page_length, false)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	mutex_lock(&dev->struct_mutex);
	if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
		/* The user has modified the object whilst we tried
		 * reading from it, and we now have no idea what domain
		 * the pages should be in. As we have just been touching
		 * them directly, flush everything back to the GTT
		 * domain.
		 */
		ret = i915_gem_object_set_to_gtt_domain(obj, false);
	}

out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size,
				       true);
		i915_gem_object_unpin_pages(obj);
		remove_mappable_node(&node);
	} else {
		i915_gem_object_ggtt_unpin(obj);
	}
out:
	return ret;
}
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue.
			 */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}
/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

	/* pread for non shmem backed objects */
	if (ret == -EFAULT || ret == -ENODEV) {
		intel_runtime_pm_get(to_i915(dev));
		ret = i915_gem_gtt_pread(dev, obj, args->size,
					 args->offset, args->data_ptr);
		intel_runtime_pm_put(to_i915(dev));
	}

out:
	i915_gem_object_put(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/* This is the fast write path which cannot handle
 * page faults in the source data
 */
static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @i915: i915 device private data
 * @obj: i915 gem object
 * @args: pwrite arguments structure
 * @file: drm file pointer
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_device *dev = obj->base.dev;
	struct drm_mm_node node;
	uint64_t remain, offset;
	char __user *user_data;
	int ret;
	bool hit_slow_path = false;

	if (obj->tiling_mode != I915_TILING_NONE)
		return -EFAULT;

	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
	if (ret) {
		ret = insert_mappable_node(i915, &node, PAGE_SIZE);
		if (ret)
			goto out;

		ret = i915_gem_object_get_pages(obj);
		if (ret) {
			remove_mappable_node(&node);
			goto out;
		}

		i915_gem_object_pin_pages(obj);
	} else {
		node.start = i915_gem_obj_ggtt_offset(obj);
		node.allocated = false;
		ret = i915_gem_object_put_fence(obj);
		if (ret)
			goto out_unpin;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	intel_fb_obj_invalidate(obj, ORIGIN_GTT);
	obj->dirty = true;

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page faults.
		 */
		if (fast_user_write(ggtt->mappable, page_base,
				    page_offset, user_data, page_length)) {
			hit_slow_path = true;
			mutex_unlock(&dev->struct_mutex);
			if (slow_user_access(ggtt->mappable,
					     page_base,
					     page_offset, user_data,
					     page_length, true)) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto out_flush;
			}

			mutex_lock(&dev->struct_mutex);
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_flush:
	if (hit_slow_path) {
		if (ret == 0 &&
		    (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
			/* The user has modified the object whilst we tried
			 * reading from it, and we now have no idea what domain
			 * the pages should be in. As we have just been touching
			 * them directly, flush everything back to the GTT
			 * domain.
			 */
			ret = i915_gem_object_set_to_gtt_domain(obj, false);
		}
	}

	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size,
				       true);
		i915_gem_object_unpin_pages(obj);
		remove_mappable_node(&node);
	} else {
		i915_gem_object_ggtt_unpin(obj);
	}
out:
	return ret;
}
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions.
 */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway.
		 */
		needs_clflush_after = cpu_write_needs_clflush(obj);
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire page.
		 */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				needs_clflush_after = true;
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(to_i915(dev));
	else
		obj->cache_dirty = true;

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}
/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto put_rpm;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fall back to the shmem path in that case.
		 */
	}

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else if (i915_gem_object_has_struct_page(obj))
			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
		else
			ret = -ENODEV;
	}

out:
	i915_gem_object_put(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
put_rpm:
	intel_runtime_pm_put(dev_priv);

	return ret;
}
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 * @obj: i915 gem object
 * @readonly: waiting for read access or write
 */
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct reservation_object *resv;
	struct i915_gem_active *active;
	unsigned long active_mask;
	int idx;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!readonly) {
		active = obj->last_read;
		active_mask = obj->active;
	} else {
		active_mask = 1;
		active = &obj->last_write;
	}

	for_each_active(active_mask, idx) {
		int ret;

		ret = i915_gem_active_wait(&active[idx],
					   &obj->base.dev->struct_mutex);
		if (ret)
			return ret;
	}

	resv = i915_gem_object_get_dmabuf_resv(obj);
	if (resv) {
		long err;

		err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
							  MAX_SCHEDULE_TIMEOUT);
		if (err < 0)
			return err;
	}

	return 0;
}
/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    struct intel_rps_client *rps,
					    bool readonly)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
	struct i915_gem_active *active;
	unsigned long active_mask;
	int ret, i, n = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	active_mask = obj->active;
	if (!active_mask)
		return 0;

	if (!readonly) {
		active = obj->last_read;
	} else {
		active_mask = 1;
		active = &obj->last_write;
	}

	for_each_active(active_mask, i) {
		struct drm_i915_gem_request *req;

		req = i915_gem_active_get(&active[i],
					  &obj->base.dev->struct_mutex);
		if (req)
			requests[n++] = req;
	}

	mutex_unlock(&dev->struct_mutex);
	ret = 0;
	for (i = 0; ret == 0 && i < n; i++)
		ret = i915_wait_request(requests[i], true, NULL, rps);
	mutex_lock(&dev->struct_mutex);

	for (i = 0; i < n; i++)
		i915_gem_request_put(requests[i]);

	return ret;
}
static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;
	return &fpriv->rps;
}

static enum fb_op_origin
write_origin(struct drm_i915_gem_object *obj, unsigned domain)
{
	return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
	       ORIGIN_GTT : ORIGIN_CPU;
}
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj,
							  to_rps_client(file),
							  !write_domain);
	if (ret)
		goto unref;

	if (read_domains & I915_GEM_DOMAIN_GTT)
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
	else
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);

	if (write_domain != 0)
		intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));

unref:
	i915_gem_object_put(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_display)
		i915_gem_object_flush_cpu_write_domain(obj);

	i915_gem_object_put(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		i915_gem_object_put_unlocked(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (down_write_killable(&mm->mmap_sem)) {
			i915_gem_object_put_unlocked(obj);
			return -EINTR;
		}
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);

		/* This may race, but that's ok, it only gets set */
		WRITE_ONCE(obj->has_wc_mmap, true);
	}
	i915_gem_object_put_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
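
/*
 * Userspace sketch (illustrative; fd, handle and size are assumed to come
 * from an earlier GEM create, error handling omitted):
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,
 *		.size = size,
 *		.flags = I915_MMAP_WC,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *		memcpy((void *)(uintptr_t)arg.addr_ptr, data, size);
 */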
/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_ggtt_view view = i915_ggtt_view_normal;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	intel_runtime_pm_get(dev_priv);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon reacquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
	if (ret)
		goto unlock;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
		ret = -EFAULT;
		goto unlock;
	}

	/* Use a partial view if the object is bigger than the aperture. */
	if (obj->base.size >= ggtt->mappable_end &&
	    obj->tiling_mode == I915_TILING_NONE) {
		static const unsigned int chunk_size = 256; /* 1 MiB */

		memset(&view, 0, sizeof(view));
		view.type = I915_GGTT_VIEW_PARTIAL;
		view.params.partial.offset = rounddown(page_offset, chunk_size);
		view.params.partial.size =
			min_t(unsigned int,
			      chunk_size,
			      (vma->vm_end - vma->vm_start)/PAGE_SIZE -
			      view.params.partial.offset);
	}

	/* Now pin it into the GTT if needed */
	ret = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	if (ret)
		goto unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto unpin;

	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto unpin;

	/* Finally, remap it using the new GTT offset */
	pfn = ggtt->mappable_base +
		i915_gem_obj_ggtt_offset_view(obj, &view);
	pfn >>= PAGE_SHIFT;

	if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
		/* Overriding existing pages in partial view does not cause
		 * us any trouble as TLBs are still valid because the fault
		 * is due to userspace losing part of the mapping or never
		 * having accessed it before (at this partial's range).
		 */
		unsigned long base = vma->vm_start +
			(view.params.partial.offset << PAGE_SHIFT);
		unsigned int i;

		for (i = 0; i < view.params.partial.size; i++) {
			ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
			if (ret)
				break;
		}

		obj->fault_mappable = true;
	} else {
		if (!obj->fault_mappable) {
			unsigned long size = min_t(unsigned long,
						   vma->vm_end - vma->vm_start,
						   obj->base.size);
			int i;

			for (i = 0; i < size >> PAGE_SHIFT; i++) {
				ret = vm_insert_pfn(vma,
						    (unsigned long)vma->vm_start + i * PAGE_SIZE,
						    pfn + i);
				if (ret)
					break;
			}

			obj->fault_mappable = true;
		} else
			ret = vm_insert_pfn(vma,
					    (unsigned long)vmf->virtual_address,
					    pfn + page_offset);
	}
unpin:
	i915_gem_object_ggtt_unpin_view(obj, &view);
unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EIO:
		/*
		 * We eat errors when the gpu is terminally wedged to avoid
		 * userspace unduly crashing (gl has no provisions for mmaps to
		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
		 * and so needs to be reported.
		 */
		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}
	case -EAGAIN:
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		ret = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		ret = VM_FAULT_OOM;
		break;
	case -ENOSPC:
	case -EFAULT:
		ret = VM_FAULT_SIGBUS;
		break;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		ret = VM_FAULT_SIGBUS;
		break;
	}

	intel_runtime_pm_put(dev_priv);
	return ret;
}
/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	/* Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 */
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!obj->fault_mappable)
		return;

	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);

	/* Ensure that the CPU's PTE are revoked and there are not outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied above by changing the PTE above *should* be
	 * sufficient, an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();

	obj->fault_mappable = false;
}
void
i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		i915_gem_release_mmap(obj);
}
/**
 * i915_gem_get_ggtt_size - return required global GTT size for an object
 * @dev_priv: i915 device
 * @size: object size
 * @tiling_mode: tiling mode
 *
 * Return the required global GTT size for an object, taking into account
 * potential fence register mapping.
 */
u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
			   u64 size, int tiling_mode)
{
	u64 ggtt_size;

	GEM_BUG_ON(size == 0);

	if (INTEL_GEN(dev_priv) >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (IS_GEN3(dev_priv))
		ggtt_size = 1024*1024;
	else
		ggtt_size = 512*1024;

	while (ggtt_size < size)
		ggtt_size <<= 1;

	return ggtt_size;
}
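
/*
 * Worked example (illustrative numbers): on gen3 a 700KiB tiled object
 * starts from the 1MiB minimum, which already covers it, so it occupies a
 * 1MiB fence region; a 3MiB object doubles 1MiB -> 2MiB -> 4MiB and thus
 * takes 4MiB of GGTT space.
 */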
/**
 * i915_gem_get_ggtt_alignment - return required global GTT alignment
 * @dev_priv: i915 device
 * @size: object size
 * @tiling_mode: tiling mode
 * @fenced: is fenced alignment required or not
 *
 * Return the required global GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
				int tiling_mode, bool fenced)
{
	GEM_BUG_ON(size == 0);

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
}
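
/*
 * Example (assumed values): for the gen3 700KiB tiled object above, the
 * fenced alignment equals i915_gem_get_ggtt_size(), i.e. 1MiB, whereas on
 * gen4+ or for untiled objects the same buffer needs only 4KiB alignment.
 */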
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	int ret;

	dev_priv->mm.shrinker_no_lock_stealing = true;

	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	/* Badly fragmented mmap space? The only way we can recover
	 * space is by destroying unwanted objects. We can't randomly release
	 * mmap_offsets as userspace expects them to be persistent for the
	 * lifetime of the objects. The closest we can do is to release the
	 * offsets on purgeable objects by truncating it and marking it purged,
	 * which prevents userspace from ever using that object again.
	 */
	i915_gem_shrink(dev_priv,
			obj->base.size >> PAGE_SHIFT,
			I915_SHRINK_BOUND |
			I915_SHRINK_UNBOUND |
			I915_SHRINK_PURGEABLE);
	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	i915_gem_shrink_all(dev_priv);
	ret = drm_gem_create_mmap_offset(&obj->base);
out:
	dev_priv->mm.shrinker_no_lock_stealing = false;

	return ret;
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}
int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

out:
	i915_gem_object_put(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->madv = __I915_MADV_PURGED;
}
/* Try to discard unwanted pages */
static void
i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;

	switch (obj->madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return;
	}

	if (obj->base.filp == NULL)
		return;

	mapping = file_inode(obj->base.filp)->i_mapping;
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct sgt_iter sgt_iter;
	struct page *page;
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		i915_gem_clflush_object(obj, true);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	i915_gem_gtt_finish_object(obj);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	for_each_sgt_page(page, sgt_iter, obj->pages) {
		if (obj->dirty)
			set_page_dirty(page);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		put_page(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}
int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	const struct drm_i915_gem_object_ops *ops = obj->ops;

	if (obj->pages == NULL)
		return 0;

	if (obj->pages_pin_count)
		return -EBUSY;

	GEM_BUG_ON(obj->bind_count);

	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	list_del(&obj->global_list);

	if (obj->mapping) {
		if (is_vmalloc_addr(obj->mapping))
			vunmap(obj->mapping);
		else
			kunmap(kmap_to_page(obj->mapping));
		obj->mapping = NULL;
	}

	ops->put_pages(obj);
	obj->pages = NULL;

	i915_gem_object_invalidate(obj);

	return 0;
}
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	int page_count, i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sgt_iter sgt_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	int ret;
	gfp_t gfp;

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	page_count = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping = file_inode(obj->base.filp)->i_mapping;
	gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
	gfp |= __GFP_NORETRY | __GFP_NOWARN;
	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page)) {
			i915_gem_shrink(dev_priv,
					page_count,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND |
					I915_SHRINK_PURGEABLE);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		}
		if (IS_ERR(page)) {
			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 */
			i915_gem_shrink_all(dev_priv);
			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto err_pages;
			}
		}
#ifdef CONFIG_SWIOTLB
		if (swiotlb_nr_tbl()) {
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
			sg = sg_next(sg);
			continue;
		}
#endif
		if (!i || page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
#endif
		sg_mark_end(sg);
	obj->pages = st;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret)
		goto err_pages;

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);

	if (obj->tiling_mode != I915_TILING_NONE &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		i915_gem_object_pin_pages(obj);

	return 0;

err_pages:
	sg_mark_end(sg);
	for_each_sgt_page(page, sgt_iter, st)
		put_page(page);
	sg_free_table(st);
	kfree(st);

	/* shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}
/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_get_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_put_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	const struct drm_i915_gem_object_ops *ops = obj->ops;
	int ret;

	if (obj->pages)
		return 0;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	BUG_ON(obj->pages_pin_count);

	ret = ops->get_pages(obj);
	if (ret)
		return ret;

	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);

	obj->get_page.sg = obj->pages->sgl;
	obj->get_page.last = 0;

	return 0;
}
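
/*
 * Caller sketch (matching the pattern used throughout this file):
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret)
 *		return ret;
 *	i915_gem_object_pin_pages(obj);
 *	... access obj->pages ...
 *	i915_gem_object_unpin_pages(obj);
 *
 * Pinning prevents the shrinker from calling i915_gem_object_put_pages()
 * while the sg_table is in use.
 */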
/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->pages;
	struct sgt_iter sgt_iter;
	struct page *page;
	struct page *stack_pages[32];
	struct page **pages = stack_pages;
	unsigned long i = 0;
	void *addr;

	/* A single page can always be kmapped */
	if (n_pages == 1)
		return kmap(sg_page(sgt->sgl));

	if (n_pages > ARRAY_SIZE(stack_pages)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
		if (!pages)
			return NULL;
	}

	for_each_sgt_page(page, sgt_iter, sgt)
		pages[i++] = page;

	/* Check that we have the expected number of pages */
	GEM_BUG_ON(i != n_pages);

	addr = vmap(pages, n_pages, 0, PAGE_KERNEL);

	if (pages != stack_pages)
		drm_free_large(pages);

	return addr;
}
/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	i915_gem_object_pin_pages(obj);

	if (!obj->mapping) {
		obj->mapping = i915_gem_object_map(obj);
		if (!obj->mapping) {
			i915_gem_object_unpin_pages(obj);
			return ERR_PTR(-ENOMEM);
		}
	}

	return obj->mapping;
}
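
/*
 * Usage sketch (illustrative; the unpin helper is assumed from the
 * pin/unpin naming convention used in this file):
 *
 *	void *vaddr = i915_gem_object_pin_map(obj);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, obj->base.size);
 *	i915_gem_object_unpin_map(obj);
 */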
static void
i915_gem_object_retire__write(struct i915_gem_active *active,
			      struct drm_i915_gem_request *request)
{
	struct drm_i915_gem_object *obj =
		container_of(active, struct drm_i915_gem_object, last_write);

	intel_fb_obj_flush(obj, true, ORIGIN_CS);
}

static void
i915_gem_object_retire__read(struct i915_gem_active *active,
			     struct drm_i915_gem_request *request)
{
	int idx = request->engine->id;
	struct drm_i915_gem_object *obj =
		container_of(active, struct drm_i915_gem_object, last_read[idx]);

	GEM_BUG_ON((obj->active & (1 << idx)) == 0);

	obj->active &= ~(1 << idx);
	if (obj->active)
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	if (obj->bind_count)
		list_move_tail(&obj->global_list,
			       &request->i915->mm.bound_list);

	i915_gem_object_put(obj);
}
static bool i915_context_is_banned(const struct i915_gem_context *ctx)
{
	unsigned long elapsed;

	if (ctx->hang_stats.banned)
		return true;

	elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
	if (ctx->hang_stats.ban_period_seconds &&
	    elapsed <= ctx->hang_stats.ban_period_seconds) {
		DRM_DEBUG("context hanging too fast, banning!\n");
		return true;
	}

	return false;
}

static void i915_set_reset_status(struct i915_gem_context *ctx,
				  const bool guilty)
{
	struct i915_ctx_hang_stats *hs = &ctx->hang_stats;

	if (guilty) {
		hs->banned = i915_context_is_banned(ctx);
		hs->batch_active++;
		hs->guilty_ts = get_seconds();
	} else {
		hs->batch_pending++;
	}
}
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request;

	/* We are called by the error capture and reset at a random
	 * point in time. In particular, note that neither is crucially
	 * ordered with an interrupt. After a hang, the GPU is dead and we
	 * assume that no more writes can happen (we waited long enough for
	 * all writes that were in transaction to be flushed) - adding an
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
	 */
	list_for_each_entry(request, &engine->request_list, link) {
		if (i915_gem_request_completed(request))
			continue;

		return request;
	}

	return NULL;
}
static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request;
	bool ring_hung;

	request = i915_gem_find_active_request(engine);
	if (request == NULL)
		return;

	ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;

	i915_set_reset_status(request->ctx, ring_hung);
	list_for_each_entry_continue(request, &engine->request_list, link)
		i915_set_reset_status(request->ctx, false);
}
2453 static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
2455 struct intel_ring *ring;
2457 /* Mark all pending requests as complete so that any concurrent
2458 * (lockless) lookup doesn't try and wait upon the request as we
2461 intel_engine_init_seqno(engine, engine->last_submitted_seqno);
2464 * Clear the execlists queue up before freeing the requests, as those
2465 * are the ones that keep the context and ringbuffer backing objects
2469 if (i915.enable_execlists) {
2470 /* Ensure irq handler finishes or is cancelled. */
2471 tasklet_kill(&engine->irq_tasklet);
2473 intel_execlists_cancel_requests(engine);
2477 * We must free the requests after all the corresponding objects have
2478 * been moved off the active lists, which is the same order the normal
2479 * retire_requests function uses. This is important if objects hold
2480 * implicit references on things like e.g. ppgtt address spaces through
2483 if (!list_empty(&engine->request_list)) {
2484 struct drm_i915_gem_request *request;
2486 request = list_last_entry(&engine->request_list,
2487 struct drm_i915_gem_request,
2490 i915_gem_request_retire_upto(request);
2493 /* Having flushed all requests from all queues, we know that all
2494 * ringbuffers must now be empty. However, since we do not reclaim
2495 * all space when retiring the request (to prevent HEADs colliding
2496 * with rapid ringbuffer wraparound) the amount of available space
2497 * upon reset is less than when we start. Do one more pass over
2498 * all the ringbuffers to reset last_retired_head.
2500 list_for_each_entry(ring, &engine->buffers, link) {
2501 ring->last_retired_head = ring->tail;
2502 intel_ring_update_space(ring);
2505 engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
2508 void i915_gem_reset(struct drm_device *dev)
2510 struct drm_i915_private *dev_priv = to_i915(dev);
2511 struct intel_engine_cs *engine;
2514 * Before we free the objects from the requests, we need to inspect
2515 * them for finding the guilty party. As the requests only borrow
2516 * their reference to the objects, the inspection must be done first.
2518 for_each_engine(engine, dev_priv)
2519 i915_gem_reset_engine_status(engine);
2521 for_each_engine(engine, dev_priv)
2522 i915_gem_reset_engine_cleanup(engine);
2523 mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
2525 i915_gem_context_reset(dev);
2527 i915_gem_restore_fences(dev);
2531 i915_gem_retire_work_handler(struct work_struct *work)
2533 struct drm_i915_private *dev_priv =
2534 container_of(work, typeof(*dev_priv), gt.retire_work.work);
2535 struct drm_device *dev = &dev_priv->drm;
2537 /* Come back later if the device is busy... */
2538 if (mutex_trylock(&dev->struct_mutex)) {
2539 i915_gem_retire_requests(dev_priv);
2540 mutex_unlock(&dev->struct_mutex);
2543 /* Keep the retire handler running until we are finally idle.
2544 * We do not need to do this test under locking as in the worst case
2545 * we queue the retire worker once too often.
2547 if (READ_ONCE(dev_priv->gt.awake)) {
2548 i915_queue_hangcheck(dev_priv);
2549 queue_delayed_work(dev_priv->wq,
2550 &dev_priv->gt.retire_work,
2551 round_jiffies_up_relative(HZ));
2556 i915_gem_idle_work_handler(struct work_struct *work)
2558 struct drm_i915_private *dev_priv =
2559 container_of(work, typeof(*dev_priv), gt.idle_work.work);
2560 struct drm_device *dev = &dev_priv->drm;
2561 struct intel_engine_cs *engine;
2562 unsigned int stuck_engines;
2563 bool rearm_hangcheck;
2565 if (!READ_ONCE(dev_priv->gt.awake))
2566 return;
2568 if (READ_ONCE(dev_priv->gt.active_engines))
2569 return;
2571 rearm_hangcheck =
2572 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
2574 if (!mutex_trylock(&dev->struct_mutex)) {
2575 /* Currently busy, come back later */
2576 mod_delayed_work(dev_priv->wq,
2577 &dev_priv->gt.idle_work,
2578 msecs_to_jiffies(50));
2582 if (dev_priv->gt.active_engines)
2585 for_each_engine(engine, dev_priv)
2586 i915_gem_batch_pool_fini(&engine->batch_pool);
2588 GEM_BUG_ON(!dev_priv->gt.awake);
2589 dev_priv->gt.awake = false;
2590 rearm_hangcheck = false;
2592 /* As we have disabled hangcheck, we need to unstick any waiters still
2593 * hanging around. However, as we may be racing against the interrupt
2594 * handler or the waiters themselves, we skip enabling the fake-irq.
2596 stuck_engines = intel_kick_waiters(dev_priv);
2597 if (unlikely(stuck_engines))
2598 DRM_DEBUG_DRIVER("kicked stuck waiters (%x)...missed irq?\n",
2601 if (INTEL_GEN(dev_priv) >= 6)
2602 gen6_rps_idle(dev_priv);
2603 intel_runtime_pm_put(dev_priv);
2605 mutex_unlock(&dev->struct_mutex);
2608 if (rearm_hangcheck) {
2609 GEM_BUG_ON(!dev_priv->gt.awake);
2610 i915_queue_hangcheck(dev_priv);
2614 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
2616 struct drm_i915_gem_object *obj = to_intel_bo(gem);
2617 struct drm_i915_file_private *fpriv = file->driver_priv;
2618 struct i915_vma *vma, *vn;
2620 mutex_lock(&obj->base.dev->struct_mutex);
2621 list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
2622 if (vma->vm->file == fpriv)
2623 i915_vma_close(vma);
2624 mutex_unlock(&obj->base.dev->struct_mutex);
2628 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2629 * @dev: drm device pointer
2630 * @data: ioctl data blob
2631 * @file: drm file pointer
2633 * Returns 0 if successful, else an error is returned with the remaining time in
2634 * the timeout parameter.
2635 * -ETIME: object is still busy after timeout
2636 * -ERESTARTSYS: signal interrupted the wait
2637 * -ENOENT: object doesn't exist
2638 * Also possible, but rare:
2639 * -EAGAIN: GPU wedged
2641 * -ENODEV: Internal IRQ fail
2642 * -E?: The add request failed
2644 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2645 * non-zero timeout parameter the wait ioctl will wait for the given number of
2646 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2647 * without holding struct_mutex the object may become re-busied before this
2648 * function completes. A similar but shorter race condition exists in the busy
2649 * ioctl.
2652 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2654 struct drm_i915_gem_wait *args = data;
2655 struct drm_i915_gem_object *obj;
2656 struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
2660 if (args->flags != 0)
2661 return -EINVAL;
2663 ret = i915_mutex_lock_interruptible(dev);
2664 if (ret)
2665 return ret;
2667 obj = i915_gem_object_lookup(file, args->bo_handle);
2668 if (!obj) {
2669 mutex_unlock(&dev->struct_mutex);
2670 return -ENOENT;
2671 }
2676 for (i = 0; i < I915_NUM_ENGINES; i++) {
2677 struct drm_i915_gem_request *req;
2679 req = i915_gem_active_get(&obj->last_read[i],
2680 &obj->base.dev->struct_mutex);
2681 if (req)
2682 requests[n++] = req;
2686 i915_gem_object_put(obj);
2687 mutex_unlock(&dev->struct_mutex);
2689 for (i = 0; i < n; i++) {
2690 if (ret == 0)
2691 ret = i915_wait_request(requests[i], true,
2692 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
2693 to_rps_client(file));
2694 i915_gem_request_put(requests[i]);
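/*
 * Editor's note: an illustrative userspace-side sketch of the wait ioctl
 * documented above, using only the UAPI from i915_drm.h and libdrm's
 * drmIoctl(); error handling elided:
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.flags = 0,			// must be zero
 *		.timeout_ns = 1000000000,	// wait up to 1s
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0)
 *		;	// idle; wait.timeout_ns holds the remaining time
 *
 * A timeout_ns of 0 turns this into a non-blocking busy query, as the
 * comment above explains.
 */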
2700 __i915_gem_object_sync(struct drm_i915_gem_request *to,
2701 struct drm_i915_gem_request *from)
2705 if (to->engine == from->engine)
2706 return 0;
2708 if (!i915.semaphores) {
2709 ret = i915_wait_request(from,
2710 from->i915->mm.interruptible,
2716 int idx = intel_engine_sync_index(from->engine, to->engine);
2717 if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
2718 return 0;
2720 trace_i915_gem_ring_sync_to(to, from);
2721 ret = to->engine->semaphore.sync_to(to, from);
2725 from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
2732 * i915_gem_object_sync - sync an object to a ring.
2734 * @obj: object which may be in use on another ring.
2735 * @to: request we are wishing to use
2737 * This code is meant to abstract object synchronization with the GPU.
2738 * Conceptually we serialise writes between engines inside the GPU.
2739 * We only allow one engine to write into a buffer at any time, but
2740 * multiple readers. To ensure each has a coherent view of memory, we must:
2742 * - If there is an outstanding write request to the object, the new
2743 * request must wait for it to complete (either CPU or in hw, requests
2744 * on the same ring will be naturally ordered).
2746 * - If we are a write request (pending_write_domain is set), the new
2747 * request must wait for outstanding read requests to complete.
2749 * Returns 0 if successful, else propagates up the lower layer error.
2752 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2753 struct drm_i915_gem_request *to)
2755 struct i915_gem_active *active;
2756 unsigned long active_mask;
2759 lockdep_assert_held(&obj->base.dev->struct_mutex);
2761 active_mask = obj->active;
2762 if (!active_mask)
2763 return 0;
2765 if (obj->base.pending_write_domain) {
2766 active = obj->last_read;
2767 } else {
2768 active_mask = 1;
2769 active = &obj->last_write;
2770 }
2772 for_each_active(active_mask, idx) {
2773 struct drm_i915_gem_request *request;
2776 request = i915_gem_active_peek(&active[idx],
2777 &obj->base.dev->struct_mutex);
2778 if (!request)
2779 continue;
2781 ret = __i915_gem_object_sync(to, request);
2782 if (ret)
2783 return ret;
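/*
 * Editor's note: the net effect of the loop above, spelled out. For a
 * read-only request only the last write needs serialising; for a write
 * request every outstanding reader must be waited upon:
 *
 *	read  -> wait on obj->last_write		(active_mask = 1)
 *	write -> wait on obj->last_read[engine] for
 *	         every bit set in obj->active		(one per engine)
 */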
2789 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2791 u32 old_write_domain, old_read_domains;
2793 /* Force a pagefault for domain tracking on next user access */
2794 i915_gem_release_mmap(obj);
2796 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2797 return;
2799 old_read_domains = obj->base.read_domains;
2800 old_write_domain = obj->base.write_domain;
2802 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2803 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2805 trace_i915_gem_object_change_domain(obj,
2810 static void __i915_vma_iounmap(struct i915_vma *vma)
2812 GEM_BUG_ON(i915_vma_is_pinned(vma));
2814 if (vma->iomap == NULL)
2815 return;
2817 io_mapping_unmap(vma->iomap);
2818 vma->iomap = NULL;
2821 int i915_vma_unbind(struct i915_vma *vma)
2823 struct drm_i915_gem_object *obj = vma->obj;
2824 unsigned long active;
2827 /* First wait upon any activity as retiring the request may
2828 * have side-effects such as unpinning or even unbinding this vma.
2830 active = i915_vma_get_active(vma);
2834 /* When a closed VMA is retired, it is unbound - eek.
2835 * In order to prevent it from being recursively closed,
2836 * take a pin on the vma so that the second unbind is
2839 __i915_vma_pin(vma);
2841 for_each_active(active, idx) {
2842 ret = i915_gem_active_retire(&vma->last_read[idx],
2843 &vma->vm->dev->struct_mutex);
2848 __i915_vma_unpin(vma);
2852 GEM_BUG_ON(i915_vma_is_active(vma));
2855 if (i915_vma_is_pinned(vma))
2856 return -EBUSY;
2858 if (!drm_mm_node_allocated(&vma->node))
2859 goto destroy;
2861 GEM_BUG_ON(obj->bind_count == 0);
2862 GEM_BUG_ON(!obj->pages);
2864 if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
2865 i915_gem_object_finish_gtt(obj);
2867 /* release the fence reg _after_ flushing */
2868 ret = i915_gem_object_put_fence(obj);
2869 if (ret)
2870 return ret;
2872 __i915_vma_iounmap(vma);
2875 if (likely(!vma->vm->closed)) {
2876 trace_i915_vma_unbind(vma);
2877 vma->vm->unbind_vma(vma);
2881 drm_mm_remove_node(&vma->node);
2882 list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
2885 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
2886 obj->map_and_fenceable = false;
2887 } else if (vma->ggtt_view.pages) {
2888 sg_free_table(vma->ggtt_view.pages);
2889 kfree(vma->ggtt_view.pages);
2891 vma->ggtt_view.pages = NULL;
2894 /* Since the unbound list is global, only move to that list if
2895 * no more VMAs exist. */
2896 if (--obj->bind_count == 0)
2897 list_move_tail(&obj->global_list,
2898 &to_i915(obj->base.dev)->mm.unbound_list);
2900 /* And finally now the object is completely decoupled from this vma,
2901 * we can drop its hold on the backing storage and allow it to be
2902 * reaped by the shrinker.
2904 i915_gem_object_unpin_pages(obj);
2906 destroy:
2907 if (unlikely(vma->closed))
2908 i915_vma_destroy(vma);
2910 return 0;
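/*
 * Editor's note: the unbind sequence above is ordering-sensitive. A
 * condensed view of the steps, in the order the function performs them:
 *
 *	1. retire outstanding activity (may recurse; hence the temporary pin)
 *	2. flush the GTT domain and release the fence register (GGTT only)
 *	3. unmap any iomap and call vm->unbind_vma()
 *	4. remove the drm_mm node and move the vma to the unbound list
 *	5. drop the pages pin so the shrinker may reap the backing store
 */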
2913 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
2915 struct intel_engine_cs *engine;
2918 lockdep_assert_held(&dev_priv->drm.struct_mutex);
2920 for_each_engine(engine, dev_priv) {
2921 if (engine->last_context == NULL)
2922 continue;
2924 ret = intel_engine_idle(engine);
2925 if (ret)
2926 return ret;
2932 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
2933 unsigned long cache_level)
2935 struct drm_mm_node *gtt_space = &vma->node;
2936 struct drm_mm_node *other;
2939 * On some machines we have to be careful when putting differing types
2940 * of snoopable memory together to avoid the prefetcher crossing memory
2941 * domains and dying. During vm initialisation, we decide whether or not
2942 * these constraints apply and set the drm_mm.color_adjust
2943 * appropriately.
2945 if (vma->vm->mm.color_adjust == NULL)
2946 return true;
2948 if (!drm_mm_node_allocated(gtt_space))
2949 return true;
2951 if (list_empty(&gtt_space->node_list))
2952 return true;
2954 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2955 if (other->allocated && !other->hole_follows && other->color != cache_level)
2956 return false;
2958 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2959 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2960 return false;
2962 return true;
2966 * Finds free space in the GTT aperture and binds the object or a view of it
2968 * @obj: object to bind
2969 * @vm: address space to bind into
2970 * @ggtt_view: global gtt view if applicable
2971 * @size: requested size in bytes (can be larger than the VMA)
2972 * @alignment: requested alignment
2973 * @flags: mask of PIN_* flags to use
2975 static struct i915_vma *
2976 i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
2977 struct i915_address_space *vm,
2978 const struct i915_ggtt_view *ggtt_view,
2983 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2984 struct i915_vma *vma;
2990 i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
2991 i915_gem_obj_lookup_or_create_vma(obj, vm);
2995 size = max(size, vma->size);
2996 if (flags & PIN_MAPPABLE)
2997 size = i915_gem_get_ggtt_size(dev_priv, size, obj->tiling_mode);
3000 i915_gem_get_ggtt_alignment(dev_priv, size, obj->tiling_mode,
3001 flags & PIN_MAPPABLE);
3003 alignment = min_alignment;
3004 if (alignment & (min_alignment - 1)) {
3005 DRM_DEBUG("Invalid object alignment requested %llu, minimum %llu\n",
3006 alignment, min_alignment);
3007 return ERR_PTR(-EINVAL);
3010 start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3012 end = vma->vm->total;
3013 if (flags & PIN_MAPPABLE)
3014 end = min_t(u64, end, dev_priv->ggtt.mappable_end);
3015 if (flags & PIN_ZONE_4G)
3016 end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
3018 /* If binding the object/GGTT view requires more space than the entire
3019 * aperture has, reject it early before evicting everything in a vain
3020 * attempt to find space.
3023 DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
3024 size, obj->base.size,
3025 flags & PIN_MAPPABLE ? "mappable" : "total",
3027 return ERR_PTR(-E2BIG);
3030 ret = i915_gem_object_get_pages(obj);
3031 if (ret)
3032 return ERR_PTR(ret);
3034 i915_gem_object_pin_pages(obj);
3036 if (flags & PIN_OFFSET_FIXED) {
3037 uint64_t offset = flags & PIN_OFFSET_MASK;
3038 if (offset & (alignment - 1) || offset > end - size) {
3043 vma->node.start = offset;
3044 vma->node.size = size;
3045 vma->node.color = obj->cache_level;
3046 ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
3048 ret = i915_gem_evict_for_vma(vma);
3050 ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
3055 u32 search_flag, alloc_flag;
3057 if (flags & PIN_HIGH) {
3058 search_flag = DRM_MM_SEARCH_BELOW;
3059 alloc_flag = DRM_MM_CREATE_TOP;
3061 search_flag = DRM_MM_SEARCH_DEFAULT;
3062 alloc_flag = DRM_MM_CREATE_DEFAULT;
3065 /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3066 * so we know that we always have a minimum alignment of 4096.
3067 * The drm_mm range manager is optimised to return results
3068 * with zero alignment, so where possible use the optimal
3069 * alignment.
3071 if (alignment <= 4096)
3072 alignment = 0;
3075 ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
3083 ret = i915_gem_evict_something(vma->vm, size, alignment,
3093 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
3095 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3096 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3102 i915_gem_object_unpin_pages(obj);
3103 return ERR_PTR(ret);
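/*
 * Editor's note: a sketch of how the PIN_* placement flags handled above
 * restrict the search window (summarising the code, values illustrative):
 *
 *	start = 0, end = vm->total;
 *	PIN_OFFSET_BIAS  -> start = bias	(never place below the bias)
 *	PIN_MAPPABLE     -> end = min(end, ggtt.mappable_end)
 *	PIN_ZONE_4G      -> end = min(end, 4GiB - PAGE_SIZE)
 *	PIN_OFFSET_FIXED -> bypass the search, reserve the exact offset,
 *	                    evicting any overlapping vma first
 */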
3107 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3110 /* If we don't have a page list set up, then we're not pinned
3111 * to GPU, and we can ignore the cache flush because it'll happen
3112 * again at bind time.
3114 if (obj->pages == NULL)
3118 * Stolen memory is always coherent with the GPU as it is explicitly
3119 * marked as wc by the system, or the system is cache-coherent.
3121 if (obj->stolen || obj->phys_handle)
3124 /* If the GPU is snooping the contents of the CPU cache,
3125 * we do not need to manually clear the CPU cache lines. However,
3126 * the caches are only snooped when the render cache is
3127 * flushed/invalidated. As we always have to emit invalidations
3128 * and flushes when moving into and out of the RENDER domain, correct
3129 * snooping behaviour occurs naturally as the result of our domain
3132 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3133 obj->cache_dirty = true;
3137 trace_i915_gem_object_clflush(obj);
3138 drm_clflush_sg(obj->pages);
3139 obj->cache_dirty = false;
3144 /** Flushes the GTT write domain for the object if it's dirty. */
3146 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3148 uint32_t old_write_domain;
3150 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3153 /* No actual flushing is required for the GTT write domain. Writes
3154 * to it immediately go to main memory as far as we know, so there's
3155 * no chipset flush. It also doesn't land in render cache.
3157 * However, we do have to enforce the order so that all writes through
3158 * the GTT land before any writes to the device, such as updates to
3159 * the GTT itself.
3163 old_write_domain = obj->base.write_domain;
3164 obj->base.write_domain = 0;
3166 intel_fb_obj_flush(obj, false, ORIGIN_GTT);
3168 trace_i915_gem_object_change_domain(obj,
3169 obj->base.read_domains,
3173 /** Flushes the CPU write domain for the object if it's dirty. */
3175 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3177 uint32_t old_write_domain;
3179 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3182 if (i915_gem_clflush_object(obj, obj->pin_display))
3183 i915_gem_chipset_flush(to_i915(obj->base.dev));
3185 old_write_domain = obj->base.write_domain;
3186 obj->base.write_domain = 0;
3188 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
3190 trace_i915_gem_object_change_domain(obj,
3191 obj->base.read_domains,
3196 * Moves a single object to the GTT read, and possibly write domain.
3197 * @obj: object to act on
3198 * @write: ask for write access or read only
3200 * This function returns when the move is complete, including waiting on
3201 * flushes to occur.
3204 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3206 uint32_t old_write_domain, old_read_domains;
3207 struct i915_vma *vma;
3210 ret = i915_gem_object_wait_rendering(obj, !write);
3214 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3217 /* Flush and acquire obj->pages so that we are coherent through
3218 * direct access in memory with previous cached writes through
3219 * shmemfs and that our cache domain tracking remains valid.
3220 * For example, if the obj->filp was moved to swap without us
3221 * being notified and releasing the pages, we would mistakenly
3222 * continue to assume that the obj remained out of the CPU cached
3225 ret = i915_gem_object_get_pages(obj);
3229 i915_gem_object_flush_cpu_write_domain(obj);
3231 /* Serialise direct access to this object with the barriers for
3232 * coherent writes from the GPU, by effectively invalidating the
3233 * GTT domain upon first access.
3235 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3238 old_write_domain = obj->base.write_domain;
3239 old_read_domains = obj->base.read_domains;
3241 /* It should now be out of any other write domains, and we can update
3242 * the domain values for our changes.
3244 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3245 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3247 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3248 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3252 trace_i915_gem_object_change_domain(obj,
3256 /* And bump the LRU for this access */
3257 vma = i915_gem_obj_to_ggtt(obj);
3259 drm_mm_node_allocated(&vma->node) &&
3260 !i915_vma_is_active(vma))
3261 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3267 * Changes the cache-level of an object across all VMA.
3268 * @obj: object to act on
3269 * @cache_level: new cache level to set for the object
3271 * After this function returns, the object will be in the new cache-level
3272 * across all GTT and the contents of the backing storage will be coherent,
3273 * with respect to the new cache-level. In order to keep the backing storage
3274 * coherent for all users, we only allow a single cache level to be set
3275 * globally on the object and prevent it from being changed whilst the
3276 * hardware is reading from the object. That is if the object is currently
3277 * on the scanout it will be set to uncached (or equivalent display
3278 * cache coherency) and all non-MOCS GPU access will also be uncached so
3279 * that all direct access to the scanout remains coherent.
3281 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3282 enum i915_cache_level cache_level)
3284 struct i915_vma *vma;
3287 if (obj->cache_level == cache_level)
3290 /* Inspect the list of currently bound VMA and unbind any that would
3291 * be invalid given the new cache-level. This is principally to
3292 * catch the issue of the CS prefetch crossing page boundaries and
3293 * reading an invalid PTE on older architectures.
3296 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3297 if (!drm_mm_node_allocated(&vma->node))
3300 if (i915_vma_is_pinned(vma)) {
3301 DRM_DEBUG("can not change the cache level of pinned objects\n");
3305 if (i915_gem_valid_gtt_space(vma, cache_level))
3308 ret = i915_vma_unbind(vma);
3312 /* As unbinding may affect other elements in the
3313 * obj->vma_list (due to side-effects from retiring
3314 * an active vma), play safe and restart the iterator.
3319 /* We can reuse the existing drm_mm nodes but need to change the
3320 * cache-level on the PTE. We could simply unbind them all and
3321 * rebind with the correct cache-level on next use. However since
3322 * we already have a valid slot, dma mapping, pages etc, we may as well
3323 * rewrite the PTE in the belief that doing so tramples upon less
3324 * state and so involves less work.
3326 if (obj->bind_count) {
3327 /* Before we change the PTE, the GPU must not be accessing it.
3328 * If we wait upon the object, we know that all the bound
3329 * VMA are no longer active.
3331 ret = i915_gem_object_wait_rendering(obj, false);
3335 if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
3336 /* Access to snoopable pages through the GTT is
3337 * incoherent and on some machines causes a hard
3338 * lockup. Relinquish the CPU mmapping to force
3339 * userspace to refault in the pages and we can
3340 * then double check if the GTT mapping is still
3341 * valid for that pointer access.
3343 i915_gem_release_mmap(obj);
3345 /* As we no longer need a fence for GTT access,
3346 * we can relinquish it now (and so prevent having
3347 * to steal a fence from someone else on the next
3348 * fence request). Note GPU activity would have
3349 * dropped the fence as all snoopable access is
3350 * supposed to be linear.
3352 ret = i915_gem_object_put_fence(obj);
3356 /* We either have incoherent backing store and
3357 * so no GTT access or the architecture is fully
3358 * coherent. In such cases, existing GTT mmaps
3359 * ignore the cache bit in the PTE and we can
3360 * rewrite it without confusing the GPU or having
3361 * to force userspace to fault back in its mmaps.
3365 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3366 if (!drm_mm_node_allocated(&vma->node))
3369 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3375 list_for_each_entry(vma, &obj->vma_list, obj_link)
3376 vma->node.color = cache_level;
3377 obj->cache_level = cache_level;
3380 /* Flush the dirty CPU caches to the backing storage so that the
3381 * object is now coherent at its new cache level (with respect
3382 * to the access domain).
3384 if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
3385 if (i915_gem_clflush_object(obj, true))
3386 i915_gem_chipset_flush(to_i915(obj->base.dev));
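/*
 * Editor's note: a hedged usage sketch of the function above. The scanout
 * path later in this file (i915_gem_object_pin_to_display_plane) is the
 * canonical caller, switching an object to an uncached or write-through
 * level before display access:
 *
 *	ret = i915_gem_object_set_cache_level(obj,
 *			HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE);
 *	if (ret)
 *		return ret;
 */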
3392 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3393 struct drm_file *file)
3395 struct drm_i915_gem_caching *args = data;
3396 struct drm_i915_gem_object *obj;
3398 obj = i915_gem_object_lookup(file, args->handle);
3399 if (!obj)
3400 return -ENOENT;
3402 switch (obj->cache_level) {
3403 case I915_CACHE_LLC:
3404 case I915_CACHE_L3_LLC:
3405 args->caching = I915_CACHING_CACHED;
3406 break;
3408 case I915_CACHE_WT:
3409 args->caching = I915_CACHING_DISPLAY;
3410 break;
3412 default:
3413 args->caching = I915_CACHING_NONE;
3414 break;
3417 i915_gem_object_put_unlocked(obj);
3421 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3422 struct drm_file *file)
3424 struct drm_i915_private *dev_priv = to_i915(dev);
3425 struct drm_i915_gem_caching *args = data;
3426 struct drm_i915_gem_object *obj;
3427 enum i915_cache_level level;
3430 switch (args->caching) {
3431 case I915_CACHING_NONE:
3432 level = I915_CACHE_NONE;
3433 break;
3434 case I915_CACHING_CACHED:
3436 * Due to a HW issue on BXT A stepping, GPU stores via a
3437 * snooped mapping may leave stale data in a corresponding CPU
3438 * cacheline, whereas normally such cachelines would get
3439 * invalidated.
3441 if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
3442 return -ENODEV;
3444 level = I915_CACHE_LLC;
3445 break;
3446 case I915_CACHING_DISPLAY:
3447 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3448 break;
3449 default:
3450 return -EINVAL;
3453 intel_runtime_pm_get(dev_priv);
3455 ret = i915_mutex_lock_interruptible(dev);
3459 obj = i915_gem_object_lookup(file, args->handle);
3465 ret = i915_gem_object_set_cache_level(obj, level);
3467 i915_gem_object_put(obj);
3469 mutex_unlock(&dev->struct_mutex);
3471 intel_runtime_pm_put(dev_priv);
3477 * Prepare buffer for display plane (scanout, cursors, etc).
3478 * Can be called from an uninterruptible phase (modesetting) and allows
3479 * any flushes to be pipelined (for pageflips).
3482 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3484 const struct i915_ggtt_view *view)
3486 u32 old_read_domains, old_write_domain;
3489 /* Mark the pin_display early so that we account for the
3490 * display coherency whilst setting up the cache domains.
3494 /* The display engine is not coherent with the LLC cache on gen6. As
3495 * a result, we make sure that the pinning that is about to occur is
3496 * done with uncached PTEs. This is the lowest common denominator for all
3497 * chipsets.
3499 * However for gen6+, we could do better by using the GFDT bit instead
3500 * of uncaching, which would allow us to flush all the LLC-cached data
3501 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3503 ret = i915_gem_object_set_cache_level(obj,
3504 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3506 goto err_unpin_display;
3508 /* As the user may map the buffer once pinned in the display plane
3509 * (e.g. libkms for the bootup splash), we have to ensure that we
3510 * always use map_and_fenceable for all scanout buffers.
3512 ret = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3513 view->type == I915_GGTT_VIEW_NORMAL ?
3516 goto err_unpin_display;
3518 i915_gem_object_flush_cpu_write_domain(obj);
3520 old_write_domain = obj->base.write_domain;
3521 old_read_domains = obj->base.read_domains;
3523 /* It should now be out of any other write domains, and we can update
3524 * the domain values for our changes.
3526 obj->base.write_domain = 0;
3527 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3529 trace_i915_gem_object_change_domain(obj,
3541 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
3542 const struct i915_ggtt_view *view)
3544 if (WARN_ON(obj->pin_display == 0))
3545 return;
3547 i915_gem_object_ggtt_unpin_view(obj, view);
3553 * Moves a single object to the CPU read, and possibly write domain.
3554 * @obj: object to act on
3555 * @write: requesting write or read-only access
3557 * This function returns when the move is complete, including waiting on
3558 * flushes to occur.
3561 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3563 uint32_t old_write_domain, old_read_domains;
3566 ret = i915_gem_object_wait_rendering(obj, !write);
3570 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3573 i915_gem_object_flush_gtt_write_domain(obj);
3575 old_write_domain = obj->base.write_domain;
3576 old_read_domains = obj->base.read_domains;
3578 /* Flush the CPU cache if it's still invalid. */
3579 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3580 i915_gem_clflush_object(obj, false);
3582 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3585 /* It should now be out of any other write domains, and we can update
3586 * the domain values for our changes.
3588 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3590 /* If we're writing through the CPU, then the GPU read domains will
3591 * need to be invalidated at next use.
3594 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3595 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3598 trace_i915_gem_object_change_domain(obj,
3605 /* Throttle our rendering by waiting until the ring has completed our requests
3606 * emitted over 20 msec ago.
3608 * Note that if we were to use the current jiffies each time around the loop,
3609 * we wouldn't escape the function with any frames outstanding if the time to
3610 * render a frame was over 20ms.
3612 * This should get us reasonable parallelism between CPU and GPU but also
3613 * relatively low latency when blocking on a particular request to finish.
3616 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3618 struct drm_i915_private *dev_priv = to_i915(dev);
3619 struct drm_i915_file_private *file_priv = file->driver_priv;
3620 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
3621 struct drm_i915_gem_request *request, *target = NULL;
3624 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3628 /* ABI: return -EIO if already wedged */
3629 if (i915_terminally_wedged(&dev_priv->gpu_error))
3632 spin_lock(&file_priv->mm.lock);
3633 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3634 if (time_after_eq(request->emitted_jiffies, recent_enough))
3635 break;
3638 * Note that the request might not have been submitted yet, in which
3639 * case emitted_jiffies will be zero.
3641 if (!request->emitted_jiffies)
3642 continue;
3644 target = request;
3646 if (target)
3647 i915_gem_request_get(target);
3648 spin_unlock(&file_priv->mm.lock);
3653 ret = i915_wait_request(target, true, NULL, NULL);
3654 i915_gem_request_put(target);
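/*
 * Editor's note: DRM_I915_THROTTLE_JIFFIES is defined elsewhere in the
 * driver (msecs_to_jiffies(20) at the time of writing), so the loop above
 * effectively asks "find the oldest request emitted within the last 20ms
 * and block until it completes", capping each client to roughly one 20ms
 * window of outstanding rendering.
 */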
3660 i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
3662 struct drm_i915_gem_object *obj = vma->obj;
3664 if (vma->node.size < size)
3665 return true;
3667 if (alignment && vma->node.start & (alignment - 1))
3668 return true;
3670 if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
3671 return true;
3673 if (flags & PIN_OFFSET_BIAS &&
3674 vma->node.start < (flags & PIN_OFFSET_MASK))
3675 return true;
3677 if (flags & PIN_OFFSET_FIXED &&
3678 vma->node.start != (flags & PIN_OFFSET_MASK))
3679 return true;
3681 return false;
3684 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
3686 struct drm_i915_gem_object *obj = vma->obj;
3687 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3688 bool mappable, fenceable;
3689 u32 fence_size, fence_alignment;
3691 fence_size = i915_gem_get_ggtt_size(dev_priv,
3694 fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
3699 fenceable = (vma->node.size == fence_size &&
3700 (vma->node.start & (fence_alignment - 1)) == 0);
3702 mappable = (vma->node.start + fence_size <=
3703 dev_priv->ggtt.mappable_end);
3705 obj->map_and_fenceable = mappable && fenceable;
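/*
 * Editor's note: a worked example of the test above, with assumed numbers.
 * Suppose i915_gem_get_ggtt_size()/alignment() report fence_size = 2MiB
 * and fence_alignment = 2MiB for a tiled object, and ggtt.mappable_end is
 * 256MiB:
 *
 *	node.start = 6MiB, node.size = 2MiB -> fenceable (sized and aligned
 *	    to the fence) and mappable (6 + 2 <= 256)
 *	node.start = 255MiB -> misaligned and spills past the mappable
 *	    aperture (255 + 2 > 256), so map_and_fenceable is false
 */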
3709 i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
3710 struct i915_address_space *vm,
3711 const struct i915_ggtt_view *ggtt_view,
3716 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3717 struct i915_vma *vma;
3721 if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
3722 return -ENODEV;
3724 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
3725 return -EINVAL;
3727 if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
3728 return -EINVAL;
3730 if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
3731 return -EINVAL;
3733 vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
3734 i915_gem_obj_to_vma(obj, vm);
3737 if (WARN_ON(i915_vma_pin_count(vma) == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3738 return -EBUSY;
3740 if (i915_vma_misplaced(vma, size, alignment, flags)) {
3741 WARN(i915_vma_is_pinned(vma),
3742 "bo is already pinned in %s with incorrect alignment:"
3743 " offset=%08x %08x, req.alignment=%llx, req.map_and_fenceable=%d,"
3744 " obj->map_and_fenceable=%d\n",
3745 ggtt_view ? "ggtt" : "ppgtt",
3746 upper_32_bits(vma->node.start),
3747 lower_32_bits(vma->node.start),
3749 !!(flags & PIN_MAPPABLE),
3750 obj->map_and_fenceable);
3751 ret = i915_vma_unbind(vma);
3752 if (ret)
3753 return ret;
3759 if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
3760 vma = i915_gem_object_insert_into_vm(obj, vm, ggtt_view,
3761 size, alignment, flags);
3762 if (IS_ERR(vma))
3763 return PTR_ERR(vma);
3767 ret = i915_vma_bind(vma, obj->cache_level, flags);
3768 if (ret)
3769 return ret;
3771 if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
3772 (bound ^ vma->bound) & GLOBAL_BIND) {
3773 __i915_vma_set_map_and_fenceable(vma);
3774 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
3777 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
3779 __i915_vma_pin(vma);
3784 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3785 struct i915_address_space *vm,
3790 return i915_gem_object_do_pin(obj, vm,
3791 i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
3792 size, alignment, flags);
3796 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3797 const struct i915_ggtt_view *view,
3802 struct drm_device *dev = obj->base.dev;
3803 struct drm_i915_private *dev_priv = to_i915(dev);
3804 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3808 return i915_gem_object_do_pin(obj, &ggtt->base, view,
3809 size, alignment, flags | PIN_GLOBAL);
3813 i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
3814 const struct i915_ggtt_view *view)
3816 struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
3818 WARN_ON(!i915_vma_is_pinned(vma));
3819 WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
3821 __i915_vma_unpin(vma);
3825 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3826 struct drm_file *file)
3828 struct drm_i915_gem_busy *args = data;
3829 struct drm_i915_gem_object *obj;
3832 ret = i915_mutex_lock_interruptible(dev);
3836 obj = i915_gem_object_lookup(file, args->handle);
3842 /* Count all active objects as busy, even if they are currently not used
3843 * by the gpu. Users of this interface expect objects to eventually
3844 * become non-busy without any further actions.
3848 struct drm_i915_gem_request *req;
3851 for (i = 0; i < I915_NUM_ENGINES; i++) {
3852 req = i915_gem_active_peek(&obj->last_read[i],
3853 &obj->base.dev->struct_mutex);
3855 args->busy |= 1 << (16 + req->engine->exec_id);
3857 req = i915_gem_active_peek(&obj->last_write,
3858 &obj->base.dev->struct_mutex);
3860 args->busy |= req->engine->exec_id;
3863 i915_gem_object_put(obj);
3865 mutex_unlock(&dev->struct_mutex);
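/*
 * Editor's note: the busy ioctl packs its result as a bitmask. From the
 * loop above, the low word carries the exec_id of the engine currently
 * writing and the high word carries one bit per engine still reading:
 *
 *	args->busy = reader_mask << 16 | writer_exec_id;
 *
 * Userspace that only cares about "busy at all" can simply test for a
 * non-zero value.
 */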
3870 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3871 struct drm_file *file_priv)
3873 return i915_gem_ring_throttle(dev, file_priv);
3877 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3878 struct drm_file *file_priv)
3880 struct drm_i915_private *dev_priv = to_i915(dev);
3881 struct drm_i915_gem_madvise *args = data;
3882 struct drm_i915_gem_object *obj;
3885 switch (args->madv) {
3886 case I915_MADV_DONTNEED:
3887 case I915_MADV_WILLNEED:
3893 ret = i915_mutex_lock_interruptible(dev);
3897 obj = i915_gem_object_lookup(file_priv, args->handle);
3903 if (i915_gem_obj_is_pinned(obj)) {
3909 obj->tiling_mode != I915_TILING_NONE &&
3910 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
3911 if (obj->madv == I915_MADV_WILLNEED)
3912 i915_gem_object_unpin_pages(obj);
3913 if (args->madv == I915_MADV_WILLNEED)
3914 i915_gem_object_pin_pages(obj);
3917 if (obj->madv != __I915_MADV_PURGED)
3918 obj->madv = args->madv;
3920 /* if the object is no longer attached, discard its backing storage */
3921 if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
3922 i915_gem_object_truncate(obj);
3924 args->retained = obj->madv != __I915_MADV_PURGED;
3927 i915_gem_object_put(obj);
3929 mutex_unlock(&dev->struct_mutex);
3933 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3934 const struct drm_i915_gem_object_ops *ops)
3938 INIT_LIST_HEAD(&obj->global_list);
3939 for (i = 0; i < I915_NUM_ENGINES; i++)
3940 init_request_active(&obj->last_read[i],
3941 i915_gem_object_retire__read);
3942 init_request_active(&obj->last_write,
3943 i915_gem_object_retire__write);
3944 init_request_active(&obj->last_fence, NULL);
3945 INIT_LIST_HEAD(&obj->obj_exec_link);
3946 INIT_LIST_HEAD(&obj->vma_list);
3947 INIT_LIST_HEAD(&obj->batch_pool_link);
3951 obj->fence_reg = I915_FENCE_REG_NONE;
3952 obj->madv = I915_MADV_WILLNEED;
3954 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
3957 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3958 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
3959 .get_pages = i915_gem_object_get_pages_gtt,
3960 .put_pages = i915_gem_object_put_pages_gtt,
3963 struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
3966 struct drm_i915_gem_object *obj;
3967 struct address_space *mapping;
3971 obj = i915_gem_object_alloc(dev);
3972 if (!obj)
3973 return ERR_PTR(-ENOMEM);
3975 ret = drm_gem_object_init(dev, &obj->base, size);
3976 if (ret)
3977 goto fail;
3979 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
3980 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
3981 /* 965gm cannot relocate objects above 4GiB. */
3982 mask &= ~__GFP_HIGHMEM;
3983 mask |= __GFP_DMA32;
3986 mapping = file_inode(obj->base.filp)->i_mapping;
3987 mapping_set_gfp_mask(mapping, mask);
3989 i915_gem_object_init(obj, &i915_gem_object_ops);
3991 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3992 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3994 if (HAS_LLC(dev)) {
3995 /* On some devices, we can have the GPU use the LLC (the CPU
3996 * cache) for about a 10% performance improvement
3997 * compared to uncached. Graphics requests other than
3998 * display scanout are coherent with the CPU in
3999 * accessing this cache. This means in this mode we
4000 * don't need to clflush on the CPU side, and on the
4001 * GPU side we only need to flush internal caches to
4002 * get data visible to the CPU.
4004 * However, we maintain the display planes as UC, and so
4005 * need to rebind when first used as such.
4007 obj->cache_level = I915_CACHE_LLC;
4008 } else
4009 obj->cache_level = I915_CACHE_NONE;
4011 trace_i915_gem_object_create(obj);
4013 return obj;
4015 fail:
4016 i915_gem_object_free(obj);
4018 return ERR_PTR(ret);
4021 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4023 /* If we are the last user of the backing storage (be it shmemfs
4024 * pages or stolen etc), we know that the pages are going to be
4025 * immediately released. In this case, we can then skip copying
4026 * back the contents from the GPU.
4029 if (obj->madv != I915_MADV_WILLNEED)
4030 return false;
4032 if (obj->base.filp == NULL)
4033 return false;
4035 /* At first glance, this looks racy, but then again so would be
4036 * userspace racing mmap against close. However, the first external
4037 * reference to the filp can only be obtained through the
4038 * i915_gem_mmap_ioctl() which safeguards us against the user
4039 * acquiring such a reference whilst we are in the middle of
4040 * freeing the object.
4042 return atomic_long_read(&obj->base.filp->f_count) == 1;
4045 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4047 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4048 struct drm_device *dev = obj->base.dev;
4049 struct drm_i915_private *dev_priv = to_i915(dev);
4050 struct i915_vma *vma, *next;
4052 intel_runtime_pm_get(dev_priv);
4054 trace_i915_gem_object_destroy(obj);
4056 /* All file-owned VMA should have been released by this point through
4057 * i915_gem_close_object(), or earlier by i915_gem_context_close().
4058 * However, the object may also be bound into the global GTT (e.g.
4059 * older GPUs without per-process support, or for direct access through
4060 * the GTT either for the user or for scanout). Those VMA still need to
4061 * be unbound now.
4063 list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
4064 GEM_BUG_ON(!vma->is_ggtt);
4065 GEM_BUG_ON(i915_vma_is_active(vma));
4067 i915_vma_close(vma);
4069 GEM_BUG_ON(obj->bind_count);
4071 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4072 * before progressing. */
4074 i915_gem_object_unpin_pages(obj);
4076 WARN_ON(obj->frontbuffer_bits);
4078 if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
4079 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
4080 obj->tiling_mode != I915_TILING_NONE)
4081 i915_gem_object_unpin_pages(obj);
4083 if (WARN_ON(obj->pages_pin_count))
4084 obj->pages_pin_count = 0;
4085 if (discard_backing_storage(obj))
4086 obj->madv = I915_MADV_DONTNEED;
4087 i915_gem_object_put_pages(obj);
4091 if (obj->base.import_attach)
4092 drm_prime_gem_destroy(&obj->base, NULL);
4094 if (obj->ops->release)
4095 obj->ops->release(obj);
4097 drm_gem_object_release(&obj->base);
4098 i915_gem_info_remove_obj(dev_priv, obj->base.size);
4101 i915_gem_object_free(obj);
4103 intel_runtime_pm_put(dev_priv);
4106 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4107 struct i915_address_space *vm)
4109 struct i915_vma *vma;
4110 list_for_each_entry(vma, &obj->vma_list, obj_link) {
4111 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
4112 vma->vm == vm)
4113 return vma;
4116 return NULL;
4118 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
4119 const struct i915_ggtt_view *view)
4121 struct i915_vma *vma;
4125 list_for_each_entry(vma, &obj->vma_list, obj_link)
4126 if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
4127 return vma;
4129 return NULL;
4132 i915_gem_stop_engines(struct drm_device *dev)
4134 struct drm_i915_private *dev_priv = to_i915(dev);
4135 struct intel_engine_cs *engine;
4137 for_each_engine(engine, dev_priv)
4138 dev_priv->gt.stop_engine(engine);
4142 i915_gem_suspend(struct drm_device *dev)
4144 struct drm_i915_private *dev_priv = to_i915(dev);
4147 intel_suspend_gt_powersave(dev_priv);
4149 mutex_lock(&dev->struct_mutex);
4151 /* We have to flush all the executing contexts to main memory so
4152 * that they can be saved in the hibernation image. To ensure the last
4153 * context image is coherent, we have to switch away from it. That
4154 * leaves the dev_priv->kernel_context still active when
4155 * we actually suspend, and its image in memory may not match the GPU
4156 * state. Fortunately, the kernel_context is disposable and we do
4157 * not rely on its state.
4159 ret = i915_gem_switch_to_kernel_context(dev_priv);
4160 if (ret)
4161 goto err;
4163 ret = i915_gem_wait_for_idle(dev_priv);
4164 if (ret)
4165 goto err;
4167 i915_gem_retire_requests(dev_priv);
4169 /* Note that rather than stopping the engines, all we have to do
4170 * is assert that every RING_HEAD == RING_TAIL (all execution complete)
4171 * and similar for all logical context images (to ensure they are
4172 * all ready for hibernation).
4174 i915_gem_stop_engines(dev);
4175 i915_gem_context_lost(dev_priv);
4176 mutex_unlock(&dev->struct_mutex);
4178 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4179 cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4180 flush_delayed_work(&dev_priv->gt.idle_work);
4182 /* Assert that we successfully flushed all the work and
4183 * reset the GPU back to its idle, low power state.
4185 WARN_ON(dev_priv->gt.awake);
4187 return 0;
4189 err:
4190 mutex_unlock(&dev->struct_mutex);
4191 return ret;
4194 void i915_gem_resume(struct drm_device *dev)
4196 struct drm_i915_private *dev_priv = to_i915(dev);
4198 mutex_lock(&dev->struct_mutex);
4199 i915_gem_restore_gtt_mappings(dev);
4201 /* As we didn't flush the kernel context before suspend, we cannot
4202 * guarantee that the context image is complete. So let's just reset
4203 * it and start again.
4205 if (i915.enable_execlists)
4206 intel_lr_context_reset(dev_priv, dev_priv->kernel_context);
4208 mutex_unlock(&dev->struct_mutex);
4211 void i915_gem_init_swizzling(struct drm_device *dev)
4213 struct drm_i915_private *dev_priv = to_i915(dev);
4215 if (INTEL_INFO(dev)->gen < 5 ||
4216 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4217 return;
4219 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4220 DISP_TILE_SURFACE_SWIZZLING);
4222 if (IS_GEN5(dev))
4223 return;
4225 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4226 if (IS_GEN6(dev))
4227 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4228 else if (IS_GEN7(dev))
4229 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4230 else if (IS_GEN8(dev))
4231 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4236 static void init_unused_ring(struct drm_device *dev, u32 base)
4238 struct drm_i915_private *dev_priv = to_i915(dev);
4240 I915_WRITE(RING_CTL(base), 0);
4241 I915_WRITE(RING_HEAD(base), 0);
4242 I915_WRITE(RING_TAIL(base), 0);
4243 I915_WRITE(RING_START(base), 0);
4246 static void init_unused_rings(struct drm_device *dev)
4248 if (IS_I830(dev) || IS_845G(dev)) {
4249 init_unused_ring(dev, PRB1_BASE);
4250 init_unused_ring(dev, SRB0_BASE);
4251 init_unused_ring(dev, SRB1_BASE);
4252 init_unused_ring(dev, SRB2_BASE);
4253 init_unused_ring(dev, SRB3_BASE);
4254 } else if (IS_GEN2(dev)) {
4255 init_unused_ring(dev, SRB0_BASE);
4256 init_unused_ring(dev, SRB1_BASE);
4257 } else if (IS_GEN3(dev)) {
4258 init_unused_ring(dev, PRB1_BASE);
4259 init_unused_ring(dev, PRB2_BASE);
4264 i915_gem_init_hw(struct drm_device *dev)
4266 struct drm_i915_private *dev_priv = to_i915(dev);
4267 struct intel_engine_cs *engine;
4270 /* Double layer security blanket, see i915_gem_init() */
4271 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4273 if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
4274 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4276 if (IS_HASWELL(dev))
4277 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4278 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4280 if (HAS_PCH_NOP(dev)) {
4281 if (IS_IVYBRIDGE(dev)) {
4282 u32 temp = I915_READ(GEN7_MSG_CTL);
4283 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4284 I915_WRITE(GEN7_MSG_CTL, temp);
4285 } else if (INTEL_INFO(dev)->gen >= 7) {
4286 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4287 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4288 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4292 i915_gem_init_swizzling(dev);
4295 * At least 830 can leave some of the unused rings
4296 * "active" (i.e. head != tail) after resume, which
4297 * will prevent C3 entry. Make sure all unused rings
4298 * are totally idle.
4300 init_unused_rings(dev);
4302 BUG_ON(!dev_priv->kernel_context);
4304 ret = i915_ppgtt_init_hw(dev);
4305 if (ret) {
4306 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
4307 goto out;
4308 }
4310 /* Need to do basic initialisation of all rings first: */
4311 for_each_engine(engine, dev_priv) {
4312 ret = engine->init_hw(engine);
4313 if (ret)
4314 goto out;
4317 intel_mocs_init_l3cc_table(dev);
4319 /* We can't enable contexts until all firmware is loaded */
4320 ret = intel_guc_setup(dev);
4321 if (ret)
4322 goto out;
4324 out:
4325 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4326 return ret;
4329 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
4331 if (INTEL_INFO(dev_priv)->gen < 6)
4332 return false;
4334 /* TODO: make semaphores and Execlists play nicely together */
4335 if (i915.enable_execlists)
4336 return false;
4341 #ifdef CONFIG_INTEL_IOMMU
4342 /* Enable semaphores on SNB when IO remapping is off */
4343 if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
4344 return false;
4345 #endif
4347 return true;
4350 int i915_gem_init(struct drm_device *dev)
4352 struct drm_i915_private *dev_priv = to_i915(dev);
4355 mutex_lock(&dev->struct_mutex);
4357 if (!i915.enable_execlists) {
4358 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
4359 dev_priv->gt.stop_engine = intel_engine_stop;
4361 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
4362 dev_priv->gt.stop_engine = intel_logical_ring_stop;
4365 /* This is just a security blanket to placate dragons.
4366 * On some systems, we very sporadically observe that the first TLBs
4367 * used by the CS may be stale, despite us poking the TLB reset. If
4368 * we hold the forcewake during initialisation these problems
4369 * just magically go away.
4371 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4373 i915_gem_init_userptr(dev_priv);
4375 ret = i915_gem_init_ggtt(dev_priv);
4376 if (ret)
4377 goto out_unlock;
4379 ret = i915_gem_context_init(dev);
4380 if (ret)
4381 goto out_unlock;
4383 ret = intel_engines_init(dev);
4384 if (ret)
4385 goto out_unlock;
4387 ret = i915_gem_init_hw(dev);
4388 if (ret == -EIO) {
4389 /* Allow engine initialisation to fail by marking the GPU as
4390 * wedged. But we only want to do this where the GPU is angry;
4391 * for all other failures, such as an allocation failure, bail.
4393 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4394 atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
4395 ret = 0;
4396 }
4398 out_unlock:
4399 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4400 mutex_unlock(&dev->struct_mutex);
4402 return ret;
4406 i915_gem_cleanup_engines(struct drm_device *dev)
4408 struct drm_i915_private *dev_priv = to_i915(dev);
4409 struct intel_engine_cs *engine;
4411 for_each_engine(engine, dev_priv)
4412 dev_priv->gt.cleanup_engine(engine);
4416 init_engine_lists(struct intel_engine_cs *engine)
4418 INIT_LIST_HEAD(&engine->request_list);
4422 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
4424 struct drm_device *dev = &dev_priv->drm;
4426 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
4427 !IS_CHERRYVIEW(dev_priv))
4428 dev_priv->num_fence_regs = 32;
4429 else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
4430 IS_I945GM(dev_priv) || IS_G33(dev_priv))
4431 dev_priv->num_fence_regs = 16;
4433 dev_priv->num_fence_regs = 8;
4435 if (intel_vgpu_active(dev_priv))
4436 dev_priv->num_fence_regs =
4437 I915_READ(vgtif_reg(avail_rs.fence_num));
4439 /* Initialize fence registers to zero */
4440 i915_gem_restore_fences(dev);
4442 i915_gem_detect_bit_6_swizzle(dev);
4446 i915_gem_load_init(struct drm_device *dev)
4448 struct drm_i915_private *dev_priv = to_i915(dev);
4452 kmem_cache_create("i915_gem_object",
4453 sizeof(struct drm_i915_gem_object), 0,
4457 kmem_cache_create("i915_gem_vma",
4458 sizeof(struct i915_vma), 0,
4461 dev_priv->requests =
4462 kmem_cache_create("i915_gem_request",
4463 sizeof(struct drm_i915_gem_request), 0,
4467 INIT_LIST_HEAD(&dev_priv->context_list);
4468 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4469 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4470 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4471 for (i = 0; i < I915_NUM_ENGINES; i++)
4472 init_engine_lists(&dev_priv->engine[i]);
4473 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4474 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4475 INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
4476 i915_gem_retire_work_handler);
4477 INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
4478 i915_gem_idle_work_handler);
4479 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
4480 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4482 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4484 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4486 init_waitqueue_head(&dev_priv->pending_flip_queue);
4488 dev_priv->mm.interruptible = true;
4490 mutex_init(&dev_priv->fb_tracking.lock);
4493 void i915_gem_load_cleanup(struct drm_device *dev)
4495 struct drm_i915_private *dev_priv = to_i915(dev);
4497 kmem_cache_destroy(dev_priv->requests);
4498 kmem_cache_destroy(dev_priv->vmas);
4499 kmem_cache_destroy(dev_priv->objects);
4502 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
4504 struct drm_i915_gem_object *obj;
4506 /* Called just before we write the hibernation image.
4508 * We need to update the domain tracking to reflect that the CPU
4509 * will be accessing all the pages to create and restore from the
4510 * hibernation, and so upon restoration those pages will be in the
4511 * CPU domain.
4513 * To make sure the hibernation image contains the latest state,
4514 * we update that state just before writing out the image.
4517 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
4518 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4519 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4522 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4523 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4524 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4530 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4532 struct drm_i915_file_private *file_priv = file->driver_priv;
4533 struct drm_i915_gem_request *request;
4535 /* Clean up our request list when the client is going away, so that
4536 * later retire_requests won't dereference our soon-to-be-gone
4537 * file_priv.
4539 spin_lock(&file_priv->mm.lock);
4540 list_for_each_entry(request, &file_priv->mm.request_list, client_list)
4541 request->file_priv = NULL;
4542 spin_unlock(&file_priv->mm.lock);
4544 if (!list_empty(&file_priv->rps.link)) {
4545 spin_lock(&to_i915(dev)->rps.client_lock);
4546 list_del(&file_priv->rps.link);
4547 spin_unlock(&to_i915(dev)->rps.client_lock);
4551 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4553 struct drm_i915_file_private *file_priv;
4556 DRM_DEBUG_DRIVER("\n");
4558 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4562 file->driver_priv = file_priv;
4563 file_priv->dev_priv = to_i915(dev);
4564 file_priv->file = file;
4565 INIT_LIST_HEAD(&file_priv->rps.link);
4567 spin_lock_init(&file_priv->mm.lock);
4568 INIT_LIST_HEAD(&file_priv->mm.request_list);
4570 file_priv->bsd_engine = -1;
4572 ret = i915_gem_context_open(dev, file);
4580 * i915_gem_track_fb - update frontbuffer tracking
4581 * @old: current GEM buffer for the frontbuffer slots
4582 * @new: new GEM buffer for the frontbuffer slots
4583 * @frontbuffer_bits: bitmask of frontbuffer slots
4585 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
4586 * from @old and setting them in @new. Both @old and @new can be NULL.
4588 void i915_gem_track_fb(struct drm_i915_gem_object *old,
4589 struct drm_i915_gem_object *new,
4590 unsigned frontbuffer_bits)
4593 WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
4594 WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
4595 old->frontbuffer_bits &= ~frontbuffer_bits;
4599 WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
4600 WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
4601 new->frontbuffer_bits |= frontbuffer_bits;
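/*
 * Editor's note: an illustrative call from a modeset/flip path, assuming
 * old_obj/new_obj are the outgoing and incoming framebuffer objects and
 * the caller holds struct_mutex (names hypothetical):
 *
 *	i915_gem_track_fb(old_obj, new_obj,
 *			  to_intel_plane(plane)->frontbuffer_bit);
 *
 * Either pointer may be NULL, e.g. when enabling or disabling a plane.
 */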
4605 /* All the new VM stuff */
4606 u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
4607 struct i915_address_space *vm)
4609 struct drm_i915_private *dev_priv = to_i915(o->base.dev);
4610 struct i915_vma *vma;
4612 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
4614 list_for_each_entry(vma, &o->vma_list, obj_link) {
4615 if (vma->is_ggtt &&
4616 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
4617 continue;
4618 if (vma->vm == vm)
4619 return vma->node.start;
4622 WARN(1, "%s vma for this object not found.\n",
4623 i915_is_ggtt(vm) ? "global" : "ppgtt");
4627 u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
4628 const struct i915_ggtt_view *view)
4630 struct i915_vma *vma;
4632 list_for_each_entry(vma, &o->vma_list, obj_link)
4633 if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
4634 return vma->node.start;
4636 WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
4640 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
4641 struct i915_address_space *vm)
4643 struct i915_vma *vma;
4645 list_for_each_entry(vma, &o->vma_list, obj_link) {
4646 if (vma->is_ggtt &&
4647 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
4648 continue;
4649 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
4650 return true;
4656 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
4657 const struct i915_ggtt_view *view)
4659 struct i915_vma *vma;
4661 list_for_each_entry(vma, &o->vma_list, obj_link)
4662 if (vma->is_ggtt &&
4663 i915_ggtt_view_equal(&vma->ggtt_view, view) &&
4664 drm_mm_node_allocated(&vma->node))
4665 return true;
4667 return false;
4670 unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
4672 struct i915_vma *vma;
4674 GEM_BUG_ON(list_empty(&o->vma_list));
4676 list_for_each_entry(vma, &o->vma_list, obj_link) {
4677 if (vma->is_ggtt &&
4678 vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
4679 return vma->node.size;
4685 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
4687 struct i915_vma *vma;
4688 list_for_each_entry(vma, &obj->vma_list, obj_link)
4689 if (i915_vma_is_pinned(vma))
4690 return true;
4692 return false;
4695 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
4697 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
4701 /* Only default objects have per-page dirty tracking */
4702 if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
4703 return NULL;
4705 page = i915_gem_object_get_page(obj, n);
4706 set_page_dirty(page);
4710 /* Allocate a new GEM object and fill it with the supplied data */
4711 struct drm_i915_gem_object *
4712 i915_gem_object_create_from_data(struct drm_device *dev,
4713 const void *data, size_t size)
4715 struct drm_i915_gem_object *obj;
4716 struct sg_table *sg;
4720 obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
4721 if (IS_ERR(obj))
4722 return obj;
4724 ret = i915_gem_object_set_to_cpu_domain(obj, true);
4725 if (ret)
4726 goto fail;
4728 ret = i915_gem_object_get_pages(obj);
4729 if (ret)
4730 goto fail;
4732 i915_gem_object_pin_pages(obj);
4733 sg = obj->pages;
4734 bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
4735 obj->dirty = 1; /* Backing store is now out of date */
4736 i915_gem_object_unpin_pages(obj);
4738 if (WARN_ON(bytes != size)) {
4739 DRM_ERROR("Incomplete copy, wrote %zu of %zu\n", bytes, size);
4740 ret = -EFAULT;
4741 goto fail;
4742 }
4744 return obj;
4746 fail:
4747 i915_gem_object_put(obj);
4748 return ERR_PTR(ret);