2 * Copyright © 2008-2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
32 #include "i915_gem_dmabuf.h"
33 #include "i915_vgpu.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36 #include "intel_mocs.h"
37 #include <linux/reservation.h>
38 #include <linux/shmem_fs.h>
39 #include <linux/slab.h>
40 #include <linux/swap.h>
41 #include <linux/pci.h>
42 #include <linux/dma-buf.h>
44 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
45 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
47 i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
49 i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
51 static bool cpu_cache_is_coherent(struct drm_device *dev,
52 enum i915_cache_level level)
54 return HAS_LLC(dev) || level != I915_CACHE_NONE;
57 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
59 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
62 if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
65 return obj->pin_display;
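/*
 * insert_mappable_node() reserves a scratch drm_mm node inside the
 * CPU-mappable portion of the global GTT (bounded by ggtt.mappable_end).
 * The GTT pread/pwrite paths below fall back to this single-page node
 * when the object itself cannot be pinned into the mappable aperture.
 */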
69 insert_mappable_node(struct drm_i915_private *i915,
70 struct drm_mm_node *node, u32 size)
72 memset(node, 0, sizeof(*node));
73 return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
75 i915->ggtt.mappable_end,
76 DRM_MM_SEARCH_DEFAULT,
77 DRM_MM_CREATE_DEFAULT);
81 remove_mappable_node(struct drm_mm_node *node)
83 drm_mm_remove_node(node);
86 /* some bookkeeping */
87 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
90 spin_lock(&dev_priv->mm.object_stat_lock);
91 dev_priv->mm.object_count++;
92 dev_priv->mm.object_memory += size;
93 spin_unlock(&dev_priv->mm.object_stat_lock);
96 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
99 spin_lock(&dev_priv->mm.object_stat_lock);
100 dev_priv->mm.object_count--;
101 dev_priv->mm.object_memory -= size;
102 spin_unlock(&dev_priv->mm.object_stat_lock);
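/*
 * i915_gem_wait_for_error() blocks callers until any GPU reset currently
 * in progress has completed, giving up after the 10 second timeout below
 * so that a wedged reset cannot hang userspace indefinitely.
 */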
106 i915_gem_wait_for_error(struct i915_gpu_error *error)
110 if (!i915_reset_in_progress(error))
114 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
115 * userspace. If it takes that long something really bad is going on and
116 * we should simply try to bail out and fail as gracefully as possible.
118 ret = wait_event_interruptible_timeout(error->reset_queue,
119 !i915_reset_in_progress(error),
122 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
124 } else if (ret < 0) {
131 int i915_mutex_lock_interruptible(struct drm_device *dev)
133 struct drm_i915_private *dev_priv = to_i915(dev);
136 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
140 ret = mutex_lock_interruptible(&dev->struct_mutex);
144 WARN_ON(i915_verify_lists(dev));
149 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
150 struct drm_file *file)
152 struct drm_i915_private *dev_priv = to_i915(dev);
153 struct i915_ggtt *ggtt = &dev_priv->ggtt;
154 struct drm_i915_gem_get_aperture *args = data;
155 struct i915_vma *vma;
159 mutex_lock(&dev->struct_mutex);
160 list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
162 pinned += vma->node.size;
163 list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
165 pinned += vma->node.size;
166 mutex_unlock(&dev->struct_mutex);
168 args->aper_size = ggtt->base.total;
169 args->aper_available_size = args->aper_size - pinned;
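/*
 * Phys objects keep a contiguous copy of the object contents in
 * obj->phys_handle. get_pages copies each shmem backing page into that
 * buffer and publishes a single-entry sg_table pointing at the handle's
 * bus address; put_pages copies any modifications back out to shmem.
 */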
175 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
177 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
178 char *vaddr = obj->phys_handle->vaddr;
180 struct scatterlist *sg;
183 if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
186 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
190 page = shmem_read_mapping_page(mapping, i);
192 return PTR_ERR(page);
194 src = kmap_atomic(page);
195 memcpy(vaddr, src, PAGE_SIZE);
196 drm_clflush_virt_range(vaddr, PAGE_SIZE);
203 i915_gem_chipset_flush(to_i915(obj->base.dev));
205 st = kmalloc(sizeof(*st), GFP_KERNEL);
209 if (sg_alloc_table(st, 1, GFP_KERNEL)) {
216 sg->length = obj->base.size;
218 sg_dma_address(sg) = obj->phys_handle->busaddr;
219 sg_dma_len(sg) = obj->base.size;
226 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
230 BUG_ON(obj->madv == __I915_MADV_PURGED);
232 ret = i915_gem_object_set_to_cpu_domain(obj, true);
234 /* In the event of a disaster, abandon all caches and
237 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
240 if (obj->madv == I915_MADV_DONTNEED)
244 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
245 char *vaddr = obj->phys_handle->vaddr;
248 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
252 page = shmem_read_mapping_page(mapping, i);
256 dst = kmap_atomic(page);
257 drm_clflush_virt_range(vaddr, PAGE_SIZE);
258 memcpy(dst, vaddr, PAGE_SIZE);
261 set_page_dirty(page);
262 if (obj->madv == I915_MADV_WILLNEED)
263 mark_page_accessed(page);
270 sg_free_table(obj->pages);
275 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
277 drm_pci_free(obj->base.dev, obj->phys_handle);
280 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
281 .get_pages = i915_gem_object_get_pages_phys,
282 .put_pages = i915_gem_object_put_pages_phys,
283 .release = i915_gem_object_release_phys,
287 drop_pages(struct drm_i915_gem_object *obj)
289 struct i915_vma *vma, *next;
292 i915_gem_object_get(obj);
293 list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
294 if (i915_vma_unbind(vma))
297 ret = i915_gem_object_put_pages(obj);
298 i915_gem_object_put(obj);
304 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
307 drm_dma_handle_t *phys;
310 if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
317 if (obj->madv != I915_MADV_WILLNEED)
320 if (obj->base.filp == NULL)
323 ret = drop_pages(obj);
327 /* create a new object */
328 phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
332 obj->phys_handle = phys;
333 obj->ops = &i915_gem_phys_ops;
335 return i915_gem_object_get_pages(obj);
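/*
 * pwrite into a phys object goes straight through the handle's kernel
 * mapping. The fast path copies with struct_mutex held; if that faults,
 * the lock is dropped and the copy retried with plain copy_from_user(),
 * which is safe because the physical backing store is fixed for the
 * lifetime of the object.
 */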
339 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
340 struct drm_i915_gem_pwrite *args,
341 struct drm_file *file_priv)
343 struct drm_device *dev = obj->base.dev;
344 void *vaddr = obj->phys_handle->vaddr + args->offset;
345 char __user *user_data = u64_to_user_ptr(args->data_ptr);
348 /* We manually control the domain here and pretend that it
349 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
351 ret = i915_gem_object_wait_rendering(obj, false);
355 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
356 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
357 unsigned long unwritten;
359 /* The physical object once assigned is fixed for the lifetime
360 * of the obj, so we can safely drop the lock and continue
363 mutex_unlock(&dev->struct_mutex);
364 unwritten = copy_from_user(vaddr, user_data, args->size);
365 mutex_lock(&dev->struct_mutex);
372 drm_clflush_virt_range(vaddr, args->size);
373 i915_gem_chipset_flush(to_i915(dev));
376 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
380 void *i915_gem_object_alloc(struct drm_device *dev)
382 struct drm_i915_private *dev_priv = to_i915(dev);
383 return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
386 void i915_gem_object_free(struct drm_i915_gem_object *obj)
388 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
389 kmem_cache_free(dev_priv->objects, obj);
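/*
 * i915_gem_create() is the common backend for the create and dumb-create
 * ioctls: round the requested size up to a whole page, allocate the
 * object and return a fresh handle, dropping the allocation reference
 * once the handle owns it.
 */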
393 i915_gem_create(struct drm_file *file,
394 struct drm_device *dev,
398 struct drm_i915_gem_object *obj;
402 size = roundup(size, PAGE_SIZE);
406 /* Allocate the new object */
407 obj = i915_gem_object_create(dev, size);
411 ret = drm_gem_handle_create(file, &obj->base, &handle);
412 /* drop reference from allocate - handle holds it now */
413 i915_gem_object_put_unlocked(obj);
422 i915_gem_dumb_create(struct drm_file *file,
423 struct drm_device *dev,
424 struct drm_mode_create_dumb *args)
426 /* have to work out size/pitch and return them */
427 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
428 args->size = args->pitch * args->height;
429 return i915_gem_create(file, dev,
430 args->size, &args->handle);
434 * Creates a new mm object and returns a handle to it.
435 * @dev: drm device pointer
436 * @data: ioctl data blob
437 * @file: drm file pointer
440 i915_gem_create_ioctl(struct drm_device *dev, void *data,
441 struct drm_file *file)
443 struct drm_i915_gem_create *args = data;
445 return i915_gem_create(file, dev,
446 args->size, &args->handle);
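/*
 * Swizzled copy helpers: when a backing page is subject to bit-17
 * swizzling, data is copied one 64-byte cacheline at a time with bit 6
 * of the GPU-side offset flipped (the "^ 64" below), so that the CPU
 * view matches the tiled layout the GPU uses for that page.
 */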
450 __copy_to_user_swizzled(char __user *cpu_vaddr,
451 const char *gpu_vaddr, int gpu_offset,
454 int ret, cpu_offset = 0;
457 int cacheline_end = ALIGN(gpu_offset + 1, 64);
458 int this_length = min(cacheline_end - gpu_offset, length);
459 int swizzled_gpu_offset = gpu_offset ^ 64;
461 ret = __copy_to_user(cpu_vaddr + cpu_offset,
462 gpu_vaddr + swizzled_gpu_offset,
467 cpu_offset += this_length;
468 gpu_offset += this_length;
469 length -= this_length;
476 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
477 const char __user *cpu_vaddr,
480 int ret, cpu_offset = 0;
483 int cacheline_end = ALIGN(gpu_offset + 1, 64);
484 int this_length = min(cacheline_end - gpu_offset, length);
485 int swizzled_gpu_offset = gpu_offset ^ 64;
487 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
488 cpu_vaddr + cpu_offset,
493 cpu_offset += this_length;
494 gpu_offset += this_length;
495 length -= this_length;
502 * Pins the specified object's pages and synchronizes the object with
503 * GPU accesses. Sets needs_clflush to non-zero if the caller should
504 * flush the object from the CPU cache.
506 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
513 if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
516 ret = i915_gem_object_wait_rendering(obj, true);
520 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
521 /* If we're not in the cpu read domain, set ourself into the gtt
522 * read domain and manually flush cachelines (if required). This
523 * optimizes for the case when the gpu will dirty the data
524 * anyway again before the next pread happens. */
525 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
529 ret = i915_gem_object_get_pages(obj);
533 i915_gem_object_pin_pages(obj);
538 /* Per-page copy function for the shmem pread fastpath.
539 * Flushes invalid cachelines before reading the target if
540 * needs_clflush is set. */
542 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
543 char __user *user_data,
544 bool page_do_bit17_swizzling, bool needs_clflush)
549 if (unlikely(page_do_bit17_swizzling))
552 vaddr = kmap_atomic(page);
554 drm_clflush_virt_range(vaddr + shmem_page_offset,
556 ret = __copy_to_user_inatomic(user_data,
557 vaddr + shmem_page_offset,
559 kunmap_atomic(vaddr);
561 return ret ? -EFAULT : 0;
565 shmem_clflush_swizzled_range(char *addr, unsigned long length,
568 if (unlikely(swizzled)) {
569 unsigned long start = (unsigned long) addr;
570 unsigned long end = (unsigned long) addr + length;
572 /* For swizzling simply ensure that we always flush both
573 * channels. Lame, but simple and it works. Swizzled
574 * pwrite/pread is far from a hotpath - current userspace
575 * doesn't use it at all. */
576 start = round_down(start, 128);
577 end = round_up(end, 128);
579 drm_clflush_virt_range((void *)start, end - start);
581 drm_clflush_virt_range(addr, length);
586 /* Only difference to the fast-path function is that this can handle bit17
587 * and uses non-atomic copy and kmap functions. */
589 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
590 char __user *user_data,
591 bool page_do_bit17_swizzling, bool needs_clflush)
598 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
600 page_do_bit17_swizzling);
602 if (page_do_bit17_swizzling)
603 ret = __copy_to_user_swizzled(user_data,
604 vaddr, shmem_page_offset,
607 ret = __copy_to_user(user_data,
608 vaddr + shmem_page_offset,
	return ret ? -EFAULT : 0;
615 static inline unsigned long
616 slow_user_access(struct io_mapping *mapping,
617 uint64_t page_base, int page_offset,
618 char __user *user_data,
619 unsigned long length, bool pwrite)
621 void __iomem *ioaddr;
625 ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
626 /* We can use the cpu mem copy function because this is X86. */
627 vaddr = (void __force *)ioaddr + page_offset;
629 unwritten = __copy_from_user(vaddr, user_data, length);
631 unwritten = __copy_to_user(user_data, vaddr, length);
633 io_mapping_unmap(ioaddr);
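/*
 * GTT pread: map the object (or a scratch page inserted into the
 * mappable aperture) write-combined through the GTT and copy out of it.
 * The copy into user memory may fault, so struct_mutex is dropped around
 * the per-page copies and the GTT domain is re-checked afterwards.
 */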
638 i915_gem_gtt_pread(struct drm_device *dev,
639 struct drm_i915_gem_object *obj, uint64_t size,
640 uint64_t data_offset, uint64_t data_ptr)
642 struct drm_i915_private *dev_priv = to_i915(dev);
643 struct i915_ggtt *ggtt = &dev_priv->ggtt;
644 struct drm_mm_node node;
645 char __user *user_data;
650 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
652 ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
656 ret = i915_gem_object_get_pages(obj);
658 remove_mappable_node(&node);
662 i915_gem_object_pin_pages(obj);
664 node.start = i915_gem_obj_ggtt_offset(obj);
665 node.allocated = false;
666 ret = i915_gem_object_put_fence(obj);
671 ret = i915_gem_object_set_to_gtt_domain(obj, false);
675 user_data = u64_to_user_ptr(data_ptr);
677 offset = data_offset;
679 mutex_unlock(&dev->struct_mutex);
680 if (likely(!i915.prefault_disable)) {
681 ret = fault_in_multipages_writeable(user_data, remain);
683 mutex_lock(&dev->struct_mutex);
689 /* Operation in this page
691 * page_base = page offset within aperture
692 * page_offset = offset within page
693 * page_length = bytes to copy for this page
695 u32 page_base = node.start;
696 unsigned page_offset = offset_in_page(offset);
697 unsigned page_length = PAGE_SIZE - page_offset;
698 page_length = remain < page_length ? remain : page_length;
699 if (node.allocated) {
701 ggtt->base.insert_page(&ggtt->base,
702 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
707 page_base += offset & PAGE_MASK;
709 /* This is a slow read/write as it tries to read from
	 * and write to user memory which may result in page
711 * faults, and so we cannot perform this under struct_mutex.
713 if (slow_user_access(ggtt->mappable, page_base,
714 page_offset, user_data,
715 page_length, false)) {
720 remain -= page_length;
721 user_data += page_length;
722 offset += page_length;
725 mutex_lock(&dev->struct_mutex);
726 if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
727 /* The user has modified the object whilst we tried
728 * reading from it, and we now have no idea what domain
729 * the pages should be in. As we have just been touching
730 * them directly, flush everything back to the GTT
733 ret = i915_gem_object_set_to_gtt_domain(obj, false);
737 if (node.allocated) {
739 ggtt->base.clear_range(&ggtt->base,
740 node.start, node.size,
742 i915_gem_object_unpin_pages(obj);
743 remove_mappable_node(&node);
745 i915_gem_object_ggtt_unpin(obj);
752 i915_gem_shmem_pread(struct drm_device *dev,
753 struct drm_i915_gem_object *obj,
754 struct drm_i915_gem_pread *args,
755 struct drm_file *file)
757 char __user *user_data;
760 int shmem_page_offset, page_length, ret = 0;
761 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
763 int needs_clflush = 0;
764 struct sg_page_iter sg_iter;
766 if (!i915_gem_object_has_struct_page(obj))
769 user_data = u64_to_user_ptr(args->data_ptr);
772 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
774 ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
778 offset = args->offset;
780 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
781 offset >> PAGE_SHIFT) {
782 struct page *page = sg_page_iter_page(&sg_iter);
787 /* Operation in this page
789 * shmem_page_offset = offset within page in shmem file
790 * page_length = bytes to copy for this page
792 shmem_page_offset = offset_in_page(offset);
793 page_length = remain;
794 if ((shmem_page_offset + page_length) > PAGE_SIZE)
795 page_length = PAGE_SIZE - shmem_page_offset;
797 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
798 (page_to_phys(page) & (1 << 17)) != 0;
800 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
801 user_data, page_do_bit17_swizzling,
806 mutex_unlock(&dev->struct_mutex);
808 if (likely(!i915.prefault_disable) && !prefaulted) {
809 ret = fault_in_multipages_writeable(user_data, remain);
810 /* Userspace is tricking us, but we've already clobbered
811 * its pages with the prefault and promised to write the
812 * data up to the first fault. Hence ignore any errors
813 * and just continue. */
818 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
819 user_data, page_do_bit17_swizzling,
822 mutex_lock(&dev->struct_mutex);
828 remain -= page_length;
829 user_data += page_length;
830 offset += page_length;
834 i915_gem_object_unpin_pages(obj);
840 * Reads data from the object referenced by handle.
841 * @dev: drm device pointer
842 * @data: ioctl data blob
843 * @file: drm file pointer
845 * On error, the contents of *data are undefined.
848 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
849 struct drm_file *file)
851 struct drm_i915_gem_pread *args = data;
852 struct drm_i915_gem_object *obj;
858 if (!access_ok(VERIFY_WRITE,
859 u64_to_user_ptr(args->data_ptr),
863 ret = i915_mutex_lock_interruptible(dev);
867 obj = i915_gem_object_lookup(file, args->handle);
873 /* Bounds check source. */
874 if (args->offset > obj->base.size ||
875 args->size > obj->base.size - args->offset) {
880 trace_i915_gem_object_pread(obj, args->offset, args->size);
882 ret = i915_gem_shmem_pread(dev, obj, args, file);
	/* pread for non-shmem backed objects */
885 if (ret == -EFAULT || ret == -ENODEV)
886 ret = i915_gem_gtt_pread(dev, obj, args->size,
887 args->offset, args->data_ptr);
890 i915_gem_object_put(obj);
892 mutex_unlock(&dev->struct_mutex);
896 /* This is the fast write path which cannot handle
897 * page faults in the source data
901 fast_user_write(struct io_mapping *mapping,
902 loff_t page_base, int page_offset,
903 char __user *user_data,
906 void __iomem *vaddr_atomic;
908 unsigned long unwritten;
910 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
911 /* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)vaddr_atomic + page_offset;
913 unwritten = __copy_from_user_inatomic_nocache(vaddr,
915 io_mapping_unmap_atomic(vaddr_atomic);
920 * This is the fast pwrite path, where we copy the data directly from the
921 * user into the GTT, uncached.
922 * @i915: i915 device private data
923 * @obj: i915 gem object
924 * @args: pwrite arguments structure
925 * @file: drm file pointer
928 i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
929 struct drm_i915_gem_object *obj,
930 struct drm_i915_gem_pwrite *args,
931 struct drm_file *file)
933 struct i915_ggtt *ggtt = &i915->ggtt;
934 struct drm_device *dev = obj->base.dev;
935 struct drm_mm_node node;
936 uint64_t remain, offset;
937 char __user *user_data;
939 bool hit_slow_path = false;
941 if (obj->tiling_mode != I915_TILING_NONE)
944 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
946 ret = insert_mappable_node(i915, &node, PAGE_SIZE);
950 ret = i915_gem_object_get_pages(obj);
952 remove_mappable_node(&node);
956 i915_gem_object_pin_pages(obj);
958 node.start = i915_gem_obj_ggtt_offset(obj);
959 node.allocated = false;
960 ret = i915_gem_object_put_fence(obj);
965 ret = i915_gem_object_set_to_gtt_domain(obj, true);
969 intel_fb_obj_invalidate(obj, ORIGIN_GTT);
972 user_data = u64_to_user_ptr(args->data_ptr);
973 offset = args->offset;
976 /* Operation in this page
978 * page_base = page offset within aperture
979 * page_offset = offset within page
980 * page_length = bytes to copy for this page
982 u32 page_base = node.start;
983 unsigned page_offset = offset_in_page(offset);
984 unsigned page_length = PAGE_SIZE - page_offset;
985 page_length = remain < page_length ? remain : page_length;
986 if (node.allocated) {
987 wmb(); /* flush the write before we modify the GGTT */
988 ggtt->base.insert_page(&ggtt->base,
989 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
990 node.start, I915_CACHE_NONE, 0);
991 wmb(); /* flush modifications to the GGTT (insert_page) */
993 page_base += offset & PAGE_MASK;
995 /* If we get a fault while copying data, then (presumably) our
996 * source page isn't available. Return the error and we'll
997 * retry in the slow path.
		 * If the object is not shmem backed, we retry with the
		 * path that handles page faults.
1001 if (fast_user_write(ggtt->mappable, page_base,
1002 page_offset, user_data, page_length)) {
1003 hit_slow_path = true;
1004 mutex_unlock(&dev->struct_mutex);
1005 if (slow_user_access(ggtt->mappable,
1007 page_offset, user_data,
1008 page_length, true)) {
1010 mutex_lock(&dev->struct_mutex);
1014 mutex_lock(&dev->struct_mutex);
1017 remain -= page_length;
1018 user_data += page_length;
1019 offset += page_length;
1023 if (hit_slow_path) {
1025 (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
1026 /* The user has modified the object whilst we tried
1027 * reading from it, and we now have no idea what domain
1028 * the pages should be in. As we have just been touching
1029 * them directly, flush everything back to the GTT
1032 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1036 intel_fb_obj_flush(obj, false, ORIGIN_GTT);
1038 if (node.allocated) {
1040 ggtt->base.clear_range(&ggtt->base,
1041 node.start, node.size,
1043 i915_gem_object_unpin_pages(obj);
1044 remove_mappable_node(&node);
1046 i915_gem_object_ggtt_unpin(obj);
1052 /* Per-page copy function for the shmem pwrite fastpath.
1053 * Flushes invalid cachelines before writing to the target if
1054 * needs_clflush_before is set and flushes out any written cachelines after
1055 * writing if needs_clflush is set. */
1057 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
1058 char __user *user_data,
1059 bool page_do_bit17_swizzling,
1060 bool needs_clflush_before,
1061 bool needs_clflush_after)
1066 if (unlikely(page_do_bit17_swizzling))
1069 vaddr = kmap_atomic(page);
1070 if (needs_clflush_before)
1071 drm_clflush_virt_range(vaddr + shmem_page_offset,
1073 ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
1074 user_data, page_length);
1075 if (needs_clflush_after)
1076 drm_clflush_virt_range(vaddr + shmem_page_offset,
1078 kunmap_atomic(vaddr);
1080 return ret ? -EFAULT : 0;
1083 /* Only difference to the fast-path function is that this can handle bit17
1084 * and uses non-atomic copy and kmap functions. */
1086 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
1087 char __user *user_data,
1088 bool page_do_bit17_swizzling,
1089 bool needs_clflush_before,
1090 bool needs_clflush_after)
1096 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
1097 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
1099 page_do_bit17_swizzling);
1100 if (page_do_bit17_swizzling)
1101 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
1105 ret = __copy_from_user(vaddr + shmem_page_offset,
1108 if (needs_clflush_after)
1109 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
1111 page_do_bit17_swizzling);
1114 return ret ? -EFAULT : 0;
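/*
 * shmem pwrite: walk the object's backing pages, trying the atomic kmap
 * fastpath first and falling back to the sleeping slowpath (with
 * struct_mutex dropped) when the fastpath cannot be used, e.g. for
 * bit-17 swizzled pages or when the user pages must be faulted in.
 */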
1118 i915_gem_shmem_pwrite(struct drm_device *dev,
1119 struct drm_i915_gem_object *obj,
1120 struct drm_i915_gem_pwrite *args,
1121 struct drm_file *file)
1125 char __user *user_data;
1126 int shmem_page_offset, page_length, ret = 0;
1127 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
1128 int hit_slowpath = 0;
1129 int needs_clflush_after = 0;
1130 int needs_clflush_before = 0;
1131 struct sg_page_iter sg_iter;
1133 user_data = u64_to_user_ptr(args->data_ptr);
1134 remain = args->size;
1136 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
1138 ret = i915_gem_object_wait_rendering(obj, false);
1142 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1143 /* If we're not in the cpu write domain, set ourself into the gtt
1144 * write domain and manually flush cachelines (if required). This
1145 * optimizes for the case when the gpu will use the data
1146 * right away and we therefore have to clflush anyway. */
1147 needs_clflush_after = cpu_write_needs_clflush(obj);
1149 /* Same trick applies to invalidate partially written cachelines read
1150 * before writing. */
1151 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
1152 needs_clflush_before =
1153 !cpu_cache_is_coherent(dev, obj->cache_level);
1155 ret = i915_gem_object_get_pages(obj);
1159 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1161 i915_gem_object_pin_pages(obj);
1163 offset = args->offset;
1166 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
1167 offset >> PAGE_SHIFT) {
1168 struct page *page = sg_page_iter_page(&sg_iter);
1169 int partial_cacheline_write;
1174 /* Operation in this page
1176 * shmem_page_offset = offset within page in shmem file
1177 * page_length = bytes to copy for this page
1179 shmem_page_offset = offset_in_page(offset);
1181 page_length = remain;
1182 if ((shmem_page_offset + page_length) > PAGE_SIZE)
1183 page_length = PAGE_SIZE - shmem_page_offset;
1185 /* If we don't overwrite a cacheline completely we need to be
1186 * careful to have up-to-date data by first clflushing. Don't
1187 * overcomplicate things and flush the entire patch. */
1188 partial_cacheline_write = needs_clflush_before &&
1189 ((shmem_page_offset | page_length)
1190 & (boot_cpu_data.x86_clflush_size - 1));
1192 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
1193 (page_to_phys(page) & (1 << 17)) != 0;
1195 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
1196 user_data, page_do_bit17_swizzling,
1197 partial_cacheline_write,
1198 needs_clflush_after);
1203 mutex_unlock(&dev->struct_mutex);
1204 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
1205 user_data, page_do_bit17_swizzling,
1206 partial_cacheline_write,
1207 needs_clflush_after);
1209 mutex_lock(&dev->struct_mutex);
1215 remain -= page_length;
1216 user_data += page_length;
1217 offset += page_length;
1221 i915_gem_object_unpin_pages(obj);
1225 * Fixup: Flush cpu caches in case we didn't flush the dirty
1226 * cachelines in-line while writing and the object moved
1227 * out of the cpu write domain while we've dropped the lock.
1229 if (!needs_clflush_after &&
1230 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1231 if (i915_gem_clflush_object(obj, obj->pin_display))
1232 needs_clflush_after = true;
1236 if (needs_clflush_after)
1237 i915_gem_chipset_flush(to_i915(dev));
1239 obj->cache_dirty = true;
1241 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1246 * Writes data to the object referenced by handle.
1248 * @data: ioctl data blob
1251 * On error, the contents of the buffer that were to be modified are undefined.
1254 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1255 struct drm_file *file)
1257 struct drm_i915_private *dev_priv = to_i915(dev);
1258 struct drm_i915_gem_pwrite *args = data;
1259 struct drm_i915_gem_object *obj;
1262 if (args->size == 0)
1265 if (!access_ok(VERIFY_READ,
1266 u64_to_user_ptr(args->data_ptr),
1270 if (likely(!i915.prefault_disable)) {
1271 ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
1277 intel_runtime_pm_get(dev_priv);
1279 ret = i915_mutex_lock_interruptible(dev);
1283 obj = i915_gem_object_lookup(file, args->handle);
1289 /* Bounds check destination. */
1290 if (args->offset > obj->base.size ||
1291 args->size > obj->base.size - args->offset) {
1296 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1299 /* We can only do the GTT pwrite on untiled buffers, as otherwise
1300 * it would end up going through the fenced access, and we'll get
1301 * different detiling behavior between reading and writing.
1302 * pread/pwrite currently are reading and writing from the CPU
1303 * perspective, requiring manual detiling by the client.
1305 if (!i915_gem_object_has_struct_page(obj) ||
1306 cpu_write_needs_clflush(obj)) {
1307 ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
1308 /* Note that the gtt paths might fail with non-page-backed user
1309 * pointers (e.g. gtt mappings when moving data between
1310 * textures). Fallback to the shmem path in that case. */
1313 if (ret == -EFAULT || ret == -ENOSPC) {
1314 if (obj->phys_handle)
1315 ret = i915_gem_phys_pwrite(obj, args, file);
1316 else if (i915_gem_object_has_struct_page(obj))
1317 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
1323 i915_gem_object_put(obj);
1325 mutex_unlock(&dev->struct_mutex);
1327 intel_runtime_pm_put(dev_priv);
1333 * Ensures that all rendering to the object has completed and the object is
1334 * safe to unbind from the GTT or access from the CPU.
1335 * @obj: i915 gem object
1336 * @readonly: waiting for read access or write
1339 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1342 struct reservation_object *resv;
1346 if (obj->last_write_req != NULL) {
1347 ret = i915_wait_request(obj->last_write_req);
1351 i = obj->last_write_req->engine->id;
1352 if (obj->last_read_req[i] == obj->last_write_req)
1353 i915_gem_object_retire__read(obj, i);
1355 i915_gem_object_retire__write(obj);
1358 for (i = 0; i < I915_NUM_ENGINES; i++) {
1359 if (obj->last_read_req[i] == NULL)
1362 ret = i915_wait_request(obj->last_read_req[i]);
1366 i915_gem_object_retire__read(obj, i);
1368 GEM_BUG_ON(obj->active);
1371 resv = i915_gem_object_get_dmabuf_resv(obj);
1375 err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
1376 MAX_SCHEDULE_TIMEOUT);
1385 i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
1386 struct drm_i915_gem_request *req)
1388 int ring = req->engine->id;
1390 if (obj->last_read_req[ring] == req)
1391 i915_gem_object_retire__read(obj, ring);
1392 else if (obj->last_write_req == req)
1393 i915_gem_object_retire__write(obj);
1395 if (!i915_reset_in_progress(&req->i915->gpu_error))
1396 i915_gem_request_retire_upto(req);
1399 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1400 * as the object state may change during this call.
1402 static __must_check int
1403 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1404 struct intel_rps_client *rps,
1407 struct drm_device *dev = obj->base.dev;
1408 struct drm_i915_private *dev_priv = to_i915(dev);
1409 struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
1412 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1413 BUG_ON(!dev_priv->mm.interruptible);
1419 struct drm_i915_gem_request *req;
1421 req = obj->last_write_req;
1425 requests[n++] = i915_gem_request_get(req);
1427 for (i = 0; i < I915_NUM_ENGINES; i++) {
1428 struct drm_i915_gem_request *req;
1430 req = obj->last_read_req[i];
1434 requests[n++] = i915_gem_request_get(req);
1438 mutex_unlock(&dev->struct_mutex);
1440 for (i = 0; ret == 0 && i < n; i++)
1441 ret = __i915_wait_request(requests[i], true, NULL, rps);
1442 mutex_lock(&dev->struct_mutex);
1444 for (i = 0; i < n; i++) {
1446 i915_gem_object_retire_request(obj, requests[i]);
1447 i915_gem_request_put(requests[i]);
1453 static struct intel_rps_client *to_rps_client(struct drm_file *file)
1455 struct drm_i915_file_private *fpriv = file->driver_priv;
1459 static enum fb_op_origin
1460 write_origin(struct drm_i915_gem_object *obj, unsigned domain)
1462 return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
1463 ORIGIN_GTT : ORIGIN_CPU;
1467 * Called when user space prepares to use an object with the CPU, either
1468 * through the mmap ioctl's mapping or a GTT mapping.
1470 * @data: ioctl data blob
1474 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1475 struct drm_file *file)
1477 struct drm_i915_gem_set_domain *args = data;
1478 struct drm_i915_gem_object *obj;
1479 uint32_t read_domains = args->read_domains;
1480 uint32_t write_domain = args->write_domain;
1483 /* Only handle setting domains to types used by the CPU. */
1484 if (write_domain & I915_GEM_GPU_DOMAINS)
1487 if (read_domains & I915_GEM_GPU_DOMAINS)
1490 /* Having something in the write domain implies it's in the read
1491 * domain, and only that read domain. Enforce that in the request.
1493 if (write_domain != 0 && read_domains != write_domain)
1496 ret = i915_mutex_lock_interruptible(dev);
1500 obj = i915_gem_object_lookup(file, args->handle);
1506 /* Try to flush the object off the GPU without holding the lock.
1507 * We will repeat the flush holding the lock in the normal manner
1508 * to catch cases where we are gazumped.
1510 ret = i915_gem_object_wait_rendering__nonblocking(obj,
1511 to_rps_client(file),
1516 if (read_domains & I915_GEM_DOMAIN_GTT)
1517 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1519 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1521 if (write_domain != 0)
1522 intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
1525 i915_gem_object_put(obj);
1527 mutex_unlock(&dev->struct_mutex);
1532 * Called when user space has done writes to this buffer
1534 * @data: ioctl data blob
1538 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1539 struct drm_file *file)
1541 struct drm_i915_gem_sw_finish *args = data;
1542 struct drm_i915_gem_object *obj;
1545 ret = i915_mutex_lock_interruptible(dev);
1549 obj = i915_gem_object_lookup(file, args->handle);
1555 /* Pinned buffers may be scanout, so flush the cache */
1556 if (obj->pin_display)
1557 i915_gem_object_flush_cpu_write_domain(obj);
1559 i915_gem_object_put(obj);
1561 mutex_unlock(&dev->struct_mutex);
1566 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1569 * @data: ioctl data blob
1572 * While the mapping holds a reference on the contents of the object, it doesn't
1573 * imply a ref on the object itself.
 * DRM driver writers who look at this function as an example for how to do GEM
1578 * mmap support, please don't implement mmap support like here. The modern way
1579 * to implement DRM mmap support is with an mmap offset ioctl (like
1580 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1581 * That way debug tooling like valgrind will understand what's going on, hiding
1582 * the mmap call in a driver private ioctl will break that. The i915 driver only
1583 * does cpu mmaps this way because we didn't know better.
1586 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1587 struct drm_file *file)
1589 struct drm_i915_gem_mmap *args = data;
1590 struct drm_i915_gem_object *obj;
1593 if (args->flags & ~(I915_MMAP_WC))
1596 if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1599 obj = i915_gem_object_lookup(file, args->handle);
1603 /* prime objects have no backing filp to GEM mmap
1606 if (!obj->base.filp) {
1607 i915_gem_object_put_unlocked(obj);
1611 addr = vm_mmap(obj->base.filp, 0, args->size,
1612 PROT_READ | PROT_WRITE, MAP_SHARED,
1614 if (args->flags & I915_MMAP_WC) {
1615 struct mm_struct *mm = current->mm;
1616 struct vm_area_struct *vma;
1618 if (down_write_killable(&mm->mmap_sem)) {
1619 i915_gem_object_put_unlocked(obj);
1622 vma = find_vma(mm, addr);
1625 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1628 up_write(&mm->mmap_sem);
1630 /* This may race, but that's ok, it only gets set */
1631 WRITE_ONCE(obj->has_wc_mmap, true);
1633 i915_gem_object_put_unlocked(obj);
1634 if (IS_ERR((void *)addr))
1637 args->addr_ptr = (uint64_t) addr;
1643 * i915_gem_fault - fault a page into the GTT
1644 * @vma: VMA in question
1647 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
1648 * from userspace. The fault handler takes care of binding the object to
1649 * the GTT (if needed), allocating and programming a fence register (again,
1650 * only if needed based on whether the old reg is still valid or the object
1651 * is tiled) and inserting a new PTE into the faulting process.
1653 * Note that the faulting process may involve evicting existing objects
1654 * from the GTT and/or fence registers to make room. So performance may
1655 * suffer if the GTT working set is large or there are few fence registers
1658 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1660 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1661 struct drm_device *dev = obj->base.dev;
1662 struct drm_i915_private *dev_priv = to_i915(dev);
1663 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1664 struct i915_ggtt_view view = i915_ggtt_view_normal;
1665 pgoff_t page_offset;
1668 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1670 intel_runtime_pm_get(dev_priv);
1672 /* We don't use vmf->pgoff since that has the fake offset */
1673 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1676 ret = i915_mutex_lock_interruptible(dev);
1680 trace_i915_gem_object_fault(obj, page_offset, true, write);
1682 /* Try to flush the object off the GPU first without holding the lock.
1683 * Upon reacquiring the lock, we will perform our sanity checks and then
1684 * repeat the flush holding the lock in the normal manner to catch cases
1685 * where we are gazumped.
1687 ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1691 /* Access to snoopable pages through the GTT is incoherent. */
1692 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1697 /* Use a partial view if the object is bigger than the aperture. */
1698 if (obj->base.size >= ggtt->mappable_end &&
1699 obj->tiling_mode == I915_TILING_NONE) {
		static const unsigned int chunk_size = 256; /* 1 MiB */
1702 memset(&view, 0, sizeof(view));
1703 view.type = I915_GGTT_VIEW_PARTIAL;
1704 view.params.partial.offset = rounddown(page_offset, chunk_size);
1705 view.params.partial.size =
1708 (vma->vm_end - vma->vm_start)/PAGE_SIZE -
1709 view.params.partial.offset);
1712 /* Now pin it into the GTT if needed */
1713 ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
1717 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1721 ret = i915_gem_object_get_fence(obj);
1725 /* Finally, remap it using the new GTT offset */
1726 pfn = ggtt->mappable_base +
1727 i915_gem_obj_ggtt_offset_view(obj, &view);
1730 if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
1731 /* Overriding existing pages in partial view does not cause
1732 * us any trouble as TLBs are still valid because the fault
1733 * is due to userspace losing part of the mapping or never
		 * having accessed it before (at this partial's range).
1736 unsigned long base = vma->vm_start +
1737 (view.params.partial.offset << PAGE_SHIFT);
1740 for (i = 0; i < view.params.partial.size; i++) {
1741 ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
1746 obj->fault_mappable = true;
1748 if (!obj->fault_mappable) {
1749 unsigned long size = min_t(unsigned long,
1750 vma->vm_end - vma->vm_start,
1754 for (i = 0; i < size >> PAGE_SHIFT; i++) {
1755 ret = vm_insert_pfn(vma,
1756 (unsigned long)vma->vm_start + i * PAGE_SIZE,
1762 obj->fault_mappable = true;
1764 ret = vm_insert_pfn(vma,
1765 (unsigned long)vmf->virtual_address,
1769 i915_gem_object_ggtt_unpin_view(obj, &view);
1771 mutex_unlock(&dev->struct_mutex);
1776 * We eat errors when the gpu is terminally wedged to avoid
1777 * userspace unduly crashing (gl has no provisions for mmaps to
1778 * fail). But any other -EIO isn't ours (e.g. swap in failure)
1779 * and so needs to be reported.
1781 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1782 ret = VM_FAULT_SIGBUS;
1787 * EAGAIN means the gpu is hung and we'll wait for the error
1788 * handler to reset everything when re-faulting in
1789 * i915_mutex_lock_interruptible.
1796 * EBUSY is ok: this just means that another thread
1797 * already did the job.
1799 ret = VM_FAULT_NOPAGE;
1806 ret = VM_FAULT_SIGBUS;
1809 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1810 ret = VM_FAULT_SIGBUS;
1814 intel_runtime_pm_put(dev_priv);
1819 * i915_gem_release_mmap - remove physical page mappings
1820 * @obj: obj in question
1822 * Preserve the reservation of the mmapping with the DRM core code, but
1823 * relinquish ownership of the pages back to the system.
1825 * It is vital that we remove the page mapping if we have mapped a tiled
1826 * object through the GTT and then lose the fence register due to
1827 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
1829 * mapping will then trigger a page fault on the next user access, allowing
1830 * fixup by i915_gem_fault().
1833 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1835 /* Serialisation between user GTT access and our code depends upon
1836 * revoking the CPU's PTE whilst the mutex is held. The next user
1837 * pagefault then has to wait until we release the mutex.
1839 lockdep_assert_held(&obj->base.dev->struct_mutex);
1841 if (!obj->fault_mappable)
1844 drm_vma_node_unmap(&obj->base.vma_node,
1845 obj->base.dev->anon_inode->i_mapping);
1847 /* Ensure that the CPU's PTE are revoked and there are not outstanding
1848 * memory transactions from userspace before we return. The TLB
1849 * flushing implied above by changing the PTE above *should* be
1850 * sufficient, an extra barrier here just provides us with a bit
1851 * of paranoid documentation about our requirement to serialise
1852 * memory writes before touching registers / GSM.
1856 obj->fault_mappable = false;
1860 i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1862 struct drm_i915_gem_object *obj;
1864 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1865 i915_gem_release_mmap(obj);
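/*
 * Fence size/alignment: gen2/3 hardware can only fence a power-of-two
 * sized, naturally aligned region, so tiled objects on those chips are
 * padded up accordingly; gen4+ and untiled objects only need the 4k GTT
 * page granularity.
 */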
1869 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1873 if (INTEL_INFO(dev)->gen >= 4 ||
1874 tiling_mode == I915_TILING_NONE)
1877 /* Previous chips need a power-of-two fence region when tiling */
1879 gtt_size = 1024*1024;
1881 gtt_size = 512*1024;
1883 while (gtt_size < size)
1890 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1892 * @size: object size
1893 * @tiling_mode: tiling mode
 * @fenced: is fenced alignment required or not
1896 * Return the required GTT alignment for an object, taking into account
1897 * potential fence register mapping.
1900 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1901 int tiling_mode, bool fenced)
1904 * Minimum alignment is 4k (GTT page size), but might be greater
1905 * if a fence register is needed for the object.
1907 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1908 tiling_mode == I915_TILING_NONE)
1912 * Previous chips need to be aligned to the size of the smallest
1913 * fence register that can contain the object.
1915 return i915_gem_get_gtt_size(dev, size, tiling_mode);
1918 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1920 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
1923 dev_priv->mm.shrinker_no_lock_stealing = true;
1925 ret = drm_gem_create_mmap_offset(&obj->base);
1929 /* Badly fragmented mmap space? The only way we can recover
1930 * space is by destroying unwanted objects. We can't randomly release
1931 * mmap_offsets as userspace expects them to be persistent for the
	 * lifetime of the objects. The closest we can do is to release the
1933 * offsets on purgeable objects by truncating it and marking it purged,
1934 * which prevents userspace from ever using that object again.
1936 i915_gem_shrink(dev_priv,
1937 obj->base.size >> PAGE_SHIFT,
1939 I915_SHRINK_UNBOUND |
1940 I915_SHRINK_PURGEABLE);
1941 ret = drm_gem_create_mmap_offset(&obj->base);
1945 i915_gem_shrink_all(dev_priv);
1946 ret = drm_gem_create_mmap_offset(&obj->base);
1948 dev_priv->mm.shrinker_no_lock_stealing = false;
1953 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1955 drm_gem_free_mmap_offset(&obj->base);
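/*
 * i915_gem_mmap_gtt() looks up the object, rejects purgeable buffers,
 * creates the fake mmap offset if necessary and hands it back to
 * userspace, which then mmaps the DRM fd at that offset to reach the
 * fault handler above.
 */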
1959 i915_gem_mmap_gtt(struct drm_file *file,
1960 struct drm_device *dev,
1964 struct drm_i915_gem_object *obj;
1967 ret = i915_mutex_lock_interruptible(dev);
1971 obj = i915_gem_object_lookup(file, handle);
1977 if (obj->madv != I915_MADV_WILLNEED) {
1978 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
1983 ret = i915_gem_object_create_mmap_offset(obj);
1987 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
1990 i915_gem_object_put(obj);
1992 mutex_unlock(&dev->struct_mutex);
1997 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1999 * @data: GTT mapping ioctl data
2000 * @file: GEM object info
2002 * Simply returns the fake offset to userspace so it can mmap it.
2003 * The mmap call will end up in drm_gem_mmap(), which will set things
2004 * up so we can get faults in the handler above.
2006 * The fault handler will take care of binding the object into the GTT
2007 * (since it may have been evicted to make room for something), allocating
2008 * a fence register, and mapping the appropriate aperture address into
2012 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2013 struct drm_file *file)
2015 struct drm_i915_gem_mmap_gtt *args = data;
2017 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
2020 /* Immediately discard the backing storage */
2022 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2024 i915_gem_object_free_mmap_offset(obj);
2026 if (obj->base.filp == NULL)
2029 /* Our goal here is to return as much of the memory as
2030 * is possible back to the system as we are called from OOM.
2031 * To do this we must instruct the shmfs to drop all of its
2032 * backing pages, *now*.
2034 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2035 obj->madv = __I915_MADV_PURGED;
2038 /* Try to discard unwanted pages */
2040 i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2042 struct address_space *mapping;
2044 switch (obj->madv) {
2045 case I915_MADV_DONTNEED:
2046 i915_gem_object_truncate(obj);
2047 case __I915_MADV_PURGED:
2051 if (obj->base.filp == NULL)
	mapping = file_inode(obj->base.filp)->i_mapping;
2055 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2059 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2061 struct sgt_iter sgt_iter;
2065 BUG_ON(obj->madv == __I915_MADV_PURGED);
2067 ret = i915_gem_object_set_to_cpu_domain(obj, true);
2069 /* In the event of a disaster, abandon all caches and
2070 * hope for the best.
2072 i915_gem_clflush_object(obj, true);
2073 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2076 i915_gem_gtt_finish_object(obj);
2078 if (i915_gem_object_needs_bit17_swizzle(obj))
2079 i915_gem_object_save_bit_17_swizzle(obj);
2081 if (obj->madv == I915_MADV_DONTNEED)
2084 for_each_sgt_page(page, sgt_iter, obj->pages) {
2086 set_page_dirty(page);
2088 if (obj->madv == I915_MADV_WILLNEED)
2089 mark_page_accessed(page);
2095 sg_free_table(obj->pages);
2100 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2102 const struct drm_i915_gem_object_ops *ops = obj->ops;
2104 if (obj->pages == NULL)
2107 if (obj->pages_pin_count)
2110 BUG_ON(i915_gem_obj_bound_any(obj));
2112 /* ->put_pages might need to allocate memory for the bit17 swizzle
2113 * array, hence protect them from being reaped by removing them from gtt
2115 list_del(&obj->global_list);
2118 if (is_vmalloc_addr(obj->mapping))
2119 vunmap(obj->mapping);
2121 kunmap(kmap_to_page(obj->mapping));
2122 obj->mapping = NULL;
2125 ops->put_pages(obj);
2128 i915_gem_object_invalidate(obj);
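/*
 * get_pages_gtt allocates the shmem backing pages, first with a
 * constrained GFP mask, then after shrinking our own buffers, and as a
 * last resort via the normal OOM-capable path. Physically contiguous
 * pages are coalesced into single scatterlist entries unless swiotlb is
 * active.
 */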
2134 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2136 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2138 struct address_space *mapping;
2139 struct sg_table *st;
2140 struct scatterlist *sg;
2141 struct sgt_iter sgt_iter;
2143 unsigned long last_pfn = 0; /* suppress gcc warning */
2147 /* Assert that the object is not currently in any GPU domain. As it
2148 * wasn't in the GTT, there shouldn't be any way it could have been in
2151 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2152 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2154 st = kmalloc(sizeof(*st), GFP_KERNEL);
2158 page_count = obj->base.size / PAGE_SIZE;
2159 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2164 /* Get the list of pages out of our struct file. They'll be pinned
2165 * at this point until we release them.
2167 * Fail silently without starting the shrinker
2169 mapping = file_inode(obj->base.filp)->i_mapping;
2170 gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
2171 gfp |= __GFP_NORETRY | __GFP_NOWARN;
2174 for (i = 0; i < page_count; i++) {
2175 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2177 i915_gem_shrink(dev_priv,
2180 I915_SHRINK_UNBOUND |
2181 I915_SHRINK_PURGEABLE);
2182 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2185 /* We've tried hard to allocate the memory by reaping
2186 * our own buffer, now let the real VM do its job and
2187 * go down in flames if truly OOM.
2189 i915_gem_shrink_all(dev_priv);
2190 page = shmem_read_mapping_page(mapping, i);
2192 ret = PTR_ERR(page);
2196 #ifdef CONFIG_SWIOTLB
2197 if (swiotlb_nr_tbl()) {
2199 sg_set_page(sg, page, PAGE_SIZE, 0);
2204 if (!i || page_to_pfn(page) != last_pfn + 1) {
2208 sg_set_page(sg, page, PAGE_SIZE, 0);
2210 sg->length += PAGE_SIZE;
2212 last_pfn = page_to_pfn(page);
2214 /* Check that the i965g/gm workaround works. */
2215 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2217 #ifdef CONFIG_SWIOTLB
2218 if (!swiotlb_nr_tbl())
2223 ret = i915_gem_gtt_prepare_object(obj);
2227 if (i915_gem_object_needs_bit17_swizzle(obj))
2228 i915_gem_object_do_bit_17_swizzle(obj);
2230 if (obj->tiling_mode != I915_TILING_NONE &&
2231 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2232 i915_gem_object_pin_pages(obj);
2238 for_each_sgt_page(page, sgt_iter, st)
2243 /* shmemfs first checks if there is enough memory to allocate the page
2244 * and reports ENOSPC should there be insufficient, along with the usual
2245 * ENOMEM for a genuine allocation failure.
2247 * We use ENOSPC in our driver to mean that we have run out of aperture
2248 * space and so want to translate the error from shmemfs back to our
2249 * usual understanding of ENOMEM.
2257 /* Ensure that the associated pages are gathered from the backing storage
2258 * and pinned into our object. i915_gem_object_get_pages() may be called
2259 * multiple times before they are released by a single call to
2260 * i915_gem_object_put_pages() - once the pages are no longer referenced
2261 * either as a result of memory pressure (reaping pages under the shrinker)
2262 * or as the object is itself released.
2265 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2267 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2268 const struct drm_i915_gem_object_ops *ops = obj->ops;
2274 if (obj->madv != I915_MADV_WILLNEED) {
2275 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2279 BUG_ON(obj->pages_pin_count);
2281 ret = ops->get_pages(obj);
2285 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2287 obj->get_page.sg = obj->pages->sgl;
2288 obj->get_page.last = 0;
2293 /* The 'mapping' part of i915_gem_object_pin_map() below */
2294 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
2296 unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2297 struct sg_table *sgt = obj->pages;
2298 struct sgt_iter sgt_iter;
2300 struct page *stack_pages[32];
2301 struct page **pages = stack_pages;
2302 unsigned long i = 0;
2305 /* A single page can always be kmapped */
2307 return kmap(sg_page(sgt->sgl));
2309 if (n_pages > ARRAY_SIZE(stack_pages)) {
2310 /* Too big for stack -- allocate temporary array instead */
2311 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
2316 for_each_sgt_page(page, sgt_iter, sgt)
2319 /* Check that we have the expected number of pages */
2320 GEM_BUG_ON(i != n_pages);
2322 addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
2324 if (pages != stack_pages)
2325 drm_free_large(pages);
2330 /* get, pin, and map the pages of the object into kernel space */
2331 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
2335 lockdep_assert_held(&obj->base.dev->struct_mutex);
2337 ret = i915_gem_object_get_pages(obj);
2339 return ERR_PTR(ret);
2341 i915_gem_object_pin_pages(obj);
2343 if (!obj->mapping) {
2344 obj->mapping = i915_gem_object_map(obj);
2345 if (!obj->mapping) {
2346 i915_gem_object_unpin_pages(obj);
2347 return ERR_PTR(-ENOMEM);
2351 return obj->mapping;
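/*
 * Active tracking: when a request uses a vma, the object takes a
 * reference, records the request per engine and is moved onto the
 * engine's and the VM's active lists; retiring the reads and write drops
 * it back to the inactive list and releases that reference.
 */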
2354 void i915_vma_move_to_active(struct i915_vma *vma,
2355 struct drm_i915_gem_request *req)
2357 struct drm_i915_gem_object *obj = vma->obj;
2358 struct intel_engine_cs *engine;
2360 engine = i915_gem_request_get_engine(req);
2362 /* Add a reference if we're newly entering the active list. */
2363 if (obj->active == 0)
2364 i915_gem_object_get(obj);
2365 obj->active |= intel_engine_flag(engine);
2367 list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
2368 i915_gem_request_assign(&obj->last_read_req[engine->id], req);
2370 list_move_tail(&vma->vm_link, &vma->vm->active_list);
2374 i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
2376 GEM_BUG_ON(obj->last_write_req == NULL);
2377 GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
2379 i915_gem_request_assign(&obj->last_write_req, NULL);
2380 intel_fb_obj_flush(obj, true, ORIGIN_CS);
2384 i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
2386 struct i915_vma *vma;
2388 GEM_BUG_ON(obj->last_read_req[ring] == NULL);
2389 GEM_BUG_ON(!(obj->active & (1 << ring)));
2391 list_del_init(&obj->engine_list[ring]);
2392 i915_gem_request_assign(&obj->last_read_req[ring], NULL);
2394 if (obj->last_write_req && obj->last_write_req->engine->id == ring)
2395 i915_gem_object_retire__write(obj);
2397 obj->active &= ~(1 << ring);
2401 /* Bump our place on the bound list to keep it roughly in LRU order
2402 * so that we don't steal from recently used but inactive objects
2403 * (unless we are forced to ofc!)
2405 list_move_tail(&obj->global_list,
2406 &to_i915(obj->base.dev)->mm.bound_list);
2408 list_for_each_entry(vma, &obj->vma_list, obj_link) {
2409 if (!list_empty(&vma->vm_link))
2410 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
2413 i915_gem_request_assign(&obj->last_fenced_req, NULL);
2414 i915_gem_object_put(obj);
2417 static bool i915_context_is_banned(const struct i915_gem_context *ctx)
2419 unsigned long elapsed;
2421 if (ctx->hang_stats.banned)
2424 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2425 if (ctx->hang_stats.ban_period_seconds &&
2426 elapsed <= ctx->hang_stats.ban_period_seconds) {
2427 DRM_DEBUG("context hanging too fast, banning!\n");
2434 static void i915_set_reset_status(struct i915_gem_context *ctx,
2437 struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
2440 hs->banned = i915_context_is_banned(ctx);
2442 hs->guilty_ts = get_seconds();
2444 hs->batch_pending++;
2448 struct drm_i915_gem_request *
2449 i915_gem_find_active_request(struct intel_engine_cs *engine)
2451 struct drm_i915_gem_request *request;
2453 /* We are called by the error capture and reset at a random
2454 * point in time. In particular, note that neither is crucially
2455 * ordered with an interrupt. After a hang, the GPU is dead and we
2456 * assume that no more writes can happen (we waited long enough for
2457 * all writes that were in transaction to be flushed) - adding an
2458 * extra delay for a recent interrupt is pointless. Hence, we do
2459 * not need an engine->irq_seqno_barrier() before the seqno reads.
2461 list_for_each_entry(request, &engine->request_list, list) {
2462 if (i915_gem_request_completed(request))
2471 static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
2473 struct drm_i915_gem_request *request;
2476 request = i915_gem_find_active_request(engine);
2477 if (request == NULL)
2480 ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2482 i915_set_reset_status(request->ctx, ring_hung);
2483 list_for_each_entry_continue(request, &engine->request_list, list)
2484 i915_set_reset_status(request->ctx, false);
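/*
 * Cleaning up an engine after a GPU reset: retire every object left on
 * the active list, mark the outstanding seqnos as complete, flush the
 * execlists queue and finally retire the remaining requests, in that
 * order so that the requests' implicit references are dropped last.
 */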
2487 static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
2489 struct intel_ringbuffer *buffer;
2491 while (!list_empty(&engine->active_list)) {
2492 struct drm_i915_gem_object *obj;
2494 obj = list_first_entry(&engine->active_list,
2495 struct drm_i915_gem_object,
2496 engine_list[engine->id]);
2498 i915_gem_object_retire__read(obj, engine->id);
2501 /* Mark all pending requests as complete so that any concurrent
2502 * (lockless) lookup doesn't try and wait upon the request as we
2505 intel_ring_init_seqno(engine, engine->last_submitted_seqno);
2508 * Clear the execlists queue up before freeing the requests, as those
2509 * are the ones that keep the context and ringbuffer backing objects
2513 if (i915.enable_execlists) {
2514 /* Ensure irq handler finishes or is cancelled. */
2515 tasklet_kill(&engine->irq_tasklet);
2517 intel_execlists_cancel_requests(engine);
2521 * We must free the requests after all the corresponding objects have
2522 * been moved off active lists. Which is the same order as the normal
2523 * retire_requests function does. This is important if objects hold
2524 * implicit references on things like e.g. ppgtt address spaces through
2527 if (!list_empty(&engine->request_list)) {
2528 struct drm_i915_gem_request *request;
2530 request = list_last_entry(&engine->request_list,
2531 struct drm_i915_gem_request,
2534 i915_gem_request_retire_upto(request);
2537 /* Having flushed all requests from all queues, we know that all
2538 * ringbuffers must now be empty. However, since we do not reclaim
2539 * all space when retiring the request (to prevent HEADs colliding
2540 * with rapid ringbuffer wraparound) the amount of available space
2541 * upon reset is less than when we start. Do one more pass over
2542 * all the ringbuffers to reset last_retired_head.
2544 list_for_each_entry(buffer, &engine->buffers, link) {
2545 buffer->last_retired_head = buffer->tail;
2546 intel_ring_update_space(buffer);
2549 engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
2552 void i915_gem_reset(struct drm_device *dev)
2554 struct drm_i915_private *dev_priv = to_i915(dev);
2555 struct intel_engine_cs *engine;
2558 * Before we free the objects from the requests, we need to inspect
2559 * them for finding the guilty party. As the requests only borrow
2560 * their reference to the objects, the inspection must be done first.
2562 for_each_engine(engine, dev_priv)
2563 i915_gem_reset_engine_status(engine);
2565 for_each_engine(engine, dev_priv)
2566 i915_gem_reset_engine_cleanup(engine);
2567 mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
2569 i915_gem_context_reset(dev);
2571 i915_gem_restore_fences(dev);
2573 WARN_ON(i915_verify_lists(dev));
2577 * This function clears the request list as sequence numbers are passed.
2578 * @engine: engine to retire requests on
2581 i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
2583 WARN_ON(i915_verify_lists(engine->dev));
2585 /* Retire requests first as we use it above for the early return.
2586 * If we retire requests last, we may use a later seqno and so clear
2587 * the request lists without clearing the active list, leading to confusion.
2590 while (!list_empty(&engine->request_list)) {
2591 struct drm_i915_gem_request *request;
2593 request = list_first_entry(&engine->request_list,
2594 struct drm_i915_gem_request,
2597 if (!i915_gem_request_completed(request))
2600 i915_gem_request_retire_upto(request);
2603 /* Move any buffers on the active list that are no longer referenced
2604 * by the ringbuffer to the flushing/inactive lists as appropriate,
2605 * before we free the context associated with the requests.
2607 while (!list_empty(&engine->active_list)) {
2608 struct drm_i915_gem_object *obj;
2610 obj = list_first_entry(&engine->active_list,
2611 struct drm_i915_gem_object,
2612 engine_list[engine->id]);
2614 if (!list_empty(&obj->last_read_req[engine->id]->list))
2617 i915_gem_object_retire__read(obj, engine->id);
2620 WARN_ON(i915_verify_lists(engine->dev));
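/*
 * Retire completed requests on all engines. Called with struct_mutex
 * held; once every engine's request list is empty the idle worker is
 * scheduled to park the GPU.
 */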
2623 void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
2625 struct intel_engine_cs *engine;
2627 lockdep_assert_held(&dev_priv->drm.struct_mutex);
2629 if (dev_priv->gt.active_engines == 0)
2632 GEM_BUG_ON(!dev_priv->gt.awake);
2634 for_each_engine(engine, dev_priv) {
2635 i915_gem_retire_requests_ring(engine);
2636 if (list_empty(&engine->request_list))
2637 dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
2640 if (dev_priv->gt.active_engines == 0)
2641 queue_delayed_work(dev_priv->wq,
2642 &dev_priv->gt.idle_work,
2643 msecs_to_jiffies(100));
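/*
 * Periodic retire worker: retires requests whenever struct_mutex can be
 * taken without blocking, and keeps requeueing itself (plus the
 * hangcheck) for as long as the device remains awake.
 */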
2647 i915_gem_retire_work_handler(struct work_struct *work)
2649 struct drm_i915_private *dev_priv =
2650 container_of(work, typeof(*dev_priv), gt.retire_work.work);
2651 struct drm_device *dev = &dev_priv->drm;
2653 /* Come back later if the device is busy... */
2654 if (mutex_trylock(&dev->struct_mutex)) {
2655 i915_gem_retire_requests(dev_priv);
2656 mutex_unlock(&dev->struct_mutex);
2659 /* Keep the retire handler running until we are finally idle.
2660 * We do not need to do this test under locking as in the worst-case
2661 * we queue the retire worker once too often.
2663 if (READ_ONCE(dev_priv->gt.awake)) {
2664 i915_queue_hangcheck(dev_priv);
2665 queue_delayed_work(dev_priv->wq,
2666 &dev_priv->gt.retire_work,
2667 round_jiffies_up_relative(HZ));
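/*
 * Idle worker: once no engine has pending work, drop the batch pools,
 * mark the GPU as parked (gt.awake = false), kick any waiters stuck on
 * a missed interrupt and release the RPS/runtime-pm state taken while
 * busy.
 */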
2672 i915_gem_idle_work_handler(struct work_struct *work)
2674 struct drm_i915_private *dev_priv =
2675 container_of(work, typeof(*dev_priv), gt.idle_work.work);
2676 struct drm_device *dev = &dev_priv->drm;
2677 struct intel_engine_cs *engine;
2678 unsigned int stuck_engines;
2679 bool rearm_hangcheck;
2681 if (!READ_ONCE(dev_priv->gt.awake))
2684 if (READ_ONCE(dev_priv->gt.active_engines))
2688 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
2690 if (!mutex_trylock(&dev->struct_mutex)) {
2691 /* Currently busy, come back later */
2692 mod_delayed_work(dev_priv->wq,
2693 &dev_priv->gt.idle_work,
2694 msecs_to_jiffies(50));
2698 if (dev_priv->gt.active_engines)
2701 for_each_engine(engine, dev_priv)
2702 i915_gem_batch_pool_fini(&engine->batch_pool);
2704 GEM_BUG_ON(!dev_priv->gt.awake);
2705 dev_priv->gt.awake = false;
2706 rearm_hangcheck = false;
2708 stuck_engines = intel_kick_waiters(dev_priv);
2709 if (unlikely(stuck_engines)) {
2710 DRM_DEBUG_DRIVER("kicked stuck waiters...missed irq\n");
2711 dev_priv->gpu_error.missed_irq_rings |= stuck_engines;
2714 if (INTEL_GEN(dev_priv) >= 6)
2715 gen6_rps_idle(dev_priv);
2716 intel_runtime_pm_put(dev_priv);
2718 mutex_unlock(&dev->struct_mutex);
2721 if (rearm_hangcheck) {
2722 GEM_BUG_ON(!dev_priv->gt.awake);
2723 i915_queue_hangcheck(dev_priv);
2728 * Ensures that an object will eventually get non-busy by flushing any required
2729 * write domains, emitting any outstanding lazy request and retiring any
2730 * completed requests.
2731 * @obj: object to flush
2734 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2741 for (i = 0; i < I915_NUM_ENGINES; i++) {
2742 struct drm_i915_gem_request *req;
2744 req = obj->last_read_req[i];
2748 if (i915_gem_request_completed(req))
2749 i915_gem_object_retire__read(obj, i);
2756 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2757 * @dev: drm device pointer
2758 * @data: ioctl data blob
2759 * @file: drm file pointer
2761 * Returns 0 if successful, else an error is returned with the remaining time in
2762 * the timeout parameter.
2763 * -ETIME: object is still busy after timeout
2764 * -ERESTARTSYS: signal interrupted the wait
2765 * -ENOENT: object doesn't exist
2766 * Also possible, but rare:
2767 * -EAGAIN: GPU wedged
2769 * -ENODEV: Internal IRQ fail
2770 * -E?: The add request failed
2772 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2773 * non-zero timeout parameter the wait ioctl will wait for the given number of
2774 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2775 * without holding struct_mutex the object may become re-busied before this
2776 * function completes. A similar but shorter race condition exists in the busy ioctl.
2780 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2782 struct drm_i915_gem_wait *args = data;
2783 struct drm_i915_gem_object *obj;
2784 struct drm_i915_gem_request *req[I915_NUM_ENGINES];
2788 if (args->flags != 0)
2791 ret = i915_mutex_lock_interruptible(dev);
2795 obj = i915_gem_object_lookup(file, args->bo_handle);
2797 mutex_unlock(&dev->struct_mutex);
2801 /* Need to make sure the object gets inactive eventually. */
2802 ret = i915_gem_object_flush_active(obj);
2809 /* Do this after OLR check to make sure we make forward progress polling
2810 * on this IOCTL with a timeout == 0 (like busy ioctl)
2812 if (args->timeout_ns == 0) {
2817 i915_gem_object_put(obj);
2819 for (i = 0; i < I915_NUM_ENGINES; i++) {
2820 if (obj->last_read_req[i] == NULL)
2823 req[n++] = i915_gem_request_get(obj->last_read_req[i]);
2826 mutex_unlock(&dev->struct_mutex);
2828 for (i = 0; i < n; i++) {
2830 ret = __i915_wait_request(req[i], true,
2831 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
2832 to_rps_client(file));
2833 i915_gem_request_put(req[i]);
2838 i915_gem_object_put(obj);
2839 mutex_unlock(&dev->struct_mutex);
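/*
 * Helper for i915_gem_object_sync(): orders one outstanding request
 * (from_req) with the target engine, either by waiting on the CPU when
 * semaphores are unavailable or by emitting a semaphore wait into
 * *to_req (allocating that request if needed).
 */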
2844 __i915_gem_object_sync(struct drm_i915_gem_object *obj,
2845 struct intel_engine_cs *to,
2846 struct drm_i915_gem_request *from_req,
2847 struct drm_i915_gem_request **to_req)
2849 struct intel_engine_cs *from;
2852 from = i915_gem_request_get_engine(from_req);
2856 if (i915_gem_request_completed(from_req))
2859 if (!i915.semaphores) {
2860 struct drm_i915_private *i915 = to_i915(obj->base.dev);
2861 ret = __i915_wait_request(from_req,
2862 i915->mm.interruptible,
2868 i915_gem_object_retire_request(obj, from_req);
2870 int idx = intel_ring_sync_index(from, to);
2871 u32 seqno = i915_gem_request_get_seqno(from_req);
2875 if (seqno <= from->semaphore.sync_seqno[idx])
2878 if (*to_req == NULL) {
2879 struct drm_i915_gem_request *req;
2881 req = i915_gem_request_alloc(to, NULL);
2883 return PTR_ERR(req);
2888 trace_i915_gem_ring_sync_to(*to_req, from, from_req);
2889 ret = to->semaphore.sync_to(*to_req, from, seqno);
2893 /* We use last_read_req because sync_to()
2894 * might have just caused seqno wrap under
2897 from->semaphore.sync_seqno[idx] =
2898 i915_gem_request_get_seqno(obj->last_read_req[from->id]);
2905 * i915_gem_object_sync - sync an object to a ring.
2907 * @obj: object which may be in use on another ring.
2908 * @to: ring we wish to use the object on. May be NULL.
2909 * @to_req: request we wish to use the object for. See below.
2910 * This will be allocated and returned if a request is
2911 * required but not passed in.
2913 * This code is meant to abstract object synchronization with the GPU.
2914 * Calling with NULL implies synchronizing the object with the CPU
2915 * rather than a particular GPU ring. Conceptually we serialise writes
2916 * between engines inside the GPU. We only allow one engine to write
2917 * into a buffer at any time, but multiple readers. To ensure each has
2918 * a coherent view of memory, we must:
2920 * - If there is an outstanding write request to the object, the new
2921 * request must wait for it to complete (either CPU or in hw, requests
2922 * on the same ring will be naturally ordered).
2924 * - If we are a write request (pending_write_domain is set), the new
2925 * request must wait for outstanding read requests to complete.
2927 * For CPU synchronisation (NULL to) no request is required. For syncing with
2928 * rings to_req must be non-NULL. However, a request does not have to be
2929 * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
2930 * request will be allocated automatically and returned through *to_req. Note
2931 * that it is not guaranteed that commands will be emitted (because the system
2932 * might already be idle). Hence there is no need to create a request that
2933 * might never have any work submitted. Note further that if a request is
2934 * returned in *to_req, it is the responsibility of the caller to submit
2935 * that request (after potentially adding more work to it).
2937 * Returns 0 if successful, else propagates up the lower layer error.
2940 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2941 struct intel_engine_cs *to,
2942 struct drm_i915_gem_request **to_req)
2944 const bool readonly = obj->base.pending_write_domain == 0;
2945 struct drm_i915_gem_request *req[I915_NUM_ENGINES];
2952 return i915_gem_object_wait_rendering(obj, readonly);
2956 if (obj->last_write_req)
2957 req[n++] = obj->last_write_req;
2959 for (i = 0; i < I915_NUM_ENGINES; i++)
2960 if (obj->last_read_req[i])
2961 req[n++] = obj->last_read_req[i];
2963 for (i = 0; i < n; i++) {
2964 ret = __i915_gem_object_sync(obj, to, req[i], to_req);
2972 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2974 u32 old_write_domain, old_read_domains;
2976 /* Force a pagefault for domain tracking on next user access */
2977 i915_gem_release_mmap(obj);
2979 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2982 old_read_domains = obj->base.read_domains;
2983 old_write_domain = obj->base.write_domain;
2985 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2986 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2988 trace_i915_gem_object_change_domain(obj,
2993 static void __i915_vma_iounmap(struct i915_vma *vma)
2995 GEM_BUG_ON(vma->pin_count);
2997 if (vma->iomap == NULL)
3000 io_mapping_unmap(vma->iomap);
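/*
 * Unbind a vma from its address space: optionally wait for rendering to
 * finish, tear down the GTT mapping (plus fence and iomap for a normal
 * GGTT view), remove the drm_mm node and drop the pin on the object's
 * backing pages.
 */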
3004 static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
3006 struct drm_i915_gem_object *obj = vma->obj;
3007 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3010 if (list_empty(&vma->obj_link))
3013 if (!drm_mm_node_allocated(&vma->node)) {
3014 i915_gem_vma_destroy(vma);
3021 BUG_ON(obj->pages == NULL);
3024 ret = i915_gem_object_wait_rendering(obj, false);
3029 if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
3030 i915_gem_object_finish_gtt(obj);
3032 /* release the fence reg _after_ flushing */
3033 ret = i915_gem_object_put_fence(obj);
3037 __i915_vma_iounmap(vma);
3040 trace_i915_vma_unbind(vma);
3042 vma->vm->unbind_vma(vma);
3045 list_del_init(&vma->vm_link);
3047 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
3048 obj->map_and_fenceable = false;
3049 } else if (vma->ggtt_view.pages) {
3050 sg_free_table(vma->ggtt_view.pages);
3051 kfree(vma->ggtt_view.pages);
3053 vma->ggtt_view.pages = NULL;
3056 drm_mm_remove_node(&vma->node);
3057 i915_gem_vma_destroy(vma);
3059 /* Since the unbound list is global, only move to that list if
3060 * no more VMAs exist. */
3061 if (list_empty(&obj->vma_list))
3062 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
3064 /* And finally now the object is completely decoupled from this vma,
3065 * we can drop its hold on the backing storage and allow it to be
3066 * reaped by the shrinker.
3068 i915_gem_object_unpin_pages(obj);
3073 int i915_vma_unbind(struct i915_vma *vma)
3075 return __i915_vma_unbind(vma, true);
3078 int __i915_vma_unbind_no_wait(struct i915_vma *vma)
3080 return __i915_vma_unbind(vma, false);
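/*
 * Wait for every engine that has ever been handed a context to drain.
 * Caller must hold struct_mutex.
 */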
3083 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
3085 struct intel_engine_cs *engine;
3088 lockdep_assert_held(&dev_priv->drm.struct_mutex);
3090 for_each_engine(engine, dev_priv) {
3091 if (engine->last_context == NULL)
3094 ret = intel_engine_idle(engine);
3099 WARN_ON(i915_verify_lists(dev));
3103 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
3104 unsigned long cache_level)
3106 struct drm_mm_node *gtt_space = &vma->node;
3107 struct drm_mm_node *other;
3110 * On some machines we have to be careful when putting differing types
3111 * of snoopable memory together to avoid the prefetcher crossing memory
3112 * domains and dying. During vm initialisation, we decide whether or not
3113 * these constraints apply and set the drm_mm.color_adjust
3116 if (vma->vm->mm.color_adjust == NULL)
3119 if (!drm_mm_node_allocated(gtt_space))
3122 if (list_empty(&gtt_space->node_list))
3125 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3126 if (other->allocated && !other->hole_follows && other->color != cache_level)
3129 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3130 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3137 * Finds free space in the GTT aperture and binds the object or a view of it
3139 * @obj: object to bind
3140 * @vm: address space to bind into
3141 * @ggtt_view: global gtt view if applicable
3142 * @alignment: requested alignment
3143 * @flags: mask of PIN_* flags to use
3145 static struct i915_vma *
3146 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3147 struct i915_address_space *vm,
3148 const struct i915_ggtt_view *ggtt_view,
3152 struct drm_device *dev = obj->base.dev;
3153 struct drm_i915_private *dev_priv = to_i915(dev);
3154 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3155 u32 fence_alignment, unfenced_alignment;
3156 u32 search_flag, alloc_flag;
3158 u64 size, fence_size;
3159 struct i915_vma *vma;
3162 if (i915_is_ggtt(vm)) {
3165 if (WARN_ON(!ggtt_view))
3166 return ERR_PTR(-EINVAL);
3168 view_size = i915_ggtt_view_size(obj, ggtt_view);
3170 fence_size = i915_gem_get_gtt_size(dev,
3173 fence_alignment = i915_gem_get_gtt_alignment(dev,
3177 unfenced_alignment = i915_gem_get_gtt_alignment(dev,
3181 size = flags & PIN_MAPPABLE ? fence_size : view_size;
3183 fence_size = i915_gem_get_gtt_size(dev,
3186 fence_alignment = i915_gem_get_gtt_alignment(dev,
3190 unfenced_alignment =
3191 i915_gem_get_gtt_alignment(dev,
3195 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3198 start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3200 if (flags & PIN_MAPPABLE)
3201 end = min_t(u64, end, ggtt->mappable_end);
3202 if (flags & PIN_ZONE_4G)
3203 end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
3206 alignment = flags & PIN_MAPPABLE ? fence_alignment :
3208 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3209 DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
3210 ggtt_view ? ggtt_view->type : 0,
3212 return ERR_PTR(-EINVAL);
3215 /* If binding the object/GGTT view requires more space than the entire
3216 * aperture has, reject it early before evicting everything in a vain
3217 * attempt to find space.
3220 DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%llu > %s aperture=%llu\n",
3221 ggtt_view ? ggtt_view->type : 0,
3223 flags & PIN_MAPPABLE ? "mappable" : "total",
3225 return ERR_PTR(-E2BIG);
3228 ret = i915_gem_object_get_pages(obj);
3230 return ERR_PTR(ret);
3232 i915_gem_object_pin_pages(obj);
3234 vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
3235 i915_gem_obj_lookup_or_create_vma(obj, vm);
3240 if (flags & PIN_OFFSET_FIXED) {
3241 uint64_t offset = flags & PIN_OFFSET_MASK;
3243 if (offset & (alignment - 1) || offset + size > end) {
3247 vma->node.start = offset;
3248 vma->node.size = size;
3249 vma->node.color = obj->cache_level;
3250 ret = drm_mm_reserve_node(&vm->mm, &vma->node);
3252 ret = i915_gem_evict_for_vma(vma);
3254 ret = drm_mm_reserve_node(&vm->mm, &vma->node);
3259 if (flags & PIN_HIGH) {
3260 search_flag = DRM_MM_SEARCH_BELOW;
3261 alloc_flag = DRM_MM_CREATE_TOP;
3263 search_flag = DRM_MM_SEARCH_DEFAULT;
3264 alloc_flag = DRM_MM_CREATE_DEFAULT;
3268 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3275 ret = i915_gem_evict_something(dev, vm, size, alignment,
3285 if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
3287 goto err_remove_node;
3290 trace_i915_vma_bind(vma, flags);
3291 ret = i915_vma_bind(vma, obj->cache_level, flags);
3293 goto err_remove_node;
3295 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3296 list_add_tail(&vma->vm_link, &vm->inactive_list);
3301 drm_mm_remove_node(&vma->node);
3303 i915_gem_vma_destroy(vma);
3306 i915_gem_object_unpin_pages(obj);
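/*
 * Flush the CPU cachelines backing the object, unless it is already
 * coherent (stolen/phys memory, or snooped and not forced). Returns
 * true if a flush was performed, so callers know to follow up with a
 * chipset flush.
 */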
3311 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3314 /* If we don't have a page list set up, then we're not pinned
3315 * to GPU, and we can ignore the cache flush because it'll happen
3316 * again at bind time.
3318 if (obj->pages == NULL)
3322 * Stolen memory is always coherent with the GPU as it is explicitly
3323 * marked as wc by the system, or the system is cache-coherent.
3325 if (obj->stolen || obj->phys_handle)
3328 /* If the GPU is snooping the contents of the CPU cache,
3329 * we do not need to manually clear the CPU cache lines. However,
3330 * the caches are only snooped when the render cache is
3331 * flushed/invalidated. As we always have to emit invalidations
3332 * and flushes when moving into and out of the RENDER domain, correct
3333 * snooping behaviour occurs naturally as the result of our domain
3336 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3337 obj->cache_dirty = true;
3341 trace_i915_gem_object_clflush(obj);
3342 drm_clflush_sg(obj->pages);
3343 obj->cache_dirty = false;
3348 /** Flushes the GTT write domain for the object if it's dirty. */
3350 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3352 uint32_t old_write_domain;
3354 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3357 /* No actual flushing is required for the GTT write domain. Writes
3358 * to it immediately go to main memory as far as we know, so there's
3359 * no chipset flush. It also doesn't land in render cache.
3361 * However, we do have to enforce the order so that all writes through
3362 * the GTT land before any writes to the device, such as updates to
3367 old_write_domain = obj->base.write_domain;
3368 obj->base.write_domain = 0;
3370 intel_fb_obj_flush(obj, false, ORIGIN_GTT);
3372 trace_i915_gem_object_change_domain(obj,
3373 obj->base.read_domains,
3377 /** Flushes the CPU write domain for the object if it's dirty. */
3379 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3381 uint32_t old_write_domain;
3383 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3386 if (i915_gem_clflush_object(obj, obj->pin_display))
3387 i915_gem_chipset_flush(to_i915(obj->base.dev));
3389 old_write_domain = obj->base.write_domain;
3390 obj->base.write_domain = 0;
3392 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
3394 trace_i915_gem_object_change_domain(obj,
3395 obj->base.read_domains,
3400 * Moves a single object to the GTT read, and possibly write domain.
3401 * @obj: object to act on
3402 * @write: ask for write access or read only
3404 * This function returns when the move is complete, including waiting on
3408 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3410 struct drm_device *dev = obj->base.dev;
3411 struct drm_i915_private *dev_priv = to_i915(dev);
3412 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3413 uint32_t old_write_domain, old_read_domains;
3414 struct i915_vma *vma;
3417 ret = i915_gem_object_wait_rendering(obj, !write);
3421 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3424 /* Flush and acquire obj->pages so that we are coherent through
3425 * direct access in memory with previous cached writes through
3426 * shmemfs and that our cache domain tracking remains valid.
3427 * For example, if the obj->filp was moved to swap without us
3428 * being notified and releasing the pages, we would mistakenly
3429 * continue to assume that the obj remained out of the CPU cached
3432 ret = i915_gem_object_get_pages(obj);
3436 i915_gem_object_flush_cpu_write_domain(obj);
3438 /* Serialise direct access to this object with the barriers for
3439 * coherent writes from the GPU, by effectively invalidating the
3440 * GTT domain upon first access.
3442 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3445 old_write_domain = obj->base.write_domain;
3446 old_read_domains = obj->base.read_domains;
3448 /* It should now be out of any other write domains, and we can update
3449 * the domain values for our changes.
3451 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3452 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3454 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3455 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3459 trace_i915_gem_object_change_domain(obj,
3463 /* And bump the LRU for this access */
3464 vma = i915_gem_obj_to_ggtt(obj);
3465 if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
3466 list_move_tail(&vma->vm_link,
3467 &ggtt->base.inactive_list);
3473 * Changes the cache-level of an object across all VMA.
3474 * @obj: object to act on
3475 * @cache_level: new cache level to set for the object
3477 * After this function returns, the object will be in the new cache-level
3478 * across all GTT and the contents of the backing storage will be coherent,
3479 * with respect to the new cache-level. In order to keep the backing storage
3480 * coherent for all users, we only allow a single cache level to be set
3481 * globally on the object and prevent it from being changed whilst the
3482 * hardware is reading from the object. That is, if the object is currently
3483 * on the scanout, it will be set to uncached (or equivalent display
3484 * cache coherency) and all non-MOCS GPU access will also be uncached so
3485 * that all direct access to the scanout remains coherent.
3487 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3488 enum i915_cache_level cache_level)
3490 struct drm_device *dev = obj->base.dev;
3491 struct i915_vma *vma, *next;
3495 if (obj->cache_level == cache_level)
3498 /* Inspect the list of currently bound VMA and unbind any that would
3499 * be invalid given the new cache-level. This is principally to
3500 * catch the issue of the CS prefetch crossing page boundaries and
3501 * reading an invalid PTE on older architectures.
3503 list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
3504 if (!drm_mm_node_allocated(&vma->node))
3507 if (vma->pin_count) {
3508 DRM_DEBUG("can not change the cache level of pinned objects\n");
3512 if (!i915_gem_valid_gtt_space(vma, cache_level)) {
3513 ret = i915_vma_unbind(vma);
3520 /* We can reuse the existing drm_mm nodes but need to change the
3521 * cache-level on the PTE. We could simply unbind them all and
3522 * rebind with the correct cache-level on next use. However since
3523 * we already have a valid slot, dma mapping, pages etc, we may as well
3524 * rewrite the PTE in the belief that doing so tramples upon less
3525 * state and so involves less work.
3528 /* Before we change the PTE, the GPU must not be accessing it.
3529 * If we wait upon the object, we know that all the bound
3530 * VMA are no longer active.
3532 ret = i915_gem_object_wait_rendering(obj, false);
3536 if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
3537 /* Access to snoopable pages through the GTT is
3538 * incoherent and on some machines causes a hard
3539 * lockup. Relinquish the CPU mmapping to force
3540 * userspace to refault in the pages and we can
3541 * then double check if the GTT mapping is still
3542 * valid for that pointer access.
3544 i915_gem_release_mmap(obj);
3546 /* As we no longer need a fence for GTT access,
3547 * we can relinquish it now (and so prevent having
3548 * to steal a fence from someone else on the next
3549 * fence request). Note GPU activity would have
3550 * dropped the fence as all snoopable access is
3551 * supposed to be linear.
3553 ret = i915_gem_object_put_fence(obj);
3557 /* We either have incoherent backing store and
3558 * so no GTT access or the architecture is fully
3559 * coherent. In such cases, existing GTT mmaps
3560 * ignore the cache bit in the PTE and we can
3561 * rewrite it without confusing the GPU or having
3562 * to force userspace to fault back in its mmaps.
3566 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3567 if (!drm_mm_node_allocated(&vma->node))
3570 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3576 list_for_each_entry(vma, &obj->vma_list, obj_link)
3577 vma->node.color = cache_level;
3578 obj->cache_level = cache_level;
3581 /* Flush the dirty CPU caches to the backing storage so that the
3582 * object is now coherent at its new cache level (with respect
3583 * to the access domain).
3585 if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
3586 if (i915_gem_clflush_object(obj, true))
3587 i915_gem_chipset_flush(to_i915(obj->base.dev));
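/*
 * i915_gem_get_caching_ioctl - report an object's caching mode to userspace
 * Translates the internal cache_level into the uAPI I915_CACHING_* values.
 */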
3593 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3594 struct drm_file *file)
3596 struct drm_i915_gem_caching *args = data;
3597 struct drm_i915_gem_object *obj;
3599 obj = i915_gem_object_lookup(file, args->handle);
3603 switch (obj->cache_level) {
3604 case I915_CACHE_LLC:
3605 case I915_CACHE_L3_LLC:
3606 args->caching = I915_CACHING_CACHED;
3610 args->caching = I915_CACHING_DISPLAY;
3614 args->caching = I915_CACHING_NONE;
3618 i915_gem_object_put_unlocked(obj);
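/*
 * i915_gem_set_caching_ioctl - change an object's caching mode
 * Maps the uAPI I915_CACHING_* value onto a cache_level and applies it
 * through i915_gem_object_set_cache_level() under struct_mutex.
 */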
3622 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3623 struct drm_file *file)
3625 struct drm_i915_private *dev_priv = to_i915(dev);
3626 struct drm_i915_gem_caching *args = data;
3627 struct drm_i915_gem_object *obj;
3628 enum i915_cache_level level;
3631 switch (args->caching) {
3632 case I915_CACHING_NONE:
3633 level = I915_CACHE_NONE;
3635 case I915_CACHING_CACHED:
3637 * Due to a HW issue on BXT A stepping, GPU stores via a
3638 * snooped mapping may leave stale data in a corresponding CPU
3639 * cacheline, whereas normally such cachelines would get invalidated.
3642 if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
3645 level = I915_CACHE_LLC;
3647 case I915_CACHING_DISPLAY:
3648 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3654 intel_runtime_pm_get(dev_priv);
3656 ret = i915_mutex_lock_interruptible(dev);
3660 obj = i915_gem_object_lookup(file, args->handle);
3666 ret = i915_gem_object_set_cache_level(obj, level);
3668 i915_gem_object_put(obj);
3670 mutex_unlock(&dev->struct_mutex);
3672 intel_runtime_pm_put(dev_priv);
3678 * Prepare buffer for display plane (scanout, cursors, etc).
3679 * Can be called from an uninterruptible phase (modesetting) and allows
3680 * any flushes to be pipelined (for pageflips).
3683 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3685 const struct i915_ggtt_view *view)
3687 u32 old_read_domains, old_write_domain;
3690 /* Mark the pin_display early so that we account for the
3691 * display coherency whilst setting up the cache domains.
3695 /* The display engine is not coherent with the LLC cache on gen6. As
3696 * a result, we make sure that the pinning that is about to occur is
3697 * done with uncached PTEs. This is lowest common denominator for all
3700 * However for gen6+, we could do better by using the GFDT bit instead
3701 * of uncaching, which would allow us to flush all the LLC-cached data
3702 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3704 ret = i915_gem_object_set_cache_level(obj,
3705 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3707 goto err_unpin_display;
3709 /* As the user may map the buffer once pinned in the display plane
3710 * (e.g. libkms for the bootup splash), we have to ensure that we
3711 * always use map_and_fenceable for all scanout buffers.
3713 ret = i915_gem_object_ggtt_pin(obj, view, alignment,
3714 view->type == I915_GGTT_VIEW_NORMAL ?
3717 goto err_unpin_display;
3719 i915_gem_object_flush_cpu_write_domain(obj);
3721 old_write_domain = obj->base.write_domain;
3722 old_read_domains = obj->base.read_domains;
3724 /* It should now be out of any other write domains, and we can update
3725 * the domain values for our changes.
3727 obj->base.write_domain = 0;
3728 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3730 trace_i915_gem_object_change_domain(obj,
3742 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
3743 const struct i915_ggtt_view *view)
3745 if (WARN_ON(obj->pin_display == 0))
3748 i915_gem_object_ggtt_unpin_view(obj, view);
3754 * Moves a single object to the CPU read, and possibly write domain.
3755 * @obj: object to act on
3756 * @write: requesting write or read-only access
3758 * This function returns when the move is complete, including waiting on
3762 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3764 uint32_t old_write_domain, old_read_domains;
3767 ret = i915_gem_object_wait_rendering(obj, !write);
3771 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3774 i915_gem_object_flush_gtt_write_domain(obj);
3776 old_write_domain = obj->base.write_domain;
3777 old_read_domains = obj->base.read_domains;
3779 /* Flush the CPU cache if it's still invalid. */
3780 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3781 i915_gem_clflush_object(obj, false);
3783 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3786 /* It should now be out of any other write domains, and we can update
3787 * the domain values for our changes.
3789 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3791 /* If we're writing through the CPU, then the GPU read domains will
3792 * need to be invalidated at next use.
3795 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3796 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3799 trace_i915_gem_object_change_domain(obj,
3806 /* Throttle our rendering by waiting until the ring has completed our requests
3807 * emitted over 20 msec ago.
3809 * Note that if we were to use the current jiffies each time around the loop,
3810 * we wouldn't escape the function with any frames outstanding if the time to
3811 * render a frame was over 20ms.
3813 * This should get us reasonable parallelism between CPU and GPU but also
3814 * relatively low latency when blocking on a particular request to finish.
3817 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3819 struct drm_i915_private *dev_priv = to_i915(dev);
3820 struct drm_i915_file_private *file_priv = file->driver_priv;
3821 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
3822 struct drm_i915_gem_request *request, *target = NULL;
3825 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3829 /* ABI: return -EIO if already wedged */
3830 if (i915_terminally_wedged(&dev_priv->gpu_error))
3833 spin_lock(&file_priv->mm.lock);
3834 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3835 if (time_after_eq(request->emitted_jiffies, recent_enough))
3839 * Note that the request might not have been submitted yet, in which
3840 * case emitted_jiffies will be zero.
3842 if (!request->emitted_jiffies)
3848 i915_gem_request_get(target);
3849 spin_unlock(&file_priv->mm.lock);
3854 ret = __i915_wait_request(target, true, NULL, NULL);
3855 i915_gem_request_put(target);
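/*
 * Check whether an already-bound vma violates the placement constraints
 * requested by the caller (alignment, mappability, offset bias or fixed
 * offset); if so, it needs to be unbound and rebound.
 */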
3861 i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
3863 struct drm_i915_gem_object *obj = vma->obj;
3866 vma->node.start & (alignment - 1))
3869 if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
3872 if (flags & PIN_OFFSET_BIAS &&
3873 vma->node.start < (flags & PIN_OFFSET_MASK))
3876 if (flags & PIN_OFFSET_FIXED &&
3877 vma->node.start != (flags & PIN_OFFSET_MASK))
3883 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
3885 struct drm_i915_gem_object *obj = vma->obj;
3886 bool mappable, fenceable;
3887 u32 fence_size, fence_alignment;
3889 fence_size = i915_gem_get_gtt_size(obj->base.dev,
3892 fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
3897 fenceable = (vma->node.size == fence_size &&
3898 (vma->node.start & (fence_alignment - 1)) == 0);
3900 mappable = (vma->node.start + fence_size <=
3901 to_i915(obj->base.dev)->ggtt.mappable_end);
3903 obj->map_and_fenceable = mappable && fenceable;
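/*
 * Common pinning path for both GGTT views and ppGTT: look up the vma
 * (binding it if necessary), rebind it when it is misplaced for the
 * requested flags, and account the pin on the vma.
 */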
3907 i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
3908 struct i915_address_space *vm,
3909 const struct i915_ggtt_view *ggtt_view,
3913 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3914 struct i915_vma *vma;
3918 if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
3921 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
3924 if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
3927 if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
3930 vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
3931 i915_gem_obj_to_vma(obj, vm);
3934 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3937 if (i915_vma_misplaced(vma, alignment, flags)) {
3938 WARN(vma->pin_count,
3939 "bo is already pinned in %s with incorrect alignment:"
3940 " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
3941 " obj->map_and_fenceable=%d\n",
3942 ggtt_view ? "ggtt" : "ppgtt",
3943 upper_32_bits(vma->node.start),
3944 lower_32_bits(vma->node.start),
3946 !!(flags & PIN_MAPPABLE),
3947 obj->map_and_fenceable);
3948 ret = i915_vma_unbind(vma);
3956 bound = vma ? vma->bound : 0;
3957 if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
3958 vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
3961 return PTR_ERR(vma);
3963 ret = i915_vma_bind(vma, obj->cache_level, flags);
3968 if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
3969 (bound ^ vma->bound) & GLOBAL_BIND) {
3970 __i915_vma_set_map_and_fenceable(vma);
3971 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
3979 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3980 struct i915_address_space *vm,
3984 return i915_gem_object_do_pin(obj, vm,
3985 i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
3990 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3991 const struct i915_ggtt_view *view,
3995 struct drm_device *dev = obj->base.dev;
3996 struct drm_i915_private *dev_priv = to_i915(dev);
3997 struct i915_ggtt *ggtt = &dev_priv->ggtt;
4001 return i915_gem_object_do_pin(obj, &ggtt->base, view,
4002 alignment, flags | PIN_GLOBAL);
4006 i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
4007 const struct i915_ggtt_view *view)
4009 struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
4011 WARN_ON(vma->pin_count == 0);
4012 WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
4018 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4019 struct drm_file *file)
4021 struct drm_i915_gem_busy *args = data;
4022 struct drm_i915_gem_object *obj;
4025 ret = i915_mutex_lock_interruptible(dev);
4029 obj = i915_gem_object_lookup(file, args->handle);
4035 /* Count all active objects as busy, even if they are currently not used
4036 * by the gpu. Users of this interface expect objects to eventually
4037 * become non-busy without any further actions, therefore emit any
4038 * necessary flushes here.
4040 ret = i915_gem_object_flush_active(obj);
4048 for (i = 0; i < I915_NUM_ENGINES; i++) {
4049 struct drm_i915_gem_request *req;
4051 req = obj->last_read_req[i];
4053 args->busy |= 1 << (16 + req->engine->exec_id);
4055 if (obj->last_write_req)
4056 args->busy |= obj->last_write_req->engine->exec_id;
4060 i915_gem_object_put(obj);
4062 mutex_unlock(&dev->struct_mutex);
4067 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4068 struct drm_file *file_priv)
4070 return i915_gem_ring_throttle(dev, file_priv);
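/*
 * i915_gem_madvise_ioctl - userspace hint about an object's backing storage
 * I915_MADV_DONTNEED allows the pages to be discarded under memory
 * pressure, I915_MADV_WILLNEED requires them to be kept.
 */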
4074 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4075 struct drm_file *file_priv)
4077 struct drm_i915_private *dev_priv = to_i915(dev);
4078 struct drm_i915_gem_madvise *args = data;
4079 struct drm_i915_gem_object *obj;
4082 switch (args->madv) {
4083 case I915_MADV_DONTNEED:
4084 case I915_MADV_WILLNEED:
4090 ret = i915_mutex_lock_interruptible(dev);
4094 obj = i915_gem_object_lookup(file_priv, args->handle);
4100 if (i915_gem_obj_is_pinned(obj)) {
4106 obj->tiling_mode != I915_TILING_NONE &&
4107 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4108 if (obj->madv == I915_MADV_WILLNEED)
4109 i915_gem_object_unpin_pages(obj);
4110 if (args->madv == I915_MADV_WILLNEED)
4111 i915_gem_object_pin_pages(obj);
4114 if (obj->madv != __I915_MADV_PURGED)
4115 obj->madv = args->madv;
4117 /* if the object is no longer attached, discard its backing storage */
4118 if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
4119 i915_gem_object_truncate(obj);
4121 args->retained = obj->madv != __I915_MADV_PURGED;
4124 i915_gem_object_put(obj);
4126 mutex_unlock(&dev->struct_mutex);
4130 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4131 const struct drm_i915_gem_object_ops *ops)
4135 INIT_LIST_HEAD(&obj->global_list);
4136 for (i = 0; i < I915_NUM_ENGINES; i++)
4137 INIT_LIST_HEAD(&obj->engine_list[i]);
4138 INIT_LIST_HEAD(&obj->obj_exec_link);
4139 INIT_LIST_HEAD(&obj->vma_list);
4140 INIT_LIST_HEAD(&obj->batch_pool_link);
4144 obj->fence_reg = I915_FENCE_REG_NONE;
4145 obj->madv = I915_MADV_WILLNEED;
4147 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
4150 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4151 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
4152 .get_pages = i915_gem_object_get_pages_gtt,
4153 .put_pages = i915_gem_object_put_pages_gtt,
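/*
 * i915_gem_object_create - allocate a shmemfs-backed GEM object
 * Sets the gfp mask for the backing pages (no highmem on 965G/GM, which
 * cannot relocate objects above 4GiB) and picks the default cache level:
 * LLC on LLC platforms, uncached otherwise.
 */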
4156 struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
4159 struct drm_i915_gem_object *obj;
4160 struct address_space *mapping;
4164 obj = i915_gem_object_alloc(dev);
4166 return ERR_PTR(-ENOMEM);
4168 ret = drm_gem_object_init(dev, &obj->base, size);
4172 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4173 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4174 /* 965gm cannot relocate objects above 4GiB. */
4175 mask &= ~__GFP_HIGHMEM;
4176 mask |= __GFP_DMA32;
4179 mapping = file_inode(obj->base.filp)->i_mapping;
4180 mapping_set_gfp_mask(mapping, mask);
4182 i915_gem_object_init(obj, &i915_gem_object_ops);
4184 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4185 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4188 /* On some devices, we can have the GPU use the LLC (the CPU
4189 * cache) for about a 10% performance improvement
4190 * compared to uncached. Graphics requests other than
4191 * display scanout are coherent with the CPU in
4192 * accessing this cache. This means in this mode we
4193 * don't need to clflush on the CPU side, and on the
4194 * GPU side we only need to flush internal caches to
4195 * get data visible to the CPU.
4197 * However, we maintain the display planes as UC, and so
4198 * need to rebind when first used as such.
4200 obj->cache_level = I915_CACHE_LLC;
4202 obj->cache_level = I915_CACHE_NONE;
4204 trace_i915_gem_object_create(obj);
4209 i915_gem_object_free(obj);
4211 return ERR_PTR(ret);
4214 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4216 /* If we are the last user of the backing storage (be it shmemfs
4217 * pages or stolen etc), we know that the pages are going to be
4218 * immediately released. In this case, we can then skip copying
4219 * back the contents from the GPU.
4222 if (obj->madv != I915_MADV_WILLNEED)
4225 if (obj->base.filp == NULL)
4228 /* At first glance, this looks racy, but then again so would be
4229 * userspace racing mmap against close. However, the first external
4230 * reference to the filp can only be obtained through the
4231 * i915_gem_mmap_ioctl() which safeguards us against the user
4232 * acquiring such a reference whilst we are in the middle of
4233 * freeing the object.
4235 return atomic_long_read(&obj->base.filp->f_count) == 1;
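/*
 * Final destruction of a GEM object: unbind all remaining vmas (without
 * waiting where possible), drop any quirk/stolen page pins, release the
 * backing pages and free the object.
 */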
4238 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4240 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4241 struct drm_device *dev = obj->base.dev;
4242 struct drm_i915_private *dev_priv = to_i915(dev);
4243 struct i915_vma *vma, *next;
4245 intel_runtime_pm_get(dev_priv);
4247 trace_i915_gem_object_destroy(obj);
4249 list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
4253 ret = __i915_vma_unbind_no_wait(vma);
4254 if (WARN_ON(ret == -ERESTARTSYS)) {
4255 bool was_interruptible;
4257 was_interruptible = dev_priv->mm.interruptible;
4258 dev_priv->mm.interruptible = false;
4260 WARN_ON(i915_vma_unbind(vma));
4262 dev_priv->mm.interruptible = was_interruptible;
4266 /* Stolen objects don't hold a ref, but do hold a pin count. Fix that up
4267 * before progressing. */
4269 i915_gem_object_unpin_pages(obj);
4271 WARN_ON(obj->frontbuffer_bits);
4273 if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
4274 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
4275 obj->tiling_mode != I915_TILING_NONE)
4276 i915_gem_object_unpin_pages(obj);
4278 if (WARN_ON(obj->pages_pin_count))
4279 obj->pages_pin_count = 0;
4280 if (discard_backing_storage(obj))
4281 obj->madv = I915_MADV_DONTNEED;
4282 i915_gem_object_put_pages(obj);
4286 if (obj->base.import_attach)
4287 drm_prime_gem_destroy(&obj->base, NULL);
4289 if (obj->ops->release)
4290 obj->ops->release(obj);
4292 drm_gem_object_release(&obj->base);
4293 i915_gem_info_remove_obj(dev_priv, obj->base.size);
4296 i915_gem_object_free(obj);
4298 intel_runtime_pm_put(dev_priv);
4301 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4302 struct i915_address_space *vm)
4304 struct i915_vma *vma;
4305 list_for_each_entry(vma, &obj->vma_list, obj_link) {
4306 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
4313 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
4314 const struct i915_ggtt_view *view)
4316 struct i915_vma *vma;
4320 list_for_each_entry(vma, &obj->vma_list, obj_link)
4321 if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
4326 void i915_gem_vma_destroy(struct i915_vma *vma)
4328 WARN_ON(vma->node.allocated);
4330 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4331 if (!list_empty(&vma->exec_list))
4335 i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
4337 list_del(&vma->obj_link);
4339 kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
4343 i915_gem_stop_engines(struct drm_device *dev)
4345 struct drm_i915_private *dev_priv = to_i915(dev);
4346 struct intel_engine_cs *engine;
4348 for_each_engine(engine, dev_priv)
4349 dev_priv->gt.stop_engine(engine);
4353 i915_gem_suspend(struct drm_device *dev)
4355 struct drm_i915_private *dev_priv = to_i915(dev);
4358 intel_suspend_gt_powersave(dev_priv);
4360 mutex_lock(&dev->struct_mutex);
4362 /* We have to flush all the executing contexts to main memory so
4363 * that they can be saved in the hibernation image. To ensure the last
4364 * context image is coherent, we have to switch away from it. That
4365 * leaves the dev_priv->kernel_context still active when
4366 * we actually suspend, and its image in memory may not match the GPU
4367 * state. Fortunately, the kernel_context is disposable and we do
4368 * not rely on its state.
4370 ret = i915_gem_switch_to_kernel_context(dev_priv);
4374 ret = i915_gem_wait_for_idle(dev_priv);
4378 i915_gem_retire_requests(dev_priv);
4380 /* Note that rather than stopping the engines, all we have to do
4381 * is assert that every RING_HEAD == RING_TAIL (all execution complete)
4382 * and similar for all logical context images (to ensure they are
4383 * all ready for hibernation).
4385 i915_gem_stop_engines(dev);
4386 i915_gem_context_lost(dev_priv);
4387 mutex_unlock(&dev->struct_mutex);
4389 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4390 cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4391 flush_delayed_work(&dev_priv->gt.idle_work);
4393 /* Assert that we successfully flushed all the work and
4394 * reset the GPU back to its idle, low power state.
4396 WARN_ON(dev_priv->gt.awake);
4401 mutex_unlock(&dev->struct_mutex);
4405 void i915_gem_resume(struct drm_device *dev)
4407 struct drm_i915_private *dev_priv = to_i915(dev);
4409 mutex_lock(&dev->struct_mutex);
4410 i915_gem_restore_gtt_mappings(dev);
4412 /* As we didn't flush the kernel context before suspend, we cannot
4413 * guarantee that the context image is complete. So let's just reset
4414 * it and start again.
4416 if (i915.enable_execlists)
4417 intel_lr_context_reset(dev_priv, dev_priv->kernel_context);
4419 mutex_unlock(&dev->struct_mutex);
4422 void i915_gem_init_swizzling(struct drm_device *dev)
4424 struct drm_i915_private *dev_priv = to_i915(dev);
4426 if (INTEL_INFO(dev)->gen < 5 ||
4427 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4430 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4431 DISP_TILE_SURFACE_SWIZZLING);
4436 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4438 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4439 else if (IS_GEN7(dev))
4440 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4441 else if (IS_GEN8(dev))
4442 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4447 static void init_unused_ring(struct drm_device *dev, u32 base)
4449 struct drm_i915_private *dev_priv = to_i915(dev);
4451 I915_WRITE(RING_CTL(base), 0);
4452 I915_WRITE(RING_HEAD(base), 0);
4453 I915_WRITE(RING_TAIL(base), 0);
4454 I915_WRITE(RING_START(base), 0);
4457 static void init_unused_rings(struct drm_device *dev)
4460 init_unused_ring(dev, PRB1_BASE);
4461 init_unused_ring(dev, SRB0_BASE);
4462 init_unused_ring(dev, SRB1_BASE);
4463 init_unused_ring(dev, SRB2_BASE);
4464 init_unused_ring(dev, SRB3_BASE);
4465 } else if (IS_GEN2(dev)) {
4466 init_unused_ring(dev, SRB0_BASE);
4467 init_unused_ring(dev, SRB1_BASE);
4468 } else if (IS_GEN3(dev)) {
4469 init_unused_ring(dev, PRB1_BASE);
4470 init_unused_ring(dev, PRB2_BASE);
4475 i915_gem_init_hw(struct drm_device *dev)
4477 struct drm_i915_private *dev_priv = to_i915(dev);
4478 struct intel_engine_cs *engine;
4481 /* Double layer security blanket, see i915_gem_init() */
4482 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4484 if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
4485 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4487 if (IS_HASWELL(dev))
4488 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4489 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4491 if (HAS_PCH_NOP(dev)) {
4492 if (IS_IVYBRIDGE(dev)) {
4493 u32 temp = I915_READ(GEN7_MSG_CTL);
4494 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4495 I915_WRITE(GEN7_MSG_CTL, temp);
4496 } else if (INTEL_INFO(dev)->gen >= 7) {
4497 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4498 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4499 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4503 i915_gem_init_swizzling(dev);
4506 * At least 830 can leave some of the unused rings
4507 * "active" (ie. head != tail) after resume which
4508 * will prevent c3 entry. Makes sure all unused rings
4511 init_unused_rings(dev);
4513 BUG_ON(!dev_priv->kernel_context);
4515 ret = i915_ppgtt_init_hw(dev);
4517 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
4521 /* Need to do basic initialisation of all rings first: */
4522 for_each_engine(engine, dev_priv) {
4523 ret = engine->init_hw(engine);
4528 intel_mocs_init_l3cc_table(dev);
4530 /* We can't enable contexts until all firmware is loaded */
4531 ret = intel_guc_setup(dev);
4536 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4540 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
4542 if (INTEL_INFO(dev_priv)->gen < 6)
4545 /* TODO: make semaphores and Execlists play nicely together */
4546 if (i915.enable_execlists)
4552 #ifdef CONFIG_INTEL_IOMMU
4553 /* Enable semaphores on SNB when IO remapping is off */
4554 if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
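/*
 * One-time GEM initialisation: select the submission backend (legacy
 * ringbuffer vs execlists), set up the global GTT, contexts and engines,
 * then bring up the hardware. A failure during ring initialisation marks
 * the GPU as wedged instead of failing the whole driver load.
 */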
4561 int i915_gem_init(struct drm_device *dev)
4563 struct drm_i915_private *dev_priv = to_i915(dev);
4566 mutex_lock(&dev->struct_mutex);
4568 if (!i915.enable_execlists) {
4569 dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
4570 dev_priv->gt.cleanup_engine = intel_cleanup_engine;
4571 dev_priv->gt.stop_engine = intel_stop_engine;
4573 dev_priv->gt.execbuf_submit = intel_execlists_submission;
4574 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
4575 dev_priv->gt.stop_engine = intel_logical_ring_stop;
4578 /* This is just a security blanket to placate dragons.
4579 * On some systems, we very sporadically observe that the first TLBs
4580 * used by the CS may be stale, despite us poking the TLB reset. If
4581 * we hold the forcewake during initialisation these problems
4582 * just magically go away.
4584 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4586 i915_gem_init_userptr(dev_priv);
4587 i915_gem_init_ggtt(dev);
4589 ret = i915_gem_context_init(dev);
4593 ret = intel_engines_init(dev);
4597 ret = i915_gem_init_hw(dev);
4599 /* Allow ring initialisation to fail by marking the GPU as
4600 * wedged. But we only want to do this where the GPU is angry,
4601 * for all other failure, such as an allocation failure, bail.
4603 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4604 atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
4609 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4610 mutex_unlock(&dev->struct_mutex);
4616 i915_gem_cleanup_engines(struct drm_device *dev)
4618 struct drm_i915_private *dev_priv = to_i915(dev);
4619 struct intel_engine_cs *engine;
4621 for_each_engine(engine, dev_priv)
4622 dev_priv->gt.cleanup_engine(engine);
4626 init_engine_lists(struct intel_engine_cs *engine)
4628 INIT_LIST_HEAD(&engine->active_list);
4629 INIT_LIST_HEAD(&engine->request_list);
4633 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
4635 struct drm_device *dev = &dev_priv->drm;
4637 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
4638 !IS_CHERRYVIEW(dev_priv))
4639 dev_priv->num_fence_regs = 32;
4640 else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
4641 IS_I945GM(dev_priv) || IS_G33(dev_priv))
4642 dev_priv->num_fence_regs = 16;
4644 dev_priv->num_fence_regs = 8;
4646 if (intel_vgpu_active(dev_priv))
4647 dev_priv->num_fence_regs =
4648 I915_READ(vgtif_reg(avail_rs.fence_num));
4650 /* Initialize fence registers to zero */
4651 i915_gem_restore_fences(dev);
4653 i915_gem_detect_bit_6_swizzle(dev);
4657 i915_gem_load_init(struct drm_device *dev)
4659 struct drm_i915_private *dev_priv = to_i915(dev);
4663 kmem_cache_create("i915_gem_object",
4664 sizeof(struct drm_i915_gem_object), 0,
4668 kmem_cache_create("i915_gem_vma",
4669 sizeof(struct i915_vma), 0,
4672 dev_priv->requests =
4673 kmem_cache_create("i915_gem_request",
4674 sizeof(struct drm_i915_gem_request), 0,
4678 INIT_LIST_HEAD(&dev_priv->vm_list);
4679 INIT_LIST_HEAD(&dev_priv->context_list);
4680 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4681 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4682 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4683 for (i = 0; i < I915_NUM_ENGINES; i++)
4684 init_engine_lists(&dev_priv->engine[i]);
4685 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4686 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4687 INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
4688 i915_gem_retire_work_handler);
4689 INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
4690 i915_gem_idle_work_handler);
4691 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
4692 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4694 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4696 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4698 init_waitqueue_head(&dev_priv->pending_flip_queue);
4700 dev_priv->mm.interruptible = true;
4702 mutex_init(&dev_priv->fb_tracking.lock);
4705 void i915_gem_load_cleanup(struct drm_device *dev)
4707 struct drm_i915_private *dev_priv = to_i915(dev);
4709 kmem_cache_destroy(dev_priv->requests);
4710 kmem_cache_destroy(dev_priv->vmas);
4711 kmem_cache_destroy(dev_priv->objects);
4714 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
4716 struct drm_i915_gem_object *obj;
4718 /* Called just before we write the hibernation image.
4720 * We need to update the domain tracking to reflect that the CPU
4721 * will be accessing all the pages to create and restore from the
4722 * hibernation, and so upon restoration those pages will be in the CPU domain.
4725 * To make sure the hibernation image contains the latest state,
4726 * we update that state just before writing out the image.
4729 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
4730 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4731 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4734 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4735 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4736 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);

	if (!list_empty(&file_priv->rps.link)) {
		spin_lock(&to_i915(dev)->rps.client_lock);
		list_del(&file_priv->rps.link);
		spin_unlock(&to_i915(dev)->rps.client_lock);
	}
}

int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = to_i915(dev);
	file_priv->file = file;
	INIT_LIST_HEAD(&file_priv->rps.link);

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_ring = -1;

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	if (old) {
		WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
		WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
		old->frontbuffer_bits &= ~frontbuffer_bits;
	}

	if (new) {
		WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
		WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
		new->frontbuffer_bits |= frontbuffer_bits;
	}
}

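/*
 * Illustrative sketch, assuming a typical caller in the modeset/flip code
 * (the object names below are hypothetical): the frontbuffer bits are handed
 * from the outgoing buffer to the incoming one under struct_mutex, e.g.:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	i915_gem_track_fb(old_fb_obj, new_fb_obj, frontbuffer_bits);
 *	mutex_unlock(&dev->struct_mutex);
 *
 * Passing NULL for either object covers the plane enable/disable cases.
 */
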
/* All the new VM stuff */
u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = to_i915(o->base.dev);
	struct i915_vma *vma;

	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (vma->is_ggtt &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm)
			return vma->node.start;
	}

	WARN(1, "%s vma for this object not found.\n",
	     i915_is_ggtt(vm) ? "global" : "ppgtt");
	return -1;
}

u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
			return vma->node.start;

	WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
	return -1;
}

bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (vma->is_ggtt &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;
	}

	return false;
}

bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (vma->is_ggtt &&
		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
		    drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (drm_mm_node_allocated(&vma->node))
			return true;
	return false;
}

unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	GEM_BUG_ON(list_empty(&o->vma_list));

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (vma->is_ggtt &&
		    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
			return vma->node.size;
	}
	return 0;
}

bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		if (vma->pin_count > 0)
			return true;
	return false;
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
{
	struct page *page;

	/* Only default objects have per-page dirty tracking */
	if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
		return NULL;

	page = i915_gem_object_get_page(obj, n);
	set_page_dirty(page);
	return page;
}

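/*
 * Illustrative sketch, assuming a CPU write path such as relocation patching
 * (the variable names are hypothetical): the dirty variant is used so the
 * write is not lost when the page is later written back or swapped, e.g.:
 *
 *	page = i915_gem_object_get_dirty_page(obj, offset >> PAGE_SHIFT);
 *	vaddr = kmap_atomic(page);
 *	*(u32 *)(vaddr + offset_in_page(offset)) = value;
 *	kunmap_atomic(vaddr);
 */
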
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
				 const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct sg_table *sg;
	size_t bytes;
	int ret;

	obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		goto fail;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto fail;

	i915_gem_object_pin_pages(obj);
	sg = obj->pages;
	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
	obj->dirty = 1;		/* Backing store is now out of date */
	i915_gem_object_unpin_pages(obj);

	if (WARN_ON(bytes != size)) {
		DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
		ret = -EFAULT;
		goto fail;
	}

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(ret);
}

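/*
 * Illustrative sketch, assuming a firmware-style caller (the "fw" names are
 * hypothetical): a blob is wrapped in a fresh GEM object with a single call,
 * e.g.:
 *
 *	obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 * The object holds its own copy of the data; the caller releases it with
 * i915_gem_object_put() when it is no longer needed.
 */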