2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/shmem_fs.h>
35 #include <linux/slab.h>
36 #include <linux/swap.h>
37 #include <linux/pci.h>
38 #include <linux/dma-buf.h>
40 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
41 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
43 static __must_check int
44 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
46 static int i915_gem_phys_pwrite(struct drm_device *dev,
47 struct drm_i915_gem_object *obj,
48 struct drm_i915_gem_pwrite *args,
49 struct drm_file *file);
51 static void i915_gem_write_fence(struct drm_device *dev, int reg,
52 struct drm_i915_gem_object *obj);
53 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
54 struct drm_i915_fence_reg *fence,
57 static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
58 struct shrink_control *sc);
59 static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
60 struct shrink_control *sc);
61 static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
62 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
63 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
65 static bool cpu_cache_is_coherent(struct drm_device *dev,
66 enum i915_cache_level level)
68 return HAS_LLC(dev) || level != I915_CACHE_NONE;
71 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
73 if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
76 return obj->pin_display;
79 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
82 i915_gem_release_mmap(obj);
84 /* As we do not have an associated fence register, we will force
85 * a tiling change if we ever need to acquire one.
87 obj->fence_dirty = false;
88 obj->fence_reg = I915_FENCE_REG_NONE;
91 /* some bookkeeping */
92 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
95 spin_lock(&dev_priv->mm.object_stat_lock);
96 dev_priv->mm.object_count++;
97 dev_priv->mm.object_memory += size;
98 spin_unlock(&dev_priv->mm.object_stat_lock);
101 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
104 spin_lock(&dev_priv->mm.object_stat_lock);
105 dev_priv->mm.object_count--;
106 dev_priv->mm.object_memory -= size;
107 spin_unlock(&dev_priv->mm.object_stat_lock);
111 i915_gem_wait_for_error(struct i915_gpu_error *error)
115 #define EXIT_COND (!i915_reset_in_progress(error) || \
116 i915_terminally_wedged(error))
121 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
122 * userspace. If it takes that long something really bad is going on and
123 * we should simply try to bail out and fail as gracefully as possible.
125 ret = wait_event_interruptible_timeout(error->reset_queue,
129 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
131 } else if (ret < 0) {
139 int i915_mutex_lock_interruptible(struct drm_device *dev)
141 struct drm_i915_private *dev_priv = dev->dev_private;
144 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
148 ret = mutex_lock_interruptible(&dev->struct_mutex);
152 WARN_ON(i915_verify_lists(dev));
157 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
159 return i915_gem_obj_bound_any(obj) && !obj->active;
163 i915_gem_init_ioctl(struct drm_device *dev, void *data,
164 struct drm_file *file)
166 struct drm_i915_private *dev_priv = dev->dev_private;
167 struct drm_i915_gem_init *args = data;
169 if (drm_core_check_feature(dev, DRIVER_MODESET))
172 if (args->gtt_start >= args->gtt_end ||
173 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
176 /* GEM with user mode setting was never supported on ilk and later. */
177 if (INTEL_INFO(dev)->gen >= 5)
180 mutex_lock(&dev->struct_mutex);
181 i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
183 dev_priv->gtt.mappable_end = args->gtt_end;
184 mutex_unlock(&dev->struct_mutex);
190 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
191 struct drm_file *file)
193 struct drm_i915_private *dev_priv = dev->dev_private;
194 struct drm_i915_gem_get_aperture *args = data;
195 struct drm_i915_gem_object *obj;
199 mutex_lock(&dev->struct_mutex);
200 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
201 if (i915_gem_obj_is_pinned(obj))
202 pinned += i915_gem_obj_ggtt_size(obj);
203 mutex_unlock(&dev->struct_mutex);
205 args->aper_size = dev_priv->gtt.base.total;
206 args->aper_available_size = args->aper_size - pinned;
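/*
 * Illustrative userspace sketch (not part of the driver): querying the
 * aperture sizes reported above through the uapi in i915_drm.h. The device
 * node path and the absence of error handling are simplifying assumptions.
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	struct drm_i915_gem_get_aperture aper = { 0 };
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aper) == 0)
 *		printf("GTT total %llu, available %llu\n",
 *		       (unsigned long long)aper.aper_size,
 *		       (unsigned long long)aper.aper_available_size);
 */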
211 void *i915_gem_object_alloc(struct drm_device *dev)
213 struct drm_i915_private *dev_priv = dev->dev_private;
214 return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
217 void i915_gem_object_free(struct drm_i915_gem_object *obj)
219 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
220 kmem_cache_free(dev_priv->slab, obj);
224 i915_gem_create(struct drm_file *file,
225 struct drm_device *dev,
229 struct drm_i915_gem_object *obj;
233 size = roundup(size, PAGE_SIZE);
237 /* Allocate the new object */
238 obj = i915_gem_alloc_object(dev, size);
242 ret = drm_gem_handle_create(file, &obj->base, &handle);
243 /* drop reference from allocate - handle holds it now */
244 drm_gem_object_unreference_unlocked(&obj->base);
253 i915_gem_dumb_create(struct drm_file *file,
254 struct drm_device *dev,
255 struct drm_mode_create_dumb *args)
257 /* have to work out size/pitch and return them */
258 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
259 args->size = args->pitch * args->height;
260 return i915_gem_create(file, dev,
261 args->size, &args->handle);
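/*
 * Worked example of the pitch/size computation above (an illustration, not
 * driver code): for a 1920x1080 buffer at 32 bpp,
 *
 *	pitch = ALIGN(1920 * DIV_ROUND_UP(32, 8), 64) = ALIGN(7680, 64) = 7680
 *	size  = pitch * height = 7680 * 1080 = 8294400 bytes (~7.9 MiB)
 *
 * which i915_gem_create() then rounds up to a whole number of pages.
 */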
265 * Creates a new mm object and returns a handle to it.
268 i915_gem_create_ioctl(struct drm_device *dev, void *data,
269 struct drm_file *file)
271 struct drm_i915_gem_create *args = data;
273 return i915_gem_create(file, dev,
274 args->size, &args->handle);
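/*
 * Illustrative userspace sketch (not part of the driver): creating a GEM
 * object through the ioctl above, using the uapi structures from
 * include/uapi/drm/i915_drm.h. The device path, the hypothetical
 * use_handle() consumer and the missing error handling are assumptions.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	struct drm_i915_gem_create create = {
 *		.size = 2 * 4096,	// rounded up to whole pages by the kernel
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		use_handle(create.handle);	// hypothetical consumer
 */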
278 __copy_to_user_swizzled(char __user *cpu_vaddr,
279 const char *gpu_vaddr, int gpu_offset,
282 int ret, cpu_offset = 0;
285 int cacheline_end = ALIGN(gpu_offset + 1, 64);
286 int this_length = min(cacheline_end - gpu_offset, length);
287 int swizzled_gpu_offset = gpu_offset ^ 64;
289 ret = __copy_to_user(cpu_vaddr + cpu_offset,
290 gpu_vaddr + swizzled_gpu_offset,
295 cpu_offset += this_length;
296 gpu_offset += this_length;
297 length -= this_length;
304 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
305 const char __user *cpu_vaddr,
308 int ret, cpu_offset = 0;
311 int cacheline_end = ALIGN(gpu_offset + 1, 64);
312 int this_length = min(cacheline_end - gpu_offset, length);
313 int swizzled_gpu_offset = gpu_offset ^ 64;
315 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
316 cpu_vaddr + cpu_offset,
321 cpu_offset += this_length;
322 gpu_offset += this_length;
323 length -= this_length;
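/*
 * Worked example for the swizzled copy loops above (illustration only): with
 * gpu_offset = 0x90 and plenty of data remaining,
 *
 *	cacheline_end       = ALIGN(0x90 + 1, 64) = 0xc0
 *	this_length         = 0xc0 - 0x90 = 48 bytes
 *	swizzled_gpu_offset = 0x90 ^ 64 = 0xd0
 *
 * i.e. each copy stays within one 64-byte cacheline, and flipping bit 6 of
 * the offset swaps the two 64-byte halves of each 128-byte block, which
 * compensates for the bit17-based swizzling the hardware applies on such
 * pages.
 */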
329 /* Per-page copy function for the shmem pread fastpath.
330 * Flushes invalid cachelines before reading the target if
331 * needs_clflush is set. */
333 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
334 char __user *user_data,
335 bool page_do_bit17_swizzling, bool needs_clflush)
340 if (unlikely(page_do_bit17_swizzling))
343 vaddr = kmap_atomic(page);
345 drm_clflush_virt_range(vaddr + shmem_page_offset,
347 ret = __copy_to_user_inatomic(user_data,
348 vaddr + shmem_page_offset,
350 kunmap_atomic(vaddr);
352 return ret ? -EFAULT : 0;
356 shmem_clflush_swizzled_range(char *addr, unsigned long length,
359 if (unlikely(swizzled)) {
360 unsigned long start = (unsigned long) addr;
361 unsigned long end = (unsigned long) addr + length;
363 /* For swizzling simply ensure that we always flush both
364 * channels. Lame, but simple and it works. Swizzled
365 * pwrite/pread is far from a hotpath - current userspace
366 * doesn't use it at all. */
367 start = round_down(start, 128);
368 end = round_up(end, 128);
370 drm_clflush_virt_range((void *)start, end - start);
372 drm_clflush_virt_range(addr, length);
377 /* The only difference from the fast-path function is that this one can handle bit17
378 * and uses non-atomic copy and kmap functions. */
380 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
381 char __user *user_data,
382 bool page_do_bit17_swizzling, bool needs_clflush)
389 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
391 page_do_bit17_swizzling);
393 if (page_do_bit17_swizzling)
394 ret = __copy_to_user_swizzled(user_data,
395 vaddr, shmem_page_offset,
398 ret = __copy_to_user(user_data,
399 vaddr + shmem_page_offset,
403 return ret ? - EFAULT : 0;
407 i915_gem_shmem_pread(struct drm_device *dev,
408 struct drm_i915_gem_object *obj,
409 struct drm_i915_gem_pread *args,
410 struct drm_file *file)
412 char __user *user_data;
415 int shmem_page_offset, page_length, ret = 0;
416 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
418 int needs_clflush = 0;
419 struct sg_page_iter sg_iter;
421 user_data = to_user_ptr(args->data_ptr);
424 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
426 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
427 /* If we're not in the cpu read domain, set ourself into the gtt
428 * read domain and manually flush cachelines (if required). This
429 * optimizes for the case when the gpu will dirty the data
430 * anyway again before the next pread happens. */
431 needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
432 ret = i915_gem_object_wait_rendering(obj, true);
437 ret = i915_gem_object_get_pages(obj);
441 i915_gem_object_pin_pages(obj);
443 offset = args->offset;
445 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
446 offset >> PAGE_SHIFT) {
447 struct page *page = sg_page_iter_page(&sg_iter);
452 /* Operation in this page
454 * shmem_page_offset = offset within page in shmem file
455 * page_length = bytes to copy for this page
457 shmem_page_offset = offset_in_page(offset);
458 page_length = remain;
459 if ((shmem_page_offset + page_length) > PAGE_SIZE)
460 page_length = PAGE_SIZE - shmem_page_offset;
462 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
463 (page_to_phys(page) & (1 << 17)) != 0;
465 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
466 user_data, page_do_bit17_swizzling,
471 mutex_unlock(&dev->struct_mutex);
473 if (likely(!i915.prefault_disable) && !prefaulted) {
474 ret = fault_in_multipages_writeable(user_data, remain);
475 /* Userspace is tricking us, but we've already clobbered
476 * its pages with the prefault and promised to write the
477 * data up to the first fault. Hence ignore any errors
478 * and just continue. */
483 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
484 user_data, page_do_bit17_swizzling,
487 mutex_lock(&dev->struct_mutex);
490 mark_page_accessed(page);
495 remain -= page_length;
496 user_data += page_length;
497 offset += page_length;
501 i915_gem_object_unpin_pages(obj);
507 * Reads data from the object referenced by handle.
509 * On error, the contents of *data are undefined.
512 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
513 struct drm_file *file)
515 struct drm_i915_gem_pread *args = data;
516 struct drm_i915_gem_object *obj;
522 if (!access_ok(VERIFY_WRITE,
523 to_user_ptr(args->data_ptr),
527 ret = i915_mutex_lock_interruptible(dev);
531 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
532 if (&obj->base == NULL) {
537 /* Bounds check source. */
538 if (args->offset > obj->base.size ||
539 args->size > obj->base.size - args->offset) {
544 /* prime objects have no backing filp to GEM pread/pwrite
547 if (!obj->base.filp) {
552 trace_i915_gem_object_pread(obj, args->offset, args->size);
554 ret = i915_gem_shmem_pread(dev, obj, args, file);
557 drm_gem_object_unreference(&obj->base);
559 mutex_unlock(&dev->struct_mutex);
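/*
 * Illustrative userspace sketch (not part of the driver): reading an object
 * back through the pread ioctl handled above. A GEM handle from
 * DRM_IOCTL_I915_GEM_CREATE and an open DRM fd are assumed; error handling
 * is omitted.
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	char buf[4096];
 *	struct drm_i915_gem_pread pread = {
 *		.handle   = handle,		// from GEM create
 *		.offset   = 0,			// offset into the object
 *		.size     = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,	// user pointer, widened to u64
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
 */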
563 /* This is the fast write path which cannot handle
564 * page faults in the source data
568 fast_user_write(struct io_mapping *mapping,
569 loff_t page_base, int page_offset,
570 char __user *user_data,
573 void __iomem *vaddr_atomic;
575 unsigned long unwritten;
577 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
578 /* We can use the cpu mem copy function because this is X86. */
579 vaddr = (void __force*)vaddr_atomic + page_offset;
580 unwritten = __copy_from_user_inatomic_nocache(vaddr,
582 io_mapping_unmap_atomic(vaddr_atomic);
587 * This is the fast pwrite path, where we copy the data directly from the
588 * user into the GTT, uncached.
591 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
592 struct drm_i915_gem_object *obj,
593 struct drm_i915_gem_pwrite *args,
594 struct drm_file *file)
596 drm_i915_private_t *dev_priv = dev->dev_private;
598 loff_t offset, page_base;
599 char __user *user_data;
600 int page_offset, page_length, ret;
602 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
606 ret = i915_gem_object_set_to_gtt_domain(obj, true);
610 ret = i915_gem_object_put_fence(obj);
614 user_data = to_user_ptr(args->data_ptr);
617 offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
620 /* Operation in this page
622 * page_base = page offset within aperture
623 * page_offset = offset within page
624 * page_length = bytes to copy for this page
626 page_base = offset & PAGE_MASK;
627 page_offset = offset_in_page(offset);
628 page_length = remain;
629 if ((page_offset + remain) > PAGE_SIZE)
630 page_length = PAGE_SIZE - page_offset;
632 /* If we get a fault while copying data, then (presumably) our
633 * source page isn't available. Return the error and we'll
634 * retry in the slow path.
636 if (fast_user_write(dev_priv->gtt.mappable, page_base,
637 page_offset, user_data, page_length)) {
642 remain -= page_length;
643 user_data += page_length;
644 offset += page_length;
648 i915_gem_object_ggtt_unpin(obj);
653 /* Per-page copy function for the shmem pwrite fastpath.
654 * Flushes invalid cachelines before writing to the target if
655 * needs_clflush_before is set and flushes out any written cachelines after
656 * writing if needs_clflush is set. */
658 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
659 char __user *user_data,
660 bool page_do_bit17_swizzling,
661 bool needs_clflush_before,
662 bool needs_clflush_after)
667 if (unlikely(page_do_bit17_swizzling))
670 vaddr = kmap_atomic(page);
671 if (needs_clflush_before)
672 drm_clflush_virt_range(vaddr + shmem_page_offset,
674 ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
677 if (needs_clflush_after)
678 drm_clflush_virt_range(vaddr + shmem_page_offset,
680 kunmap_atomic(vaddr);
682 return ret ? -EFAULT : 0;
685 /* The only difference from the fast-path function is that this one can handle bit17
686 * and uses non-atomic copy and kmap functions. */
688 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
689 char __user *user_data,
690 bool page_do_bit17_swizzling,
691 bool needs_clflush_before,
692 bool needs_clflush_after)
698 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
699 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
701 page_do_bit17_swizzling);
702 if (page_do_bit17_swizzling)
703 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
707 ret = __copy_from_user(vaddr + shmem_page_offset,
710 if (needs_clflush_after)
711 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
713 page_do_bit17_swizzling);
716 return ret ? -EFAULT : 0;
720 i915_gem_shmem_pwrite(struct drm_device *dev,
721 struct drm_i915_gem_object *obj,
722 struct drm_i915_gem_pwrite *args,
723 struct drm_file *file)
727 char __user *user_data;
728 int shmem_page_offset, page_length, ret = 0;
729 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
730 int hit_slowpath = 0;
731 int needs_clflush_after = 0;
732 int needs_clflush_before = 0;
733 struct sg_page_iter sg_iter;
735 user_data = to_user_ptr(args->data_ptr);
738 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
740 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
741 /* If we're not in the cpu write domain, set ourself into the gtt
742 * write domain and manually flush cachelines (if required). This
743 * optimizes for the case when the gpu will use the data
744 * right away and we therefore have to clflush anyway. */
745 needs_clflush_after = cpu_write_needs_clflush(obj);
746 ret = i915_gem_object_wait_rendering(obj, false);
750 /* Same trick applies to invalidate partially written cachelines read
752 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
753 needs_clflush_before =
754 !cpu_cache_is_coherent(dev, obj->cache_level);
756 ret = i915_gem_object_get_pages(obj);
760 i915_gem_object_pin_pages(obj);
762 offset = args->offset;
765 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
766 offset >> PAGE_SHIFT) {
767 struct page *page = sg_page_iter_page(&sg_iter);
768 int partial_cacheline_write;
773 /* Operation in this page
775 * shmem_page_offset = offset within page in shmem file
776 * page_length = bytes to copy for this page
778 shmem_page_offset = offset_in_page(offset);
780 page_length = remain;
781 if ((shmem_page_offset + page_length) > PAGE_SIZE)
782 page_length = PAGE_SIZE - shmem_page_offset;
784 /* If we don't overwrite a cacheline completely we need to be
785 * careful to have up-to-date data by first clflushing. Don't
786 * overcomplicate things and flush the entire page. */
787 partial_cacheline_write = needs_clflush_before &&
788 ((shmem_page_offset | page_length)
789 & (boot_cpu_data.x86_clflush_size - 1));
791 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
792 (page_to_phys(page) & (1 << 17)) != 0;
794 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
795 user_data, page_do_bit17_swizzling,
796 partial_cacheline_write,
797 needs_clflush_after);
802 mutex_unlock(&dev->struct_mutex);
803 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
804 user_data, page_do_bit17_swizzling,
805 partial_cacheline_write,
806 needs_clflush_after);
808 mutex_lock(&dev->struct_mutex);
811 set_page_dirty(page);
812 mark_page_accessed(page);
817 remain -= page_length;
818 user_data += page_length;
819 offset += page_length;
823 i915_gem_object_unpin_pages(obj);
827 * Fixup: Flush cpu caches in case we didn't flush the dirty
828 * cachelines in-line while writing and the object moved
829 * out of the cpu write domain while we've dropped the lock.
831 if (!needs_clflush_after &&
832 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
833 if (i915_gem_clflush_object(obj, obj->pin_display))
834 i915_gem_chipset_flush(dev);
838 if (needs_clflush_after)
839 i915_gem_chipset_flush(dev);
845 * Writes data to the object referenced by handle.
847 * On error, the contents of the buffer that were to be modified are undefined.
850 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
851 struct drm_file *file)
853 struct drm_i915_gem_pwrite *args = data;
854 struct drm_i915_gem_object *obj;
860 if (!access_ok(VERIFY_READ,
861 to_user_ptr(args->data_ptr),
865 if (likely(!i915.prefault_disable)) {
866 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
872 ret = i915_mutex_lock_interruptible(dev);
876 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
877 if (&obj->base == NULL) {
882 /* Bounds check destination. */
883 if (args->offset > obj->base.size ||
884 args->size > obj->base.size - args->offset) {
889 /* prime objects have no backing filp to GEM pread/pwrite
892 if (!obj->base.filp) {
897 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
900 /* We can only do the GTT pwrite on untiled buffers, as otherwise
901 * it would end up going through the fenced access, and we'll get
902 * different detiling behavior between reading and writing.
903 * pread/pwrite currently are reading and writing from the CPU
904 * perspective, requiring manual detiling by the client.
907 ret = i915_gem_phys_pwrite(dev, obj, args, file);
911 if (obj->tiling_mode == I915_TILING_NONE &&
912 obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
913 cpu_write_needs_clflush(obj)) {
914 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
915 /* Note that the gtt paths might fail with non-page-backed user
916 * pointers (e.g. gtt mappings when moving data between
917 * textures). Fallback to the shmem path in that case. */
920 if (ret == -EFAULT || ret == -ENOSPC)
921 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
924 drm_gem_object_unreference(&obj->base);
926 mutex_unlock(&dev->struct_mutex);
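/*
 * Illustrative userspace sketch (not part of the driver): uploading data with
 * the pwrite ioctl handled above. Whether the kernel takes the GTT fast path
 * or falls back to the shmem path is transparent to the caller. The handle,
 * fd and error handling are assumed/omitted.
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static const uint32_t pattern[1024] = { 0xdeadbeef };
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle   = handle,		// from GEM create
 *		.offset   = 0,
 *		.size     = sizeof(pattern),
 *		.data_ptr = (uintptr_t)pattern,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 */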
931 i915_gem_check_wedge(struct i915_gpu_error *error,
934 if (i915_reset_in_progress(error)) {
935 /* Non-interruptible callers can't handle -EAGAIN, hence return
936 * -EIO unconditionally for these. */
940 /* Recovery complete, but the reset failed ... */
941 if (i915_terminally_wedged(error))
951 * Compare seqno against outstanding lazy request. Emit a request if they are
955 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
959 BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
962 if (seqno == ring->outstanding_lazy_seqno)
963 ret = i915_add_request(ring, NULL);
968 static void fake_irq(unsigned long data)
970 wake_up_process((struct task_struct *)data);
973 static bool missed_irq(struct drm_i915_private *dev_priv,
974 struct intel_ring_buffer *ring)
976 return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
979 static bool can_wait_boost(struct drm_i915_file_private *file_priv)
981 if (file_priv == NULL)
984 return !atomic_xchg(&file_priv->rps_wait_boost, true);
988 * __wait_seqno - wait until execution of seqno has finished
989 * @ring: the ring expected to report seqno
991 * @reset_counter: reset sequence associated with the given seqno
992 * @interruptible: do an interruptible wait (normally yes)
993 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
995 * Note: It is of utmost importance that the passed in seqno and reset_counter
996 * values have been read by the caller in an smp safe manner. Where read-side
997 * locks are involved, it is sufficient to read the reset_counter before
998 * unlocking the lock that protects the seqno. For lockless tricks, the
999 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
1002 * Returns 0 if the seqno was found within the allotted time. Else returns the
1003 * errno with remaining time filled in timeout argument.
1005 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1006 unsigned reset_counter,
1008 struct timespec *timeout,
1009 struct drm_i915_file_private *file_priv)
1011 struct drm_device *dev = ring->dev;
1012 drm_i915_private_t *dev_priv = dev->dev_private;
1013 const bool irq_test_in_progress =
1014 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
1015 struct timespec before, now;
1017 unsigned long timeout_expire;
1020 WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
1022 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1025 timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
1027 if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
1028 gen6_rps_boost(dev_priv);
1030 mod_delayed_work(dev_priv->wq,
1031 &file_priv->mm.idle_work,
1032 msecs_to_jiffies(100));
1035 if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
1038 /* Record current time in case interrupted by signal, or wedged */
1039 trace_i915_gem_request_wait_begin(ring, seqno);
1040 getrawmonotonic(&before);
1042 struct timer_list timer;
1044 prepare_to_wait(&ring->irq_queue, &wait,
1045 interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
1047 /* We need to check whether any gpu reset happened in between
1048 * the caller grabbing the seqno and now ... */
1049 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
1050 /* ... but upgrade the -EAGAIN to an -EIO if the gpu
1051 * is truly gone. */
1052 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1058 if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
1063 if (interruptible && signal_pending(current)) {
1068 if (timeout && time_after_eq(jiffies, timeout_expire)) {
1073 timer.function = NULL;
1074 if (timeout || missed_irq(dev_priv, ring)) {
1075 unsigned long expire;
1077 setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
1078 expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
1079 mod_timer(&timer, expire);
1084 if (timer.function) {
1085 del_singleshot_timer_sync(&timer);
1086 destroy_timer_on_stack(&timer);
1089 getrawmonotonic(&now);
1090 trace_i915_gem_request_wait_end(ring, seqno);
1092 if (!irq_test_in_progress)
1093 ring->irq_put(ring);
1095 finish_wait(&ring->irq_queue, &wait);
1098 struct timespec sleep_time = timespec_sub(now, before);
1099 *timeout = timespec_sub(*timeout, sleep_time);
1100 if (!timespec_valid(timeout)) /* i.e. negative time remains */
1101 set_normalized_timespec(timeout, 0, 0);
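/*
 * Minimal sketch of the caller pattern the kerneldoc above requires (this
 * mirrors i915_gem_object_wait_rendering__nonblocking() further below; names
 * are taken from this file):
 *
 *	seqno = obj->last_read_seqno;
 *	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 *	mutex_unlock(&dev->struct_mutex);	// the lock protected the seqno
 *	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
 *	mutex_lock(&dev->struct_mutex);
 *
 * i.e. the reset counter is sampled before the lock protecting the seqno is
 * dropped, so a GPU reset occurring in that window is reported as -EAGAIN
 * rather than silently waiting on a stale seqno.
 */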
1108 * Waits for a sequence number to be signaled, and cleans up the
1109 * request and object lists appropriately for that event.
1112 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1114 struct drm_device *dev = ring->dev;
1115 struct drm_i915_private *dev_priv = dev->dev_private;
1116 bool interruptible = dev_priv->mm.interruptible;
1119 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1122 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1126 ret = i915_gem_check_olr(ring, seqno);
1130 return __wait_seqno(ring, seqno,
1131 atomic_read(&dev_priv->gpu_error.reset_counter),
1132 interruptible, NULL, NULL);
1136 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
1137 struct intel_ring_buffer *ring)
1139 i915_gem_retire_requests_ring(ring);
1141 /* Manually manage the write flush as we may have not yet
1142 * retired the buffer.
1144 * Note that the last_write_seqno is always the earlier of
1145 * the two (read/write) seqnos, so if we have successfully waited,
1146 * we know we have passed the last write.
1148 obj->last_write_seqno = 0;
1149 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1155 * Ensures that all rendering to the object has completed and the object is
1156 * safe to unbind from the GTT or access from the CPU.
1158 static __must_check int
1159 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1162 struct intel_ring_buffer *ring = obj->ring;
1166 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1170 ret = i915_wait_seqno(ring, seqno);
1174 return i915_gem_object_wait_rendering__tail(obj, ring);
1177 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1178 * as the object state may change during this call.
1180 static __must_check int
1181 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1182 struct drm_i915_file_private *file_priv,
1185 struct drm_device *dev = obj->base.dev;
1186 struct drm_i915_private *dev_priv = dev->dev_private;
1187 struct intel_ring_buffer *ring = obj->ring;
1188 unsigned reset_counter;
1192 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1193 BUG_ON(!dev_priv->mm.interruptible);
1195 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1199 ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1203 ret = i915_gem_check_olr(ring, seqno);
1207 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1208 mutex_unlock(&dev->struct_mutex);
1209 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
1210 mutex_lock(&dev->struct_mutex);
1214 return i915_gem_object_wait_rendering__tail(obj, ring);
1218 * Called when user space prepares to use an object with the CPU, either
1219 * through the mmap ioctl's mapping or a GTT mapping.
1222 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1223 struct drm_file *file)
1225 struct drm_i915_gem_set_domain *args = data;
1226 struct drm_i915_gem_object *obj;
1227 uint32_t read_domains = args->read_domains;
1228 uint32_t write_domain = args->write_domain;
1231 /* Only handle setting domains to types used by the CPU. */
1232 if (write_domain & I915_GEM_GPU_DOMAINS)
1235 if (read_domains & I915_GEM_GPU_DOMAINS)
1238 /* Having something in the write domain implies it's in the read
1239 * domain, and only that read domain. Enforce that in the request.
1241 if (write_domain != 0 && read_domains != write_domain)
1244 ret = i915_mutex_lock_interruptible(dev);
1248 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1249 if (&obj->base == NULL) {
1254 /* Try to flush the object off the GPU without holding the lock.
1255 * We will repeat the flush holding the lock in the normal manner
1256 * to catch cases where we are gazumped.
1258 ret = i915_gem_object_wait_rendering__nonblocking(obj,
1264 if (read_domains & I915_GEM_DOMAIN_GTT) {
1265 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1267 /* Silently promote "you're not bound, there was nothing to do"
1268 * to success, since the client was just asking us to
1269 * make sure everything was done.
1274 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1278 drm_gem_object_unreference(&obj->base);
1280 mutex_unlock(&dev->struct_mutex);
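/*
 * Illustrative userspace sketch (not part of the driver): moving an object
 * into the GTT domain for writing before touching a GTT mmap, via the ioctl
 * above. The handle and fd are assumed.
 *
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle       = handle,
 *		.read_domains = I915_GEM_DOMAIN_GTT,
 *		.write_domain = I915_GEM_DOMAIN_GTT,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */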
1285 * Called when user space has done writes to this buffer
1288 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1289 struct drm_file *file)
1291 struct drm_i915_gem_sw_finish *args = data;
1292 struct drm_i915_gem_object *obj;
1295 ret = i915_mutex_lock_interruptible(dev);
1299 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1300 if (&obj->base == NULL) {
1305 /* Pinned buffers may be scanout, so flush the cache */
1306 if (obj->pin_display)
1307 i915_gem_object_flush_cpu_write_domain(obj, true);
1309 drm_gem_object_unreference(&obj->base);
1311 mutex_unlock(&dev->struct_mutex);
1316 * Maps the contents of an object, returning the address it is mapped
1319 * While the mapping holds a reference on the contents of the object, it doesn't
1320 * imply a ref on the object itself.
1323 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1324 struct drm_file *file)
1326 struct drm_i915_gem_mmap *args = data;
1327 struct drm_gem_object *obj;
1330 obj = drm_gem_object_lookup(dev, file, args->handle);
1334 /* prime objects have no backing filp to GEM mmap
1338 drm_gem_object_unreference_unlocked(obj);
1342 addr = vm_mmap(obj->filp, 0, args->size,
1343 PROT_READ | PROT_WRITE, MAP_SHARED,
1345 drm_gem_object_unreference_unlocked(obj);
1346 if (IS_ERR((void *)addr))
1349 args->addr_ptr = (uint64_t) addr;
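/*
 * Illustrative userspace sketch (not part of the driver): the ioctl above
 * returns a CPU mapping of the object's shmem backing store. The handle, fd
 * and object size are assumed; error handling is omitted.
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size   = 4096,		// must not exceed the object size
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg);
 *	void *ptr = (void *)(uintptr_t)arg.addr_ptr;	// CPU-visible mapping
 */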
1355 * i915_gem_fault - fault a page into the GTT
1356 * vma: VMA in question
1359 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1360 * from userspace. The fault handler takes care of binding the object to
1361 * the GTT (if needed), allocating and programming a fence register (again,
1362 * only if needed based on whether the old reg is still valid or the object
1363 * is tiled) and inserting a new PTE into the faulting process.
1365 * Note that the faulting process may involve evicting existing objects
1366 * from the GTT and/or fence registers to make room. So performance may
1367 * suffer if the GTT working set is large or there are few fence registers
1370 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1372 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1373 struct drm_device *dev = obj->base.dev;
1374 drm_i915_private_t *dev_priv = dev->dev_private;
1375 pgoff_t page_offset;
1378 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1380 intel_runtime_pm_get(dev_priv);
1382 /* We don't use vmf->pgoff since that has the fake offset */
1383 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1386 ret = i915_mutex_lock_interruptible(dev);
1390 trace_i915_gem_object_fault(obj, page_offset, true, write);
1392 /* Try to flush the object off the GPU first without holding the lock.
1393 * Upon reacquiring the lock, we will perform our sanity checks and then
1394 * repeat the flush holding the lock in the normal manner to catch cases
1395 * where we are gazumped.
1397 ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1401 /* Access to snoopable pages through the GTT is incoherent. */
1402 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1407 /* Now bind it into the GTT if needed */
1408 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
1412 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1416 ret = i915_gem_object_get_fence(obj);
1420 obj->fault_mappable = true;
1422 pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1426 /* Finally, remap it using the new GTT offset */
1427 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1429 i915_gem_object_ggtt_unpin(obj);
1431 mutex_unlock(&dev->struct_mutex);
1435 /* If this -EIO is due to a gpu hang, give the reset code a
1436 * chance to clean up the mess. Otherwise return the proper
1438 if (i915_terminally_wedged(&dev_priv->gpu_error)) {
1439 ret = VM_FAULT_SIGBUS;
1444 * EAGAIN means the gpu is hung and we'll wait for the error
1445 * handler to reset everything when re-faulting in
1446 * i915_mutex_lock_interruptible.
1453 * EBUSY is ok: this just means that another thread
1454 * already did the job.
1456 ret = VM_FAULT_NOPAGE;
1463 ret = VM_FAULT_SIGBUS;
1466 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1467 ret = VM_FAULT_SIGBUS;
1471 intel_runtime_pm_put(dev_priv);
1475 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1477 struct i915_vma *vma;
1480 * Only the global gtt is relevant for gtt memory mappings, so restrict
1481 * list traversal to objects bound into the global address space. Note
1482 * that the active list should be empty, but better safe than sorry.
1484 WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
1485 list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
1486 i915_gem_release_mmap(vma->obj);
1487 list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
1488 i915_gem_release_mmap(vma->obj);
1492 * i915_gem_release_mmap - remove physical page mappings
1493 * @obj: obj in question
1495 * Preserve the reservation of the mmapping with the DRM core code, but
1496 * relinquish ownership of the pages back to the system.
1498 * It is vital that we remove the page mapping if we have mapped a tiled
1499 * object through the GTT and then lose the fence register due to
1500 * resource pressure. Similarly if the object has been moved out of the
1501 * aperture, then pages mapped into userspace must be revoked. Removing the
1502 * mapping will then trigger a page fault on the next user access, allowing
1503 * fixup by i915_gem_fault().
1506 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1508 if (!obj->fault_mappable)
1511 drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
1512 obj->fault_mappable = false;
1516 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1520 if (INTEL_INFO(dev)->gen >= 4 ||
1521 tiling_mode == I915_TILING_NONE)
1524 /* Previous chips need a power-of-two fence region when tiling */
1525 if (INTEL_INFO(dev)->gen == 3)
1526 gtt_size = 1024*1024;
1528 gtt_size = 512*1024;
1530 while (gtt_size < size)
1537 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1538 * @obj: object to check
1540 * Return the required GTT alignment for an object, taking into account
1541 * potential fence register mapping.
1544 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1545 int tiling_mode, bool fenced)
1548 * Minimum alignment is 4k (GTT page size), but might be greater
1549 * if a fence register is needed for the object.
1551 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1552 tiling_mode == I915_TILING_NONE)
1556 * Previous chips need to be aligned to the size of the smallest
1557 * fence register that can contain the object.
1559 return i915_gem_get_gtt_size(dev, size, tiling_mode);
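/*
 * Worked example of the fence sizing and alignment above (illustration only):
 * a 1.5 MiB X-tiled object on gen3 starts from the 1 MiB minimum and doubles
 * until it covers the object, so both the fence region and the required GTT
 * alignment come out at 2 MiB. On gen4 and later, or for untiled objects, the
 * minimum 4 KiB GTT alignment suffices.
 */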
1562 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1564 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1567 if (drm_vma_node_has_offset(&obj->base.vma_node))
1570 dev_priv->mm.shrinker_no_lock_stealing = true;
1572 ret = drm_gem_create_mmap_offset(&obj->base);
1576 /* Badly fragmented mmap space? The only way we can recover
1577 * space is by destroying unwanted objects. We can't randomly release
1578 * mmap_offsets as userspace expects them to be persistent for the
1579 * lifetime of the objects. The closest we can do is to release the
1580 * offsets on purgeable objects by truncating it and marking it purged,
1581 * which prevents userspace from ever using that object again.
1583 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1584 ret = drm_gem_create_mmap_offset(&obj->base);
1588 i915_gem_shrink_all(dev_priv);
1589 ret = drm_gem_create_mmap_offset(&obj->base);
1591 dev_priv->mm.shrinker_no_lock_stealing = false;
1596 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1598 drm_gem_free_mmap_offset(&obj->base);
1602 i915_gem_mmap_gtt(struct drm_file *file,
1603 struct drm_device *dev,
1607 struct drm_i915_private *dev_priv = dev->dev_private;
1608 struct drm_i915_gem_object *obj;
1611 ret = i915_mutex_lock_interruptible(dev);
1615 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1616 if (&obj->base == NULL) {
1621 if (obj->base.size > dev_priv->gtt.mappable_end) {
1626 if (obj->madv != I915_MADV_WILLNEED) {
1627 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
1632 ret = i915_gem_object_create_mmap_offset(obj);
1636 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
1639 drm_gem_object_unreference(&obj->base);
1641 mutex_unlock(&dev->struct_mutex);
1646 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1648 * @data: GTT mapping ioctl data
1649 * @file: GEM object info
1651 * Simply returns the fake offset to userspace so it can mmap it.
1652 * The mmap call will end up in drm_gem_mmap(), which will set things
1653 * up so we can get faults in the handler above.
1655 * The fault handler will take care of binding the object into the GTT
1656 * (since it may have been evicted to make room for something), allocating
1657 * a fence register, and mapping the appropriate aperture address into
1661 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1662 struct drm_file *file)
1664 struct drm_i915_gem_mmap_gtt *args = data;
1666 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
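/*
 * Illustrative userspace sketch (not part of the driver): using the fake
 * offset returned by the ioctl above with mmap() on the DRM fd, which routes
 * the resulting faults through i915_gem_fault(). The handle, fd and obj_size
 * are assumptions.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <drm/i915_drm.h>
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	void *ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, arg.offset);
 *	// writes through ptr go via the GTT (write-combined) mapping
 */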
1669 /* Immediately discard the backing storage */
1671 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1673 struct inode *inode;
1675 i915_gem_object_free_mmap_offset(obj);
1677 if (obj->base.filp == NULL)
1680 /* Our goal here is to return as much of the memory as
1681 * is possible back to the system as we are called from OOM.
1682 * To do this we must instruct the shmfs to drop all of its
1683 * backing pages, *now*.
1685 inode = file_inode(obj->base.filp);
1686 shmem_truncate_range(inode, 0, (loff_t)-1);
1688 obj->madv = __I915_MADV_PURGED;
1692 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1694 return obj->madv == I915_MADV_DONTNEED;
1698 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1700 struct sg_page_iter sg_iter;
1703 BUG_ON(obj->madv == __I915_MADV_PURGED);
1705 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1707 /* In the event of a disaster, abandon all caches and
1708 * hope for the best.
1710 WARN_ON(ret != -EIO);
1711 i915_gem_clflush_object(obj, true);
1712 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1715 if (i915_gem_object_needs_bit17_swizzle(obj))
1716 i915_gem_object_save_bit_17_swizzle(obj);
1718 if (obj->madv == I915_MADV_DONTNEED)
1721 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
1722 struct page *page = sg_page_iter_page(&sg_iter);
1725 set_page_dirty(page);
1727 if (obj->madv == I915_MADV_WILLNEED)
1728 mark_page_accessed(page);
1730 page_cache_release(page);
1734 sg_free_table(obj->pages);
1739 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1741 const struct drm_i915_gem_object_ops *ops = obj->ops;
1743 if (obj->pages == NULL)
1746 if (obj->pages_pin_count)
1749 BUG_ON(i915_gem_obj_bound_any(obj));
1751 /* ->put_pages might need to allocate memory for the bit17 swizzle
1752 * array, hence protect them from being reaped by removing them from gtt
1754 list_del(&obj->global_list);
1756 ops->put_pages(obj);
1759 if (i915_gem_object_is_purgeable(obj))
1760 i915_gem_object_truncate(obj);
1765 static unsigned long
1766 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1767 bool purgeable_only)
1769 struct list_head still_bound_list;
1770 struct drm_i915_gem_object *obj, *next;
1771 unsigned long count = 0;
1773 list_for_each_entry_safe(obj, next,
1774 &dev_priv->mm.unbound_list,
1776 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1777 i915_gem_object_put_pages(obj) == 0) {
1778 count += obj->base.size >> PAGE_SHIFT;
1779 if (count >= target)
1785 * As we may completely rewrite the bound list whilst unbinding
1786 * (due to retiring requests) we have to strictly process only
1787 * one element of the list at a time, and recheck the list
1788 * on every iteration.
1790 INIT_LIST_HEAD(&still_bound_list);
1791 while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
1792 struct i915_vma *vma, *v;
1794 obj = list_first_entry(&dev_priv->mm.bound_list,
1795 typeof(*obj), global_list);
1796 list_move_tail(&obj->global_list, &still_bound_list);
1798 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1802 * Hold a reference whilst we unbind this object, as we may
1803 * end up waiting for and retiring requests. This might
1804 * release the final reference (held by the active list)
1805 * and result in the object being freed from under us.
1808 * Note 1: Shrinking the bound list is special since only active
1809 * (and hence bound objects) can contain such limbo objects, so
1810 * we don't need special tricks for shrinking the unbound list.
1811 * The only other place where we have to be careful with active
1812 * objects suddenly disappearing due to retiring requests is the
1815 * Note 2: Even though the bound list doesn't hold a reference
1816 * to the object we can safely grab one here: The final object
1817 * unreferencing and the bound_list are both protected by the
1818 * dev->struct_mutex and so we won't ever be able to observe an
1819 * object on the bound_list with a reference count equal to 0.
1821 drm_gem_object_reference(&obj->base);
1823 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1824 if (i915_vma_unbind(vma))
1827 if (i915_gem_object_put_pages(obj) == 0)
1828 count += obj->base.size >> PAGE_SHIFT;
1830 drm_gem_object_unreference(&obj->base);
1832 list_splice(&still_bound_list, &dev_priv->mm.bound_list);
1837 static unsigned long
1838 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1840 return __i915_gem_shrink(dev_priv, target, true);
1843 static unsigned long
1844 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1846 struct drm_i915_gem_object *obj, *next;
1849 i915_gem_evict_everything(dev_priv->dev);
1851 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
1853 if (i915_gem_object_put_pages(obj) == 0)
1854 freed += obj->base.size >> PAGE_SHIFT;
1860 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1862 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1864 struct address_space *mapping;
1865 struct sg_table *st;
1866 struct scatterlist *sg;
1867 struct sg_page_iter sg_iter;
1869 unsigned long last_pfn = 0; /* suppress gcc warning */
1872 /* Assert that the object is not currently in any GPU domain. As it
1873 * wasn't in the GTT, there shouldn't be any way it could have been in
1876 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1877 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1879 st = kmalloc(sizeof(*st), GFP_KERNEL);
1883 page_count = obj->base.size / PAGE_SIZE;
1884 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
1889 /* Get the list of pages out of our struct file. They'll be pinned
1890 * at this point until we release them.
1892 * Fail silently without starting the shrinker
1894 mapping = file_inode(obj->base.filp)->i_mapping;
1895 gfp = mapping_gfp_mask(mapping);
1896 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1897 gfp &= ~(__GFP_IO | __GFP_WAIT);
1900 for (i = 0; i < page_count; i++) {
1901 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1903 i915_gem_purge(dev_priv, page_count);
1904 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1907 /* We've tried hard to allocate the memory by reaping
1908 * our own buffer, now let the real VM do its job and
1909 * go down in flames if truly OOM.
1911 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
1912 gfp |= __GFP_IO | __GFP_WAIT;
1914 i915_gem_shrink_all(dev_priv);
1915 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1919 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1920 gfp &= ~(__GFP_IO | __GFP_WAIT);
1922 #ifdef CONFIG_SWIOTLB
1923 if (swiotlb_nr_tbl()) {
1925 sg_set_page(sg, page, PAGE_SIZE, 0);
1930 if (!i || page_to_pfn(page) != last_pfn + 1) {
1934 sg_set_page(sg, page, PAGE_SIZE, 0);
1936 sg->length += PAGE_SIZE;
1938 last_pfn = page_to_pfn(page);
1940 /* Check that the i965g/gm workaround works. */
1941 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
1943 #ifdef CONFIG_SWIOTLB
1944 if (!swiotlb_nr_tbl())
1949 if (i915_gem_object_needs_bit17_swizzle(obj))
1950 i915_gem_object_do_bit_17_swizzle(obj);
1956 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
1957 page_cache_release(sg_page_iter_page(&sg_iter));
1960 return PTR_ERR(page);
1963 /* Ensure that the associated pages are gathered from the backing storage
1964 * and pinned into our object. i915_gem_object_get_pages() may be called
1965 * multiple times before they are released by a single call to
1966 * i915_gem_object_put_pages() - once the pages are no longer referenced
1967 * either as a result of memory pressure (reaping pages under the shrinker)
1968 * or as the object is itself released.
1971 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1973 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1974 const struct drm_i915_gem_object_ops *ops = obj->ops;
1980 if (obj->madv != I915_MADV_WILLNEED) {
1981 DRM_DEBUG("Attempting to obtain a purgeable object\n");
1985 BUG_ON(obj->pages_pin_count);
1987 ret = ops->get_pages(obj);
1991 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
1996 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1997 struct intel_ring_buffer *ring)
1999 struct drm_device *dev = obj->base.dev;
2000 struct drm_i915_private *dev_priv = dev->dev_private;
2001 u32 seqno = intel_ring_get_seqno(ring);
2003 BUG_ON(ring == NULL);
2004 if (obj->ring != ring && obj->last_write_seqno) {
2005 /* Keep the seqno relative to the current ring */
2006 obj->last_write_seqno = seqno;
2010 /* Add a reference if we're newly entering the active list. */
2012 drm_gem_object_reference(&obj->base);
2016 list_move_tail(&obj->ring_list, &ring->active_list);
2018 obj->last_read_seqno = seqno;
2020 if (obj->fenced_gpu_access) {
2021 obj->last_fenced_seqno = seqno;
2023 /* Bump MRU to take account of the delayed flush */
2024 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2025 struct drm_i915_fence_reg *reg;
2027 reg = &dev_priv->fence_regs[obj->fence_reg];
2028 list_move_tail(&reg->lru_list,
2029 &dev_priv->mm.fence_list);
2034 void i915_vma_move_to_active(struct i915_vma *vma,
2035 struct intel_ring_buffer *ring)
2037 list_move_tail(&vma->mm_list, &vma->vm->active_list);
2038 return i915_gem_object_move_to_active(vma->obj, ring);
2042 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2044 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2045 struct i915_address_space *vm;
2046 struct i915_vma *vma;
2048 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
2049 BUG_ON(!obj->active);
2051 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
2052 vma = i915_gem_obj_to_vma(obj, vm);
2053 if (vma && !list_empty(&vma->mm_list))
2054 list_move_tail(&vma->mm_list, &vm->inactive_list);
2057 list_del_init(&obj->ring_list);
2060 obj->last_read_seqno = 0;
2061 obj->last_write_seqno = 0;
2062 obj->base.write_domain = 0;
2064 obj->last_fenced_seqno = 0;
2065 obj->fenced_gpu_access = false;
2068 drm_gem_object_unreference(&obj->base);
2070 WARN_ON(i915_verify_lists(dev));
2074 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2076 struct drm_i915_private *dev_priv = dev->dev_private;
2077 struct intel_ring_buffer *ring;
2080 /* Carefully retire all requests without writing to the rings */
2081 for_each_ring(ring, dev_priv, i) {
2082 ret = intel_ring_idle(ring);
2086 i915_gem_retire_requests(dev);
2088 /* Finally reset hw state */
2089 for_each_ring(ring, dev_priv, i) {
2090 intel_ring_init_seqno(ring, seqno);
2092 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2093 ring->sync_seqno[j] = 0;
2099 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2101 struct drm_i915_private *dev_priv = dev->dev_private;
2107 /* HWS page needs to be set less than what we
2108 * will inject to ring
2110 ret = i915_gem_init_seqno(dev, seqno - 1);
2114 /* Carefully set the last_seqno value so that wrap
2115 * detection still works
2117 dev_priv->next_seqno = seqno;
2118 dev_priv->last_seqno = seqno - 1;
2119 if (dev_priv->last_seqno == 0)
2120 dev_priv->last_seqno--;
2126 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2128 struct drm_i915_private *dev_priv = dev->dev_private;
2130 /* reserve 0 for non-seqno */
2131 if (dev_priv->next_seqno == 0) {
2132 int ret = i915_gem_init_seqno(dev, 0);
2136 dev_priv->next_seqno = 1;
2139 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
2143 int __i915_add_request(struct intel_ring_buffer *ring,
2144 struct drm_file *file,
2145 struct drm_i915_gem_object *obj,
2148 drm_i915_private_t *dev_priv = ring->dev->dev_private;
2149 struct drm_i915_gem_request *request;
2150 u32 request_ring_position, request_start;
2154 request_start = intel_ring_get_tail(ring);
2156 * Emit any outstanding flushes - execbuf can fail to emit the flush
2157 * after having emitted the batchbuffer command. Hence we need to fix
2158 * things up similar to emitting the lazy request. The difference here
2159 * is that the flush _must_ happen before the next request, no matter
2162 ret = intel_ring_flush_all_caches(ring);
2166 request = ring->preallocated_lazy_request;
2167 if (WARN_ON(request == NULL))
2170 /* Record the position of the start of the request so that
2171 * should we detect the updated seqno part-way through the
2172 * GPU processing the request, we never over-estimate the
2173 * position of the head.
2175 request_ring_position = intel_ring_get_tail(ring);
2177 ret = ring->add_request(ring);
2181 request->seqno = intel_ring_get_seqno(ring);
2182 request->ring = ring;
2183 request->head = request_start;
2184 request->tail = request_ring_position;
2186 /* Whilst this request exists, batch_obj will be on the
2187 * active_list, and so will hold the active reference. Only when this
2188 * request is retired will the batch_obj be moved onto the
2189 * inactive_list and lose its active reference. Hence we do not need
2190 * to explicitly hold another reference here.
2192 request->batch_obj = obj;
2194 /* Hold a reference to the current context so that we can inspect
2195 * it later in case a hangcheck error event fires.
2197 request->ctx = ring->last_context;
2199 i915_gem_context_reference(request->ctx);
2201 request->emitted_jiffies = jiffies;
2202 was_empty = list_empty(&ring->request_list);
2203 list_add_tail(&request->list, &ring->request_list);
2204 request->file_priv = NULL;
2207 struct drm_i915_file_private *file_priv = file->driver_priv;
2209 spin_lock(&file_priv->mm.lock);
2210 request->file_priv = file_priv;
2211 list_add_tail(&request->client_list,
2212 &file_priv->mm.request_list);
2213 spin_unlock(&file_priv->mm.lock);
2216 trace_i915_gem_request_add(ring, request->seqno);
2217 ring->outstanding_lazy_seqno = 0;
2218 ring->preallocated_lazy_request = NULL;
2220 if (!dev_priv->ums.mm_suspended) {
2221 i915_queue_hangcheck(ring->dev);
2224 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2225 queue_delayed_work(dev_priv->wq,
2226 &dev_priv->mm.retire_work,
2227 round_jiffies_up_relative(HZ));
2228 intel_mark_busy(dev_priv->dev);
2233 *out_seqno = request->seqno;
2238 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2240 struct drm_i915_file_private *file_priv = request->file_priv;
2245 spin_lock(&file_priv->mm.lock);
2246 list_del(&request->client_list);
2247 request->file_priv = NULL;
2248 spin_unlock(&file_priv->mm.lock);
2251 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2252 const struct i915_hw_context *ctx)
2254 unsigned long elapsed;
2256 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2258 if (ctx->hang_stats.banned)
2261 if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
2262 if (dev_priv->gpu_error.stop_rings == 0 &&
2263 i915_gem_context_is_default(ctx)) {
2264 DRM_ERROR("gpu hanging too fast, banning!\n");
2266 DRM_DEBUG("context hanging too fast, banning!\n");
2275 static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2276 struct i915_hw_context *ctx,
2279 struct i915_ctx_hang_stats *hs;
2284 hs = &ctx->hang_stats;
2287 hs->banned = i915_context_is_banned(dev_priv, ctx);
2289 hs->guilty_ts = get_seconds();
2291 hs->batch_pending++;
2295 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2297 list_del(&request->list);
2298 i915_gem_request_remove_from_client(request);
2301 i915_gem_context_unreference(request->ctx);
2306 static struct drm_i915_gem_request *
2307 i915_gem_find_first_non_complete(struct intel_ring_buffer *ring)
2309 struct drm_i915_gem_request *request;
2310 const u32 completed_seqno = ring->get_seqno(ring, false);
2312 list_for_each_entry(request, &ring->request_list, list) {
2313 if (i915_seqno_passed(completed_seqno, request->seqno))
2322 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2323 struct intel_ring_buffer *ring)
2325 struct drm_i915_gem_request *request;
2328 request = i915_gem_find_first_non_complete(ring);
2330 if (request == NULL)
2333 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2335 i915_set_reset_status(dev_priv, request->ctx, ring_hung);
2337 list_for_each_entry_continue(request, &ring->request_list, list)
2338 i915_set_reset_status(dev_priv, request->ctx, false);
2341 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2342 struct intel_ring_buffer *ring)
2344 while (!list_empty(&ring->active_list)) {
2345 struct drm_i915_gem_object *obj;
2347 obj = list_first_entry(&ring->active_list,
2348 struct drm_i915_gem_object,
2351 i915_gem_object_move_to_inactive(obj);
2355 * We must free the requests after all the corresponding objects have
2356 * been moved off active lists. Which is the same order as the normal
2357 * retire_requests function does. This is important if objects hold
2358 * implicit references on things like e.g. ppgtt address spaces through
2361 while (!list_empty(&ring->request_list)) {
2362 struct drm_i915_gem_request *request;
2364 request = list_first_entry(&ring->request_list,
2365 struct drm_i915_gem_request,
2368 i915_gem_free_request(request);
2372 void i915_gem_restore_fences(struct drm_device *dev)
2374 struct drm_i915_private *dev_priv = dev->dev_private;
2377 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2378 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2381 * Commit delayed tiling changes if we have an object still
2382 * attached to the fence, otherwise just clear the fence.
2385 i915_gem_object_update_fence(reg->obj, reg,
2386 reg->obj->tiling_mode);
2388 i915_gem_write_fence(dev, i, NULL);
2393 void i915_gem_reset(struct drm_device *dev)
2395 struct drm_i915_private *dev_priv = dev->dev_private;
2396 struct intel_ring_buffer *ring;
2400 * Before we free the objects from the requests, we need to inspect
2401 * them for finding the guilty party. As the requests only borrow
2402 * their reference to the objects, the inspection must be done first.
2404 for_each_ring(ring, dev_priv, i)
2405 i915_gem_reset_ring_status(dev_priv, ring);
2407 for_each_ring(ring, dev_priv, i)
2408 i915_gem_reset_ring_cleanup(dev_priv, ring);
2410 i915_gem_cleanup_ringbuffer(dev);
2412 i915_gem_context_reset(dev);
2414 i915_gem_restore_fences(dev);
2418 * This function clears the request list as sequence numbers are passed.
2421 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2425 if (list_empty(&ring->request_list))
2428 WARN_ON(i915_verify_lists(ring->dev));
2430 seqno = ring->get_seqno(ring, true);
2432 /* Move any buffers on the active list that are no longer referenced
2433 * by the ringbuffer to the flushing/inactive lists as appropriate,
2434 * before we free the context associated with the requests.
2436 while (!list_empty(&ring->active_list)) {
2437 struct drm_i915_gem_object *obj;
2439 obj = list_first_entry(&ring->active_list,
2440 struct drm_i915_gem_object,
2443 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2446 i915_gem_object_move_to_inactive(obj);
2450 while (!list_empty(&ring->request_list)) {
2451 struct drm_i915_gem_request *request;
2453 request = list_first_entry(&ring->request_list,
2454 struct drm_i915_gem_request,
2457 if (!i915_seqno_passed(seqno, request->seqno))
2460 trace_i915_gem_request_retire(ring, request->seqno);
2461 /* We know the GPU must have read the request to have
2462 * sent us the seqno + interrupt, so use the position
2463 * of tail of the request to update the last known position
2466 ring->last_retired_head = request->tail;
2468 i915_gem_free_request(request);
2471 if (unlikely(ring->trace_irq_seqno &&
2472 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2473 ring->irq_put(ring);
2474 ring->trace_irq_seqno = 0;
2477 WARN_ON(i915_verify_lists(ring->dev));
2481 i915_gem_retire_requests(struct drm_device *dev)
2483 drm_i915_private_t *dev_priv = dev->dev_private;
2484 struct intel_ring_buffer *ring;
2488 for_each_ring(ring, dev_priv, i) {
2489 i915_gem_retire_requests_ring(ring);
2490 idle &= list_empty(&ring->request_list);
2494 mod_delayed_work(dev_priv->wq,
2495 &dev_priv->mm.idle_work,
2496 msecs_to_jiffies(100));
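/*
 * Periodic housekeeping: retire whatever requests have completed. If the
 * struct_mutex is contended or requests are still outstanding, re-arm the
 * work to try again in roughly a second.
 */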
2502 i915_gem_retire_work_handler(struct work_struct *work)
2504 struct drm_i915_private *dev_priv =
2505 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2506 struct drm_device *dev = dev_priv->dev;
2509 /* Come back later if the device is busy... */
2511 if (mutex_trylock(&dev->struct_mutex)) {
2512 idle = i915_gem_retire_requests(dev);
2513 mutex_unlock(&dev->struct_mutex);
2516 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2517 round_jiffies_up_relative(HZ));
2521 i915_gem_idle_work_handler(struct work_struct *work)
2523 struct drm_i915_private *dev_priv =
2524 container_of(work, typeof(*dev_priv), mm.idle_work.work);
2526 intel_mark_idle(dev_priv->dev);
2530 * Ensures that an object will eventually get non-busy by flushing any required
2531 * write domains, emitting any outstanding lazy request and retiring any
2532 * completed requests.
2535 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2540 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2544 i915_gem_retire_requests_ring(obj->ring);
2551 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2552 * @DRM_IOCTL_ARGS: standard ioctl arguments
2554 * Returns 0 if successful, else an error is returned with the remaining time in
2555 * the timeout parameter.
2556 * -ETIME: object is still busy after timeout
2557 * -ERESTARTSYS: signal interrupted the wait
2558 * -ENOENT: object doesn't exist
2559 * Also possible, but rare:
2560 * -EAGAIN: GPU wedged
2562 * -ENODEV: Internal IRQ fail
2563 * -E?: The add request failed
2565 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2566 * non-zero timeout parameter the wait ioctl will wait for the given number of
2567 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2568 * without holding struct_mutex the object may become re-busied before this
2569 * function completes. A similar but shorter race condition exists in the busy ioctl.
2573 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2575 drm_i915_private_t *dev_priv = dev->dev_private;
2576 struct drm_i915_gem_wait *args = data;
2577 struct drm_i915_gem_object *obj;
2578 struct intel_ring_buffer *ring = NULL;
2579 struct timespec timeout_stack, *timeout = NULL;
2580 unsigned reset_counter;
2584 if (args->timeout_ns >= 0) {
2585 timeout_stack = ns_to_timespec(args->timeout_ns);
2586 timeout = &timeout_stack;
2589 ret = i915_mutex_lock_interruptible(dev);
2593 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2594 if (&obj->base == NULL) {
2595 mutex_unlock(&dev->struct_mutex);
2599 /* Need to make sure the object gets inactive eventually. */
2600 ret = i915_gem_object_flush_active(obj);
2605 seqno = obj->last_read_seqno;
2612 /* Do this after OLR check to make sure we make forward progress polling
2613 * on this IOCTL with a 0 timeout (like busy ioctl)
2615 if (!args->timeout_ns) {
2620 drm_gem_object_unreference(&obj->base);
2621 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2622 mutex_unlock(&dev->struct_mutex);
2624 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
2626 args->timeout_ns = timespec_to_ns(timeout);
2630 drm_gem_object_unreference(&obj->base);
2631 mutex_unlock(&dev->struct_mutex);
2636 * i915_gem_object_sync - sync an object to a ring.
2638 * @obj: object which may be in use on another ring.
2639 * @to: ring we wish to use the object on. May be NULL.
2641 * This code is meant to abstract object synchronization with the GPU.
2642 * Calling with NULL implies synchronizing the object with the CPU
2643 * rather than a particular GPU ring.
2645 * Returns 0 if successful, else propagates up the lower layer error.
2648 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2649 struct intel_ring_buffer *to)
2651 struct intel_ring_buffer *from = obj->ring;
2655 if (from == NULL || to == from)
2658 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2659 return i915_gem_object_wait_rendering(obj, false);
2661 idx = intel_ring_sync_index(from, to);
2663 seqno = obj->last_read_seqno;
2664 if (seqno <= from->sync_seqno[idx])
2667 ret = i915_gem_check_olr(obj->ring, seqno);
2671 trace_i915_gem_ring_sync_to(from, to, seqno);
2672 ret = to->sync_to(to, from, seqno);
2674 /* We use last_read_seqno because sync_to()
2675 * might have just caused seqno wrap under
2678 from->sync_seqno[idx] = obj->last_read_seqno;
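/*
 * Drop all GTT access to the object: revoke any CPU mmaps so the next
 * user access faults (keeping domain tracking honest) and clear the GTT
 * read/write domains.
 */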
2683 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2685 u32 old_write_domain, old_read_domains;
2687 /* Force a pagefault for domain tracking on next user access */
2688 i915_gem_release_mmap(obj);
2690 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2693 /* Wait for any direct GTT access to complete */
2696 old_read_domains = obj->base.read_domains;
2697 old_write_domain = obj->base.write_domain;
2699 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2700 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2702 trace_i915_gem_object_change_domain(obj,
2707 int i915_vma_unbind(struct i915_vma *vma)
2709 struct drm_i915_gem_object *obj = vma->obj;
2710 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2713 if (list_empty(&vma->vma_link))
2716 if (!drm_mm_node_allocated(&vma->node)) {
2717 i915_gem_vma_destroy(vma);
2724 BUG_ON(obj->pages == NULL);
2726 ret = i915_gem_object_finish_gpu(obj);
2729 /* Continue on if we fail due to EIO, the GPU is hung so we
2730 * should be safe and we need to cleanup or else we might
2731 * cause memory corruption through use-after-free.
2734 i915_gem_object_finish_gtt(obj);
2736 /* release the fence reg _after_ flushing */
2737 ret = i915_gem_object_put_fence(obj);
2741 trace_i915_vma_unbind(vma);
2743 vma->unbind_vma(vma);
2745 i915_gem_gtt_finish_object(obj);
2747 list_del(&vma->mm_list);
2748 /* Avoid an unnecessary call to unbind on rebind. */
2749 if (i915_is_ggtt(vma->vm))
2750 obj->map_and_fenceable = true;
2752 drm_mm_remove_node(&vma->node);
2753 i915_gem_vma_destroy(vma);
2755 /* Since the unbound list is global, only move to that list if
2756 * no more VMAs exist. */
2757 if (list_empty(&obj->vma_list))
2758 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2760 /* And finally now the object is completely decoupled from this vma,
2761 * we can drop its hold on the backing storage and allow it to be
2762 * reaped by the shrinker.
2764 i915_gem_object_unpin_pages(obj);
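/*
 * Wait for the GPU to become completely idle: switch every ring back to
 * its default context and then wait for each ring to drain.
 */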
2769 int i915_gpu_idle(struct drm_device *dev)
2771 drm_i915_private_t *dev_priv = dev->dev_private;
2772 struct intel_ring_buffer *ring;
2775 /* Flush everything onto the inactive list. */
2776 for_each_ring(ring, dev_priv, i) {
2777 ret = i915_switch_context(ring, NULL, ring->default_context);
2781 ret = intel_ring_idle(ring);
2789 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2790 struct drm_i915_gem_object *obj)
2792 drm_i915_private_t *dev_priv = dev->dev_private;
2794 int fence_pitch_shift;
2796 if (INTEL_INFO(dev)->gen >= 6) {
2797 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2798 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2800 fence_reg = FENCE_REG_965_0;
2801 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2804 fence_reg += reg * 8;
2806 /* To w/a incoherency with non-atomic 64-bit register updates,
2807 * we split the 64-bit update into two 32-bit writes. In order
2808 * for a partial fence not to be evaluated between writes, we
2809 * precede the update with write to turn off the fence register,
2810 * and only enable the fence as the last step.
2812 * For extra levels of paranoia, we make sure each step lands
2813 * before applying the next step.
2815 I915_WRITE(fence_reg, 0);
2816 POSTING_READ(fence_reg);
2819 u32 size = i915_gem_obj_ggtt_size(obj);
2822 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
2824 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
2825 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2826 if (obj->tiling_mode == I915_TILING_Y)
2827 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2828 val |= I965_FENCE_REG_VALID;
2830 I915_WRITE(fence_reg + 4, val >> 32);
2831 POSTING_READ(fence_reg + 4);
2833 I915_WRITE(fence_reg + 0, val);
2834 POSTING_READ(fence_reg);
2836 I915_WRITE(fence_reg + 4, 0);
2837 POSTING_READ(fence_reg + 4);
2841 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2842 struct drm_i915_gem_object *obj)
2844 drm_i915_private_t *dev_priv = dev->dev_private;
2848 u32 size = i915_gem_obj_ggtt_size(obj);
2852 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
2853 (size & -size) != size ||
2854 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2855 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2856 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
2858 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2863 /* Note: pitch better be a power of two tile widths */
2864 pitch_val = obj->stride / tile_width;
2865 pitch_val = ffs(pitch_val) - 1;
2867 val = i915_gem_obj_ggtt_offset(obj);
2868 if (obj->tiling_mode == I915_TILING_Y)
2869 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2870 val |= I915_FENCE_SIZE_BITS(size);
2871 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2872 val |= I830_FENCE_REG_VALID;
2877 reg = FENCE_REG_830_0 + reg * 4;
2879 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2881 I915_WRITE(reg, val);
2885 static void i830_write_fence_reg(struct drm_device *dev, int reg,
2886 struct drm_i915_gem_object *obj)
2888 drm_i915_private_t *dev_priv = dev->dev_private;
2892 u32 size = i915_gem_obj_ggtt_size(obj);
2895 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
2896 (size & -size) != size ||
2897 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2898 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2899 i915_gem_obj_ggtt_offset(obj), size);
2901 pitch_val = obj->stride / 128;
2902 pitch_val = ffs(pitch_val) - 1;
2904 val = i915_gem_obj_ggtt_offset(obj);
2905 if (obj->tiling_mode == I915_TILING_Y)
2906 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2907 val |= I830_FENCE_SIZE_BITS(size);
2908 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2909 val |= I830_FENCE_REG_VALID;
2913 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2914 POSTING_READ(FENCE_REG_830_0 + reg * 4);
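/*
 * Objects still readable through the GTT need a memory barrier around
 * fence register updates so that in-flight GTT access is not reordered
 * across the change.
 */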
2917 inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2919 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2922 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2923 struct drm_i915_gem_object *obj)
2925 struct drm_i915_private *dev_priv = dev->dev_private;
2927 /* Ensure that all CPU reads are completed before installing a fence
2928 * and all writes before removing the fence.
2930 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2933 WARN(obj && (!obj->stride || !obj->tiling_mode),
2934 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2935 obj->stride, obj->tiling_mode);
2937 switch (INTEL_INFO(dev)->gen) {
2942 case 4: i965_write_fence_reg(dev, reg, obj); break;
2943 case 3: i915_write_fence_reg(dev, reg, obj); break;
2944 case 2: i830_write_fence_reg(dev, reg, obj); break;
2948 /* And similarly be paranoid that no direct access to this region
2949 * is reordered to before the fence is installed.
2951 if (i915_gem_object_needs_mb(obj))
2955 static inline int fence_number(struct drm_i915_private *dev_priv,
2956 struct drm_i915_fence_reg *fence)
2958 return fence - dev_priv->fence_regs;
2961 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2962 struct drm_i915_fence_reg *fence,
2965 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2966 int reg = fence_number(dev_priv, fence);
2968 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2971 obj->fence_reg = reg;
2973 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2975 obj->fence_reg = I915_FENCE_REG_NONE;
2977 list_del_init(&fence->lru_list);
2979 obj->fence_dirty = false;
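/*
 * Wait for the last fenced GPU access to the object to complete before
 * its fence register is modified or handed to another object.
 */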
2983 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
2985 if (obj->last_fenced_seqno) {
2986 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2990 obj->last_fenced_seqno = 0;
2993 obj->fenced_gpu_access = false;
2998 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3000 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3001 struct drm_i915_fence_reg *fence;
3004 ret = i915_gem_object_wait_fence(obj);
3008 if (obj->fence_reg == I915_FENCE_REG_NONE)
3011 fence = &dev_priv->fence_regs[obj->fence_reg];
3013 i915_gem_object_fence_lost(obj);
3014 i915_gem_object_update_fence(obj, fence, false);
3019 static struct drm_i915_fence_reg *
3020 i915_find_fence_reg(struct drm_device *dev)
3022 struct drm_i915_private *dev_priv = dev->dev_private;
3023 struct drm_i915_fence_reg *reg, *avail;
3026 /* First try to find a free reg */
3028 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3029 reg = &dev_priv->fence_regs[i];
3033 if (!reg->pin_count)
3040 /* None available, try to steal one or wait for a user to finish */
3041 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3049 /* Wait for completion of pending flips which consume fences */
3050 if (intel_has_pending_fb_unpin(dev))
3051 return ERR_PTR(-EAGAIN);
3053 return ERR_PTR(-EDEADLK);
3057 * i915_gem_object_get_fence - set up fencing for an object
3058 * @obj: object to map through a fence reg
3060 * When mapping objects through the GTT, userspace wants to be able to write
3061 * to them without having to worry about swizzling if the object is tiled.
3062 * This function walks the fence regs looking for a free one for @obj,
3063 * stealing one if it can't find any.
3065 * It then sets up the reg based on the object's properties: address, pitch
3066 * and tiling format.
3068 * For an untiled surface, this removes any existing fence.
3071 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3073 struct drm_device *dev = obj->base.dev;
3074 struct drm_i915_private *dev_priv = dev->dev_private;
3075 bool enable = obj->tiling_mode != I915_TILING_NONE;
3076 struct drm_i915_fence_reg *reg;
3079 /* Have we updated the tiling parameters upon the object and so
3080 * will need to serialise the write to the associated fence register?
3082 if (obj->fence_dirty) {
3083 ret = i915_gem_object_wait_fence(obj);
3088 /* Just update our place in the LRU if our fence is getting reused. */
3089 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3090 reg = &dev_priv->fence_regs[obj->fence_reg];
3091 if (!obj->fence_dirty) {
3092 list_move_tail(&reg->lru_list,
3093 &dev_priv->mm.fence_list);
3096 } else if (enable) {
3097 reg = i915_find_fence_reg(dev);
3099 return PTR_ERR(reg);
3102 struct drm_i915_gem_object *old = reg->obj;
3104 ret = i915_gem_object_wait_fence(old);
3108 i915_gem_object_fence_lost(old);
3113 i915_gem_object_update_fence(obj, reg, enable);
3118 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3119 struct drm_mm_node *gtt_space,
3120 unsigned long cache_level)
3122 struct drm_mm_node *other;
3124 /* On non-LLC machines we have to be careful when putting differing
3125 * types of snoopable memory together to avoid the prefetcher
3126 * crossing memory domains and dying.
3131 if (!drm_mm_node_allocated(gtt_space))
3134 if (list_empty(&gtt_space->node_list))
3137 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3138 if (other->allocated && !other->hole_follows && other->color != cache_level)
3141 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3142 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3148 static void i915_gem_verify_gtt(struct drm_device *dev)
3151 struct drm_i915_private *dev_priv = dev->dev_private;
3152 struct drm_i915_gem_object *obj;
3155 list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
3156 if (obj->gtt_space == NULL) {
3157 printk(KERN_ERR "object found on GTT list with no space reserved\n");
3162 if (obj->cache_level != obj->gtt_space->color) {
3163 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3164 i915_gem_obj_ggtt_offset(obj),
3165 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3167 obj->gtt_space->color);
3172 if (!i915_gem_valid_gtt_space(dev,
3174 obj->cache_level)) {
3175 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3176 i915_gem_obj_ggtt_offset(obj),
3177 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3189 * Finds free space in the GTT aperture and binds the object there.
3191 static struct i915_vma *
3192 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3193 struct i915_address_space *vm,
3197 struct drm_device *dev = obj->base.dev;
3198 drm_i915_private_t *dev_priv = dev->dev_private;
3199 u32 size, fence_size, fence_alignment, unfenced_alignment;
3201 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
3202 struct i915_vma *vma;
3205 fence_size = i915_gem_get_gtt_size(dev,
3208 fence_alignment = i915_gem_get_gtt_alignment(dev,
3210 obj->tiling_mode, true);
3211 unfenced_alignment =
3212 i915_gem_get_gtt_alignment(dev,
3214 obj->tiling_mode, false);
3217 alignment = flags & PIN_MAPPABLE ? fence_alignment :
3219 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3220 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
3221 return ERR_PTR(-EINVAL);
3224 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3226 /* If the object is bigger than the entire aperture, reject it early
3227 * before evicting everything in a vain attempt to find space.
3229 if (obj->base.size > gtt_max) {
3230 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
3232 flags & PIN_MAPPABLE ? "mappable" : "total",
3234 return ERR_PTR(-E2BIG);
3237 ret = i915_gem_object_get_pages(obj);
3239 return ERR_PTR(ret);
3241 i915_gem_object_pin_pages(obj);
3243 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
3248 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3250 obj->cache_level, 0, gtt_max,
3251 DRM_MM_SEARCH_DEFAULT);
3253 ret = i915_gem_evict_something(dev, vm, size, alignment,
3254 obj->cache_level, flags);
3260 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
3261 obj->cache_level))) {
3263 goto err_remove_node;
3266 ret = i915_gem_gtt_prepare_object(obj);
3268 goto err_remove_node;
3270 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3271 list_add_tail(&vma->mm_list, &vm->inactive_list);
3273 if (i915_is_ggtt(vm)) {
3274 bool mappable, fenceable;
3276 fenceable = (vma->node.size == fence_size &&
3277 (vma->node.start & (fence_alignment - 1)) == 0);
3279 mappable = (vma->node.start + obj->base.size <=
3280 dev_priv->gtt.mappable_end);
3282 obj->map_and_fenceable = mappable && fenceable;
3285 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
3287 trace_i915_vma_bind(vma, flags);
3288 i915_gem_verify_gtt(dev);
3292 drm_mm_remove_node(&vma->node);
3294 i915_gem_vma_destroy(vma);
3297 i915_gem_object_unpin_pages(obj);
3302 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3305 /* If we don't have a page list set up, then we're not pinned
3306 * to GPU, and we can ignore the cache flush because it'll happen
3307 * again at bind time.
3309 if (obj->pages == NULL)
3313 * Stolen memory is always coherent with the GPU as it is explicitly
3314 * marked as wc by the system, or the system is cache-coherent.
3319 /* If the GPU is snooping the contents of the CPU cache,
3320 * we do not need to manually clear the CPU cache lines. However,
3321 * the caches are only snooped when the render cache is
3322 * flushed/invalidated. As we always have to emit invalidations
3323 * and flushes when moving into and out of the RENDER domain, correct
3324 * snooping behaviour occurs naturally as the result of our domain
3327 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3330 trace_i915_gem_object_clflush(obj);
3331 drm_clflush_sg(obj->pages);
3336 /** Flushes the GTT write domain for the object if it's dirty. */
3338 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3340 uint32_t old_write_domain;
3342 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3345 /* No actual flushing is required for the GTT write domain. Writes
3346 * to it immediately go to main memory as far as we know, so there's
3347 * no chipset flush. It also doesn't land in render cache.
3349 * However, we do have to enforce the order so that all writes through
3350 * the GTT land before any writes to the device, such as updates to
3355 old_write_domain = obj->base.write_domain;
3356 obj->base.write_domain = 0;
3358 trace_i915_gem_object_change_domain(obj,
3359 obj->base.read_domains,
3363 /** Flushes the CPU write domain for the object if it's dirty. */
3365 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3368 uint32_t old_write_domain;
3370 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3373 if (i915_gem_clflush_object(obj, force))
3374 i915_gem_chipset_flush(obj->base.dev);
3376 old_write_domain = obj->base.write_domain;
3377 obj->base.write_domain = 0;
3379 trace_i915_gem_object_change_domain(obj,
3380 obj->base.read_domains,
3385 * Moves a single object to the GTT read, and possibly write domain.
3387 * This function returns when the move is complete, including waiting on
3391 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3393 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
3394 uint32_t old_write_domain, old_read_domains;
3397 /* Not valid to be called on unbound objects. */
3398 if (!i915_gem_obj_bound_any(obj))
3401 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3404 ret = i915_gem_object_wait_rendering(obj, !write);
3408 i915_gem_object_flush_cpu_write_domain(obj, false);
3410 /* Serialise direct access to this object with the barriers for
3411 * coherent writes from the GPU, by effectively invalidating the
3412 * GTT domain upon first access.
3414 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3417 old_write_domain = obj->base.write_domain;
3418 old_read_domains = obj->base.read_domains;
3420 /* It should now be out of any other write domains, and we can update
3421 * the domain values for our changes.
3423 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3424 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3426 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3427 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3431 trace_i915_gem_object_change_domain(obj,
3435 /* And bump the LRU for this access */
3436 if (i915_gem_object_is_inactive(obj)) {
3437 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3439 list_move_tail(&vma->mm_list,
3440 &dev_priv->gtt.base.inactive_list);
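/*
 * Change the cache level (and hence the PTE caching bits) of an object.
 * Any VMA whose placement is no longer valid for the new level is unbound,
 * outstanding GPU and GTT access is flushed, pre-SNB fences are released,
 * and the remaining bindings are rewritten with the new cache level.
 */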
3447 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3448 enum i915_cache_level cache_level)
3450 struct drm_device *dev = obj->base.dev;
3451 struct i915_vma *vma;
3454 if (obj->cache_level == cache_level)
3457 if (i915_gem_obj_is_pinned(obj)) {
3458 DRM_DEBUG("can not change the cache level of pinned objects\n");
3462 list_for_each_entry(vma, &obj->vma_list, vma_link) {
3463 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
3464 ret = i915_vma_unbind(vma);
3472 if (i915_gem_obj_bound_any(obj)) {
3473 ret = i915_gem_object_finish_gpu(obj);
3477 i915_gem_object_finish_gtt(obj);
3479 /* Before SandyBridge, you could not use tiling or fence
3480 * registers with snooped memory, so relinquish any fences
3481 * currently pointing to our region in the aperture.
3483 if (INTEL_INFO(dev)->gen < 6) {
3484 ret = i915_gem_object_put_fence(obj);
3489 list_for_each_entry(vma, &obj->vma_list, vma_link)
3490 vma->bind_vma(vma, cache_level, 0);
3493 list_for_each_entry(vma, &obj->vma_list, vma_link)
3494 vma->node.color = cache_level;
3495 obj->cache_level = cache_level;
3497 if (cpu_write_needs_clflush(obj)) {
3498 u32 old_read_domains, old_write_domain;
3500 /* If we're coming from LLC cached, then we haven't
3501 * actually been tracking whether the data is in the
3502 * CPU cache or not, since we only allow one bit set
3503 * in obj->write_domain and have been skipping the clflushes.
3504 * Just set it to the CPU cache for now.
3506 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3508 old_read_domains = obj->base.read_domains;
3509 old_write_domain = obj->base.write_domain;
3511 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3512 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3514 trace_i915_gem_object_change_domain(obj,
3519 i915_gem_verify_gtt(dev);
3523 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3524 struct drm_file *file)
3526 struct drm_i915_gem_caching *args = data;
3527 struct drm_i915_gem_object *obj;
3530 ret = i915_mutex_lock_interruptible(dev);
3534 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3535 if (&obj->base == NULL) {
3540 switch (obj->cache_level) {
3541 case I915_CACHE_LLC:
3542 case I915_CACHE_L3_LLC:
3543 args->caching = I915_CACHING_CACHED;
3547 args->caching = I915_CACHING_DISPLAY;
3551 args->caching = I915_CACHING_NONE;
3555 drm_gem_object_unreference(&obj->base);
3557 mutex_unlock(&dev->struct_mutex);
3561 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3562 struct drm_file *file)
3564 struct drm_i915_gem_caching *args = data;
3565 struct drm_i915_gem_object *obj;
3566 enum i915_cache_level level;
3569 switch (args->caching) {
3570 case I915_CACHING_NONE:
3571 level = I915_CACHE_NONE;
3573 case I915_CACHING_CACHED:
3574 level = I915_CACHE_LLC;
3576 case I915_CACHING_DISPLAY:
3577 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3583 ret = i915_mutex_lock_interruptible(dev);
3587 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3588 if (&obj->base == NULL) {
3593 ret = i915_gem_object_set_cache_level(obj, level);
3595 drm_gem_object_unreference(&obj->base);
3597 mutex_unlock(&dev->struct_mutex);
3601 static bool is_pin_display(struct drm_i915_gem_object *obj)
3603 /* There are 3 sources that pin objects:
3604 * 1. The display engine (scanouts, sprites, cursors);
3605 * 2. Reservations for execbuffer;
3608 * We can ignore reservations as we hold the struct_mutex and
3609 * are only called outside of the reservation path. The user
3610 * can only increment pin_count once, and so if after
3611 * subtracting the potential reference by the user, any pin_count
3612 * remains, it must be due to another use by the display engine.
3614 return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
3618 * Prepare buffer for display plane (scanout, cursors, etc).
3619 * Can be called from an uninterruptible phase (modesetting) and allows
3620 * any flushes to be pipelined (for pageflips).
3623 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3625 struct intel_ring_buffer *pipelined)
3627 u32 old_read_domains, old_write_domain;
3630 if (pipelined != obj->ring) {
3631 ret = i915_gem_object_sync(obj, pipelined);
3636 /* Mark the pin_display early so that we account for the
3637 * display coherency whilst setting up the cache domains.
3639 obj->pin_display = true;
3641 /* The display engine is not coherent with the LLC cache on gen6. As
3642 * a result, we make sure that the pinning that is about to occur is
3643 * done with uncached PTEs. This is lowest common denominator for all
3646 * However for gen6+, we could do better by using the GFDT bit instead
3647 * of uncaching, which would allow us to flush all the LLC-cached data
3648 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3650 ret = i915_gem_object_set_cache_level(obj,
3651 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3653 goto err_unpin_display;
3655 /* As the user may map the buffer once pinned in the display plane
3656 * (e.g. libkms for the bootup splash), we have to ensure that we
3657 * always use map_and_fenceable for all scanout buffers.
3659 ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
3661 goto err_unpin_display;
3663 i915_gem_object_flush_cpu_write_domain(obj, true);
3665 old_write_domain = obj->base.write_domain;
3666 old_read_domains = obj->base.read_domains;
3668 /* It should now be out of any other write domains, and we can update
3669 * the domain values for our changes.
3671 obj->base.write_domain = 0;
3672 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3674 trace_i915_gem_object_change_domain(obj,
3681 obj->pin_display = is_pin_display(obj);
3686 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3688 i915_gem_object_ggtt_unpin(obj);
3689 obj->pin_display = is_pin_display(obj);
3693 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3697 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3700 ret = i915_gem_object_wait_rendering(obj, false);
3704 /* Ensure that we invalidate the GPU's caches and TLBs. */
3705 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3710 * Moves a single object to the CPU read, and possibly write domain.
3712 * This function returns when the move is complete, including waiting on
3716 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3718 uint32_t old_write_domain, old_read_domains;
3721 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3724 ret = i915_gem_object_wait_rendering(obj, !write);
3728 i915_gem_object_flush_gtt_write_domain(obj);
3730 old_write_domain = obj->base.write_domain;
3731 old_read_domains = obj->base.read_domains;
3733 /* Flush the CPU cache if it's still invalid. */
3734 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3735 i915_gem_clflush_object(obj, false);
3737 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3740 /* It should now be out of any other write domains, and we can update
3741 * the domain values for our changes.
3743 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3745 /* If we're writing through the CPU, then the GPU read domains will
3746 * need to be invalidated at next use.
3749 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3750 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3753 trace_i915_gem_object_change_domain(obj,
3760 /* Throttle our rendering by waiting until the ring has completed our requests
3761 * emitted over 20 msec ago.
3763 * Note that if we were to use the current jiffies each time around the loop,
3764 * we wouldn't escape the function with any frames outstanding if the time to
3765 * render a frame was over 20ms.
3767 * This should get us reasonable parallelism between CPU and GPU but also
3768 * relatively low latency when blocking on a particular request to finish.
3771 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3773 struct drm_i915_private *dev_priv = dev->dev_private;
3774 struct drm_i915_file_private *file_priv = file->driver_priv;
3775 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3776 struct drm_i915_gem_request *request;
3777 struct intel_ring_buffer *ring = NULL;
3778 unsigned reset_counter;
3782 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3786 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3790 spin_lock(&file_priv->mm.lock);
3791 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3792 if (time_after_eq(request->emitted_jiffies, recent_enough))
3795 ring = request->ring;
3796 seqno = request->seqno;
3798 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3799 spin_unlock(&file_priv->mm.lock);
3804 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
3806 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
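/*
 * Pin the object into the given address space: unbind and rebind if an
 * existing VMA violates the requested alignment or mappability, bind it
 * if it is not yet bound, and finally bump the VMA's pin count.
 */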
3812 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3813 struct i915_address_space *vm,
3817 struct i915_vma *vma;
3820 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
3823 vma = i915_gem_obj_to_vma(obj, vm);
3825 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3829 vma->node.start & (alignment - 1)) ||
3830 (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
3831 WARN(vma->pin_count,
3832 "bo is already pinned with incorrect alignment:"
3833 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
3834 " obj->map_and_fenceable=%d\n",
3835 i915_gem_obj_offset(obj, vm), alignment,
3836 flags & PIN_MAPPABLE,
3837 obj->map_and_fenceable);
3838 ret = i915_vma_unbind(vma);
3844 if (!i915_gem_obj_bound(obj, vm)) {
3846 vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
3848 return PTR_ERR(vma);
3851 vma = i915_gem_obj_to_vma(obj, vm);
3853 vma->bind_vma(vma, obj->cache_level,
3854 flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
3856 i915_gem_obj_to_vma(obj, vm)->pin_count++;
3857 if (flags & PIN_MAPPABLE)
3858 obj->pin_mappable |= true;
3864 i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
3866 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3869 BUG_ON(vma->pin_count == 0);
3870 BUG_ON(!i915_gem_obj_ggtt_bound(obj));
3872 if (--vma->pin_count == 0)
3873 obj->pin_mappable = false;
3877 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3878 struct drm_file *file)
3880 struct drm_i915_gem_pin *args = data;
3881 struct drm_i915_gem_object *obj;
3884 if (INTEL_INFO(dev)->gen >= 6)
3887 ret = i915_mutex_lock_interruptible(dev);
3891 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3892 if (&obj->base == NULL) {
3897 if (obj->madv != I915_MADV_WILLNEED) {
3898 DRM_DEBUG("Attempting to pin a purgeable buffer\n");
3903 if (obj->pin_filp != NULL && obj->pin_filp != file) {
3904 DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
3910 if (obj->user_pin_count == ULONG_MAX) {
3915 if (obj->user_pin_count == 0) {
3916 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
3921 obj->user_pin_count++;
3922 obj->pin_filp = file;
3924 args->offset = i915_gem_obj_ggtt_offset(obj);
3926 drm_gem_object_unreference(&obj->base);
3928 mutex_unlock(&dev->struct_mutex);
3933 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3934 struct drm_file *file)
3936 struct drm_i915_gem_pin *args = data;
3937 struct drm_i915_gem_object *obj;
3940 ret = i915_mutex_lock_interruptible(dev);
3944 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3945 if (&obj->base == NULL) {
3950 if (obj->pin_filp != file) {
3951 DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3956 obj->user_pin_count--;
3957 if (obj->user_pin_count == 0) {
3958 obj->pin_filp = NULL;
3959 i915_gem_object_ggtt_unpin(obj);
3963 drm_gem_object_unreference(&obj->base);
3965 mutex_unlock(&dev->struct_mutex);
3970 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3971 struct drm_file *file)
3973 struct drm_i915_gem_busy *args = data;
3974 struct drm_i915_gem_object *obj;
3977 ret = i915_mutex_lock_interruptible(dev);
3981 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3982 if (&obj->base == NULL) {
3987 /* Count all active objects as busy, even if they are currently not used
3988 * by the gpu. Users of this interface expect objects to eventually
3989 * become non-busy without any further actions, therefore emit any
3990 * necessary flushes here.
3992 ret = i915_gem_object_flush_active(obj);
3994 args->busy = obj->active;
3996 BUILD_BUG_ON(I915_NUM_RINGS > 16);
3997 args->busy |= intel_ring_flag(obj->ring) << 16;
4000 drm_gem_object_unreference(&obj->base);
4002 mutex_unlock(&dev->struct_mutex);
4007 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4008 struct drm_file *file_priv)
4010 return i915_gem_ring_throttle(dev, file_priv);
4014 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4015 struct drm_file *file_priv)
4017 struct drm_i915_gem_madvise *args = data;
4018 struct drm_i915_gem_object *obj;
4021 switch (args->madv) {
4022 case I915_MADV_DONTNEED:
4023 case I915_MADV_WILLNEED:
4029 ret = i915_mutex_lock_interruptible(dev);
4033 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
4034 if (&obj->base == NULL) {
4039 if (i915_gem_obj_is_pinned(obj)) {
4044 if (obj->madv != __I915_MADV_PURGED)
4045 obj->madv = args->madv;
4047 /* if the object is no longer attached, discard its backing storage */
4048 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
4049 i915_gem_object_truncate(obj);
4051 args->retained = obj->madv != __I915_MADV_PURGED;
4054 drm_gem_object_unreference(&obj->base);
4056 mutex_unlock(&dev->struct_mutex);
4060 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4061 const struct drm_i915_gem_object_ops *ops)
4063 INIT_LIST_HEAD(&obj->global_list);
4064 INIT_LIST_HEAD(&obj->ring_list);
4065 INIT_LIST_HEAD(&obj->obj_exec_link);
4066 INIT_LIST_HEAD(&obj->vma_list);
4070 obj->fence_reg = I915_FENCE_REG_NONE;
4071 obj->madv = I915_MADV_WILLNEED;
4072 /* Avoid an unnecessary call to unbind on the first bind. */
4073 obj->map_and_fenceable = true;
4075 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4078 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4079 .get_pages = i915_gem_object_get_pages_gtt,
4080 .put_pages = i915_gem_object_put_pages_gtt,
4083 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4086 struct drm_i915_gem_object *obj;
4087 struct address_space *mapping;
4090 obj = i915_gem_object_alloc(dev);
4094 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4095 i915_gem_object_free(obj);
4099 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4100 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4101 /* 965gm cannot relocate objects above 4GiB. */
4102 mask &= ~__GFP_HIGHMEM;
4103 mask |= __GFP_DMA32;
4106 mapping = file_inode(obj->base.filp)->i_mapping;
4107 mapping_set_gfp_mask(mapping, mask);
4109 i915_gem_object_init(obj, &i915_gem_object_ops);
4111 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4112 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4115 /* On some devices, we can have the GPU use the LLC (the CPU
4116 * cache) for about a 10% performance improvement
4117 * compared to uncached. Graphics requests other than
4118 * display scanout are coherent with the CPU in
4119 * accessing this cache. This means in this mode we
4120 * don't need to clflush on the CPU side, and on the
4121 * GPU side we only need to flush internal caches to
4122 * get data visible to the CPU.
4124 * However, we maintain the display planes as UC, and so
4125 * need to rebind when first used as such.
4127 obj->cache_level = I915_CACHE_LLC;
4129 obj->cache_level = I915_CACHE_NONE;
4131 trace_i915_gem_object_create(obj);
4136 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4138 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4139 struct drm_device *dev = obj->base.dev;
4140 drm_i915_private_t *dev_priv = dev->dev_private;
4141 struct i915_vma *vma, *next;
4143 intel_runtime_pm_get(dev_priv);
4145 trace_i915_gem_object_destroy(obj);
4148 i915_gem_detach_phys_object(dev, obj);
4150 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4154 ret = i915_vma_unbind(vma);
4155 if (WARN_ON(ret == -ERESTARTSYS)) {
4156 bool was_interruptible;
4158 was_interruptible = dev_priv->mm.interruptible;
4159 dev_priv->mm.interruptible = false;
4161 WARN_ON(i915_vma_unbind(vma));
4163 dev_priv->mm.interruptible = was_interruptible;
4167 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4168 * before progressing. */
4170 i915_gem_object_unpin_pages(obj);
4172 if (WARN_ON(obj->pages_pin_count))
4173 obj->pages_pin_count = 0;
4174 i915_gem_object_put_pages(obj);
4175 i915_gem_object_free_mmap_offset(obj);
4176 i915_gem_object_release_stolen(obj);
4180 if (obj->base.import_attach)
4181 drm_prime_gem_destroy(&obj->base, NULL);
4183 drm_gem_object_release(&obj->base);
4184 i915_gem_info_remove_obj(dev_priv, obj->base.size);
4187 i915_gem_object_free(obj);
4189 intel_runtime_pm_put(dev_priv);
4192 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4193 struct i915_address_space *vm)
4195 struct i915_vma *vma;
4196 list_for_each_entry(vma, &obj->vma_list, vma_link)
4203 void i915_gem_vma_destroy(struct i915_vma *vma)
4205 WARN_ON(vma->node.allocated);
4207 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4208 if (!list_empty(&vma->exec_list))
4211 list_del(&vma->vma_link);
4217 i915_gem_suspend(struct drm_device *dev)
4219 drm_i915_private_t *dev_priv = dev->dev_private;
4222 mutex_lock(&dev->struct_mutex);
4223 if (dev_priv->ums.mm_suspended)
4226 ret = i915_gpu_idle(dev);
4230 i915_gem_retire_requests(dev);
4232 /* Under UMS, be paranoid and evict. */
4233 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4234 i915_gem_evict_everything(dev);
4236 i915_kernel_lost_context(dev);
4237 i915_gem_cleanup_ringbuffer(dev);
4239 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4240 * We need to replace this with a semaphore, or something.
4241 * And not confound ums.mm_suspended!
4243 dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4245 mutex_unlock(&dev->struct_mutex);
4247 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4248 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4249 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
4254 mutex_unlock(&dev->struct_mutex);
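/*
 * Re-emit the saved L3 remapping registers for the given slice via
 * MI_LOAD_REGISTER_IMM, so that rows remapped around faulty L3 cache
 * cells are restored after a reset or resume.
 */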
4258 int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
4260 struct drm_device *dev = ring->dev;
4261 drm_i915_private_t *dev_priv = dev->dev_private;
4262 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4263 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4266 if (!HAS_L3_DPF(dev) || !remap_info)
4269 ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4274 * Note: We do not worry about the concurrent register cacheline hang
4275 * here because no other code should access these registers other than
4276 * at initialization time.
4278 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4279 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4280 intel_ring_emit(ring, reg_base + i);
4281 intel_ring_emit(ring, remap_info[i/4]);
4284 intel_ring_advance(ring);
4289 void i915_gem_init_swizzling(struct drm_device *dev)
4291 drm_i915_private_t *dev_priv = dev->dev_private;
4293 if (INTEL_INFO(dev)->gen < 5 ||
4294 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4297 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4298 DISP_TILE_SURFACE_SWIZZLING);
4303 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4305 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4306 else if (IS_GEN7(dev))
4307 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4308 else if (IS_GEN8(dev))
4309 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4315 intel_enable_blt(struct drm_device *dev)
4320 /* The blitter was dysfunctional on early prototypes */
4321 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4322 DRM_INFO("BLT not supported on this pre-production hardware;"
4323 " graphics performance will be degraded.\n");
4330 static int i915_gem_init_rings(struct drm_device *dev)
4332 struct drm_i915_private *dev_priv = dev->dev_private;
4335 ret = intel_init_render_ring_buffer(dev);
4340 ret = intel_init_bsd_ring_buffer(dev);
4342 goto cleanup_render_ring;
4345 if (intel_enable_blt(dev)) {
4346 ret = intel_init_blt_ring_buffer(dev);
4348 goto cleanup_bsd_ring;
4351 if (HAS_VEBOX(dev)) {
4352 ret = intel_init_vebox_ring_buffer(dev);
4354 goto cleanup_blt_ring;
4358 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4360 goto cleanup_vebox_ring;
4365 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4367 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4369 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4370 cleanup_render_ring:
4371 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4377 i915_gem_init_hw(struct drm_device *dev)
4379 drm_i915_private_t *dev_priv = dev->dev_private;
4382 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4385 if (dev_priv->ellc_size)
4386 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4388 if (IS_HASWELL(dev))
4389 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4390 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4392 if (HAS_PCH_NOP(dev)) {
4393 if (IS_IVYBRIDGE(dev)) {
4394 u32 temp = I915_READ(GEN7_MSG_CTL);
4395 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4396 I915_WRITE(GEN7_MSG_CTL, temp);
4397 } else if (INTEL_INFO(dev)->gen >= 7) {
4398 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4399 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4400 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4404 i915_gem_init_swizzling(dev);
4406 ret = i915_gem_init_rings(dev);
4410 for (i = 0; i < NUM_L3_SLICES(dev); i++)
4411 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4414 * XXX: Contexts should only be initialized once. Doing a switch to the
4415 * default context switch however is something we'd like to do after
4416 * reset or thaw (the latter may not actually be necessary for HW, but
4417 * goes with our code better). Context switching requires rings (for
4418 * the do_switch), but before enabling PPGTT. So don't move this.
4420 ret = i915_gem_context_enable(dev_priv);
4422 DRM_ERROR("Context enable failed %d\n", ret);
4429 i915_gem_cleanup_ringbuffer(dev);
4433 int i915_gem_init(struct drm_device *dev)
4435 struct drm_i915_private *dev_priv = dev->dev_private;
4438 mutex_lock(&dev->struct_mutex);
4440 if (IS_VALLEYVIEW(dev)) {
4441 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4442 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4443 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4444 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4447 i915_gem_init_global_gtt(dev);
4449 ret = i915_gem_context_init(dev);
4451 mutex_unlock(&dev->struct_mutex);
4455 ret = i915_gem_init_hw(dev);
4456 mutex_unlock(&dev->struct_mutex);
4458 WARN_ON(dev_priv->mm.aliasing_ppgtt);
4459 i915_gem_context_fini(dev);
4460 drm_mm_takedown(&dev_priv->gtt.base.mm);
4464 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4465 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4466 dev_priv->dri1.allow_batchbuffer = 1;
4471 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4473 drm_i915_private_t *dev_priv = dev->dev_private;
4474 struct intel_ring_buffer *ring;
4477 for_each_ring(ring, dev_priv, i)
4478 intel_cleanup_ring_buffer(ring);
4482 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4483 struct drm_file *file_priv)
4485 struct drm_i915_private *dev_priv = dev->dev_private;
4488 if (drm_core_check_feature(dev, DRIVER_MODESET))
4491 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
4492 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4493 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
4496 mutex_lock(&dev->struct_mutex);
4497 dev_priv->ums.mm_suspended = 0;
4499 ret = i915_gem_init_hw(dev);
4501 mutex_unlock(&dev->struct_mutex);
4505 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
4506 mutex_unlock(&dev->struct_mutex);
4508 ret = drm_irq_install(dev);
4510 goto cleanup_ringbuffer;
4515 mutex_lock(&dev->struct_mutex);
4516 i915_gem_cleanup_ringbuffer(dev);
4517 dev_priv->ums.mm_suspended = 1;
4518 mutex_unlock(&dev->struct_mutex);
4524 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4525 struct drm_file *file_priv)
4527 if (drm_core_check_feature(dev, DRIVER_MODESET))
4530 drm_irq_uninstall(dev);
4532 return i915_gem_suspend(dev);
4536 i915_gem_lastclose(struct drm_device *dev)
4540 if (drm_core_check_feature(dev, DRIVER_MODESET))
4543 ret = i915_gem_suspend(dev);
4545 DRM_ERROR("failed to idle hardware: %d\n", ret);
4549 init_ring_lists(struct intel_ring_buffer *ring)
4551 INIT_LIST_HEAD(&ring->active_list);
4552 INIT_LIST_HEAD(&ring->request_list);
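/*
 * Initialise the bookkeeping for an address space (drm_mm allocator for
 * non-GGTT spaces, active/inactive lists) and link it into the device's
 * list of address spaces.
 */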
4555 void i915_init_vm(struct drm_i915_private *dev_priv,
4556 struct i915_address_space *vm)
4558 if (!i915_is_ggtt(vm))
4559 drm_mm_init(&vm->mm, vm->start, vm->total);
4560 vm->dev = dev_priv->dev;
4561 INIT_LIST_HEAD(&vm->active_list);
4562 INIT_LIST_HEAD(&vm->inactive_list);
4563 INIT_LIST_HEAD(&vm->global_link);
4564 list_add_tail(&vm->global_link, &dev_priv->vm_list);
4568 i915_gem_load(struct drm_device *dev)
4570 drm_i915_private_t *dev_priv = dev->dev_private;
4574 kmem_cache_create("i915_gem_object",
4575 sizeof(struct drm_i915_gem_object), 0,
4579 INIT_LIST_HEAD(&dev_priv->vm_list);
4580 i915_init_vm(dev_priv, &dev_priv->gtt.base);
4582 INIT_LIST_HEAD(&dev_priv->context_list);
4583 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4584 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4585 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4586 for (i = 0; i < I915_NUM_RINGS; i++)
4587 init_ring_lists(&dev_priv->ring[i]);
4588 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4589 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4590 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4591 i915_gem_retire_work_handler);
4592 INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
4593 i915_gem_idle_work_handler);
4594 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4596 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4598 I915_WRITE(MI_ARB_STATE,
4599 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4602 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4604 /* Old X drivers will take 0-2 for front, back, depth buffers */
4605 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4606 dev_priv->fence_reg_start = 3;
4608 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4609 dev_priv->num_fence_regs = 32;
4610 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4611 dev_priv->num_fence_regs = 16;
4613 dev_priv->num_fence_regs = 8;
4615 /* Initialize fence registers to zero */
4616 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4617 i915_gem_restore_fences(dev);
4619 i915_gem_detect_bit_6_swizzle(dev);
4620 init_waitqueue_head(&dev_priv->pending_flip_queue);
4622 dev_priv->mm.interruptible = true;
4624 dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
4625 dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
4626 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4627 register_shrinker(&dev_priv->mm.inactive_shrinker);
4631 * Create a physically contiguous memory object for this object
4632 * e.g. for cursor + overlay regs
4634 static int i915_gem_init_phys_object(struct drm_device *dev,
4635 int id, int size, int align)
4637 drm_i915_private_t *dev_priv = dev->dev_private;
4638 struct drm_i915_gem_phys_object *phys_obj;
4641 if (dev_priv->mm.phys_objs[id - 1] || !size)
4644 phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
4650 phys_obj->handle = drm_pci_alloc(dev, size, align);
4651 if (!phys_obj->handle) {
4656 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4659 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4667 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4669 drm_i915_private_t *dev_priv = dev->dev_private;
4670 struct drm_i915_gem_phys_object *phys_obj;
4672 if (!dev_priv->mm.phys_objs[id - 1])
4675 phys_obj = dev_priv->mm.phys_objs[id - 1];
4676 if (phys_obj->cur_obj) {
4677 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4681 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4683 drm_pci_free(dev, phys_obj->handle);
4685 dev_priv->mm.phys_objs[id - 1] = NULL;
4688 void i915_gem_free_all_phys_object(struct drm_device *dev)
4692 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4693 i915_gem_free_phys_object(dev, i);
4696 void i915_gem_detach_phys_object(struct drm_device *dev,
4697 struct drm_i915_gem_object *obj)
4699 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4706 vaddr = obj->phys_obj->handle->vaddr;
4708 page_count = obj->base.size / PAGE_SIZE;
4709 for (i = 0; i < page_count; i++) {
4710 struct page *page = shmem_read_mapping_page(mapping, i);
4711 if (!IS_ERR(page)) {
4712 char *dst = kmap_atomic(page);
4713 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4716 drm_clflush_pages(&page, 1);
4718 set_page_dirty(page);
4719 mark_page_accessed(page);
4720 page_cache_release(page);
4723 i915_gem_chipset_flush(dev);
4725 obj->phys_obj->cur_obj = NULL;
4726 obj->phys_obj = NULL;
4730 i915_gem_attach_phys_object(struct drm_device *dev,
4731 struct drm_i915_gem_object *obj,
4735 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4736 drm_i915_private_t *dev_priv = dev->dev_private;
4741 if (id > I915_MAX_PHYS_OBJECT)
4744 if (obj->phys_obj) {
4745 if (obj->phys_obj->id == id)
4747 i915_gem_detach_phys_object(dev, obj);
4750 /* create a new object */
4751 if (!dev_priv->mm.phys_objs[id - 1]) {
4752 ret = i915_gem_init_phys_object(dev, id,
4753 obj->base.size, align);
4755 DRM_ERROR("failed to init phys object %d size: %zu\n",
4756 id, obj->base.size);
4761 /* bind to the object */
4762 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4763 obj->phys_obj->cur_obj = obj;
4765 page_count = obj->base.size / PAGE_SIZE;
4767 for (i = 0; i < page_count; i++) {
4771 page = shmem_read_mapping_page(mapping, i);
4773 return PTR_ERR(page);
4775 src = kmap_atomic(page);
4776 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4777 memcpy(dst, src, PAGE_SIZE);
4780 mark_page_accessed(page);
4781 page_cache_release(page);
4788 i915_gem_phys_pwrite(struct drm_device *dev,
4789 struct drm_i915_gem_object *obj,
4790 struct drm_i915_gem_pwrite *args,
4791 struct drm_file *file_priv)
4793 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4794 char __user *user_data = to_user_ptr(args->data_ptr);
4796 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4797 unsigned long unwritten;
4799 /* The physical object once assigned is fixed for the lifetime
4800 * of the obj, so we can safely drop the lock and continue
4803 mutex_unlock(&dev->struct_mutex);
4804 unwritten = copy_from_user(vaddr, user_data, args->size);
4805 mutex_lock(&dev->struct_mutex);
4810 i915_gem_chipset_flush(dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	cancel_delayed_work_sync(&file_priv->mm.idle_work);

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

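/* Delayed work that fires once the client has gone idle; it clears the
 * per-file rps_wait_boost flag so a later GPU stall by this client may
 * request a frequency boost again.
 */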
static void
i915_gem_file_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_file_private *file_priv =
		container_of(work, typeof(*file_priv), mm.idle_work.work);

	atomic_set(&file_priv->rps_wait_boost, false);
}

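/* Per-file GEM setup at open(): allocate the drm_i915_file_private,
 * initialise its request list, lock and idle work, and set up the
 * per-file context state via i915_gem_context_open().
 */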
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = dev->dev_private;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);
	INIT_DELAYED_WORK(&file_priv->mm.idle_work,
			  i915_gem_file_idle_work_handler);

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

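/* Best-effort check whether @task is the current owner of @mutex.
 * mutex::owner is only maintained with CONFIG_SMP or
 * CONFIG_DEBUG_MUTEXES, so on a plain UP kernel we conservatively
 * report false.
 */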
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}

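/* The two callbacks below implement the VM shrinker interface:
 * i915_gem_inactive_count() reports how many pages could plausibly be
 * reclaimed and i915_gem_inactive_scan() performs the actual purge.
 * They are wired into dev_priv->mm.inactive_shrinker elsewhere in the
 * driver (i915_gem_load()); a rough sketch of that wiring, for
 * orientation only, not verbatim:
 *
 *	dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
 *	dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
 *	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
 *	register_shrinker(&dev_priv->mm.inactive_shrinker);
 *
 * Both callbacks may be invoked from direct reclaim while this driver
 * already holds struct_mutex, hence the mutex_is_locked_by() check
 * above.
 */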
static unsigned long
i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	bool unlock = true;
	unsigned long count;

	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return 0;

		if (dev_priv->mm.shrinker_no_lock_stealing)
			return 0;

		unlock = false;
	}

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
		if (obj->pages_pin_count == 0)
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
			count += obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}

/* All the new VM stuff */
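/* Return the offset of @o within address space @vm. Lookups against the
 * aliasing ppgtt are redirected to the global GTT, since the aliasing
 * ppgtt mirrors the global GTT bindings.
 */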
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (!dev_priv->mm.aliasing_ppgtt ||
	    vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	BUG_ON(list_empty(&o->vma_list));
	list_for_each_entry(vma, &o->vma_list, vma_link) {
		if (vma->vm == vm)
			return vma->node.start;
	}

	WARN(1, "%s vma for this object not found.\n",
	     i915_is_ggtt(vm) ? "global" : "ppgtt");
	return -1;
}

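/* The two helpers below answer "is this object actually bound?":
 * i915_gem_obj_bound() checks for an allocated drm_mm node in one
 * specific address space, i915_gem_obj_bound_any() in any of them.
 */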
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (!dev_priv->mm.aliasing_ppgtt ||
	    vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	BUG_ON(list_empty(&o->vma_list));

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm)
			return vma->node.size;

	return 0;
}

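/* Shrinker scan callback: first drop purgeable (madvise DONTNEED)
 * objects via i915_gem_purge(), then evict unpinned objects with
 * __i915_gem_shrink(), and only as a last resort fall back to
 * i915_gem_shrink_all(), until the requested number of pages is freed.
 */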
static unsigned long
i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
	unsigned long freed;
	bool unlock = true;

	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return SHRINK_STOP;

		if (dev_priv->mm.shrinker_no_lock_stealing)
			return SHRINK_STOP;

		unlock = false;
	}

	freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
	if (freed < sc->nr_to_scan)
		freed += __i915_gem_shrink(dev_priv,
					   sc->nr_to_scan - freed,
					   false);
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink_all(dev_priv);

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return freed;
}

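/* Return the object's global GTT VMA, if any. This relies on the
 * convention that the GGTT VMA, when present, sits at the head of the
 * object's vma_list, so inspecting the first entry is sufficient.
 */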
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	if (WARN_ON(list_empty(&obj->vma_list)))
		return NULL;

	vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
	if (vma->vm != obj_to_ggtt(obj))