drm/i915: Remove the now obsolete 'i915_gem_check_olr()'
drivers/gpu/drm/i915/i915_gem.c
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include <drm/drmP.h>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_vgpu.h"
33 #include "i915_trace.h"
34 #include "intel_drv.h"
35 #include <linux/shmem_fs.h>
36 #include <linux/slab.h>
37 #include <linux/swap.h>
38 #include <linux/pci.h>
39 #include <linux/dma-buf.h>
40
41 #define RQ_BUG_ON(expr)
42
43 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
44 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
45 static void
46 i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
47 static void
48 i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
49 static void i915_gem_write_fence(struct drm_device *dev, int reg,
50                                  struct drm_i915_gem_object *obj);
51 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
52                                          struct drm_i915_fence_reg *fence,
53                                          bool enable);
54
55 static bool cpu_cache_is_coherent(struct drm_device *dev,
56                                   enum i915_cache_level level)
57 {
58         return HAS_LLC(dev) || level != I915_CACHE_NONE;
59 }
60
61 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
62 {
63         if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
64                 return true;
65
66         return obj->pin_display;
67 }
68
69 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
70 {
71         if (obj->tiling_mode)
72                 i915_gem_release_mmap(obj);
73
74         /* As we do not have an associated fence register, we will force
75          * a tiling change if we ever need to acquire one.
76          */
77         obj->fence_dirty = false;
78         obj->fence_reg = I915_FENCE_REG_NONE;
79 }
80
81 /* some bookkeeping */
82 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
83                                   size_t size)
84 {
85         spin_lock(&dev_priv->mm.object_stat_lock);
86         dev_priv->mm.object_count++;
87         dev_priv->mm.object_memory += size;
88         spin_unlock(&dev_priv->mm.object_stat_lock);
89 }
90
91 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
92                                      size_t size)
93 {
94         spin_lock(&dev_priv->mm.object_stat_lock);
95         dev_priv->mm.object_count--;
96         dev_priv->mm.object_memory -= size;
97         spin_unlock(&dev_priv->mm.object_stat_lock);
98 }
99
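/* Block until any pending GPU reset has completed (or the GPU is declared
 * terminally wedged). The wait is bounded to 10 seconds so that a stuck
 * reset cannot hang userspace forever; -EIO is returned on timeout.
 */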
100 static int
101 i915_gem_wait_for_error(struct i915_gpu_error *error)
102 {
103         int ret;
104
105 #define EXIT_COND (!i915_reset_in_progress(error) || \
106                    i915_terminally_wedged(error))
107         if (EXIT_COND)
108                 return 0;
109
110         /*
111          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
112          * userspace. If it takes that long something really bad is going on and
113          * we should simply try to bail out and fail as gracefully as possible.
114          */
115         ret = wait_event_interruptible_timeout(error->reset_queue,
116                                                EXIT_COND,
117                                                10*HZ);
118         if (ret == 0) {
119                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
120                 return -EIO;
121         } else if (ret < 0) {
122                 return ret;
123         }
124 #undef EXIT_COND
125
126         return 0;
127 }
128
129 int i915_mutex_lock_interruptible(struct drm_device *dev)
130 {
131         struct drm_i915_private *dev_priv = dev->dev_private;
132         int ret;
133
134         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
135         if (ret)
136                 return ret;
137
138         ret = mutex_lock_interruptible(&dev->struct_mutex);
139         if (ret)
140                 return ret;
141
142         WARN_ON(i915_verify_lists(dev));
143         return 0;
144 }
145
146 int
147 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
148                             struct drm_file *file)
149 {
150         struct drm_i915_private *dev_priv = dev->dev_private;
151         struct drm_i915_gem_get_aperture *args = data;
152         struct drm_i915_gem_object *obj;
153         size_t pinned;
154
155         pinned = 0;
156         mutex_lock(&dev->struct_mutex);
157         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
158                 if (i915_gem_obj_is_pinned(obj))
159                         pinned += i915_gem_obj_ggtt_size(obj);
160         mutex_unlock(&dev->struct_mutex);
161
162         args->aper_size = dev_priv->gtt.base.total;
163         args->aper_available_size = args->aper_size - pinned;
164
165         return 0;
166 }
167
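/* Copy the object's shmem backing pages into the contiguous phys buffer and
 * publish a single-entry sg_table whose DMA address is the buffer's bus
 * address. Bit-17 swizzled objects are rejected.
 */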
168 static int
169 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
170 {
171         struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
172         char *vaddr = obj->phys_handle->vaddr;
173         struct sg_table *st;
174         struct scatterlist *sg;
175         int i;
176
177         if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
178                 return -EINVAL;
179
180         for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
181                 struct page *page;
182                 char *src;
183
184                 page = shmem_read_mapping_page(mapping, i);
185                 if (IS_ERR(page))
186                         return PTR_ERR(page);
187
188                 src = kmap_atomic(page);
189                 memcpy(vaddr, src, PAGE_SIZE);
190                 drm_clflush_virt_range(vaddr, PAGE_SIZE);
191                 kunmap_atomic(src);
192
193                 page_cache_release(page);
194                 vaddr += PAGE_SIZE;
195         }
196
197         i915_gem_chipset_flush(obj->base.dev);
198
199         st = kmalloc(sizeof(*st), GFP_KERNEL);
200         if (st == NULL)
201                 return -ENOMEM;
202
203         if (sg_alloc_table(st, 1, GFP_KERNEL)) {
204                 kfree(st);
205                 return -ENOMEM;
206         }
207
208         sg = st->sgl;
209         sg->offset = 0;
210         sg->length = obj->base.size;
211
212         sg_dma_address(sg) = obj->phys_handle->busaddr;
213         sg_dma_len(sg) = obj->base.size;
214
215         obj->pages = st;
216         obj->has_dma_mapping = true;
217         return 0;
218 }
219
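/* Tear down the phys backing: if the object is still dirty, copy the
 * contents of the contiguous buffer back into the shmem pages before
 * freeing the sg_table. The phys allocation itself is freed later by the
 * release hook.
 */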
220 static void
221 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
222 {
223         int ret;
224
225         BUG_ON(obj->madv == __I915_MADV_PURGED);
226
227         ret = i915_gem_object_set_to_cpu_domain(obj, true);
228         if (ret) {
229                 /* In the event of a disaster, abandon all caches and
230                  * hope for the best.
231                  */
232                 WARN_ON(ret != -EIO);
233                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
234         }
235
236         if (obj->madv == I915_MADV_DONTNEED)
237                 obj->dirty = 0;
238
239         if (obj->dirty) {
240                 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
241                 char *vaddr = obj->phys_handle->vaddr;
242                 int i;
243
244                 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
245                         struct page *page;
246                         char *dst;
247
248                         page = shmem_read_mapping_page(mapping, i);
249                         if (IS_ERR(page))
250                                 continue;
251
252                         dst = kmap_atomic(page);
253                         drm_clflush_virt_range(vaddr, PAGE_SIZE);
254                         memcpy(dst, vaddr, PAGE_SIZE);
255                         kunmap_atomic(dst);
256
257                         set_page_dirty(page);
258                         if (obj->madv == I915_MADV_WILLNEED)
259                                 mark_page_accessed(page);
260                         page_cache_release(page);
261                         vaddr += PAGE_SIZE;
262                 }
263                 obj->dirty = 0;
264         }
265
266         sg_free_table(obj->pages);
267         kfree(obj->pages);
268
269         obj->has_dma_mapping = false;
270 }
271
272 static void
273 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
274 {
275         drm_pci_free(obj->base.dev, obj->phys_handle);
276 }
277
278 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
279         .get_pages = i915_gem_object_get_pages_phys,
280         .put_pages = i915_gem_object_put_pages_phys,
281         .release = i915_gem_object_release_phys,
282 };
283
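/* Unbind every VMA and release the object's backing pages, holding a
 * temporary reference so the object cannot disappear underneath us.
 */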
284 static int
285 drop_pages(struct drm_i915_gem_object *obj)
286 {
287         struct i915_vma *vma, *next;
288         int ret;
289
290         drm_gem_object_reference(&obj->base);
291         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
292                 if (i915_vma_unbind(vma))
293                         break;
294
295         ret = i915_gem_object_put_pages(obj);
296         drm_gem_object_unreference(&obj->base);
297
298         return ret;
299 }
300
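/* Convert a shmem-backed object into one backed by a single physically
 * contiguous buffer from drm_pci_alloc(), for hardware that consumes raw
 * physical addresses. Existing pages are dropped and their contents are
 * re-read into the new buffer via the phys get_pages hook.
 */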
301 int
302 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
303                             int align)
304 {
305         drm_dma_handle_t *phys;
306         int ret;
307
308         if (obj->phys_handle) {
309                 if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
310                         return -EBUSY;
311
312                 return 0;
313         }
314
315         if (obj->madv != I915_MADV_WILLNEED)
316                 return -EFAULT;
317
318         if (obj->base.filp == NULL)
319                 return -EINVAL;
320
321         ret = drop_pages(obj);
322         if (ret)
323                 return ret;
324
325         /* create a new object */
326         phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
327         if (!phys)
328                 return -ENOMEM;
329
330         obj->phys_handle = phys;
331         obj->ops = &i915_gem_phys_ops;
332
333         return i915_gem_object_get_pages(obj);
334 }
335
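/* pwrite fast path for phys objects: write userspace data straight into the
 * contiguous kernel mapping. If the atomic, non-caching copy faults, drop
 * struct_mutex and retry with a plain copy_from_user(), which is safe
 * because the phys backing never moves once assigned.
 */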
336 static int
337 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
338                      struct drm_i915_gem_pwrite *args,
339                      struct drm_file *file_priv)
340 {
341         struct drm_device *dev = obj->base.dev;
342         void *vaddr = obj->phys_handle->vaddr + args->offset;
343         char __user *user_data = to_user_ptr(args->data_ptr);
344         int ret = 0;
345
346         /* We manually control the domain here and pretend that it
347          * remains coherent i.e. in the GTT domain, like shmem_pwrite.
348          */
349         ret = i915_gem_object_wait_rendering(obj, false);
350         if (ret)
351                 return ret;
352
353         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
354         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
355                 unsigned long unwritten;
356
357                 /* The physical object once assigned is fixed for the lifetime
358                  * of the obj, so we can safely drop the lock and continue
359                  * to access vaddr.
360                  */
361                 mutex_unlock(&dev->struct_mutex);
362                 unwritten = copy_from_user(vaddr, user_data, args->size);
363                 mutex_lock(&dev->struct_mutex);
364                 if (unwritten) {
365                         ret = -EFAULT;
366                         goto out;
367                 }
368         }
369
370         drm_clflush_virt_range(vaddr, args->size);
371         i915_gem_chipset_flush(dev);
372
373 out:
374         intel_fb_obj_flush(obj, false);
375         return ret;
376 }
377
378 void *i915_gem_object_alloc(struct drm_device *dev)
379 {
380         struct drm_i915_private *dev_priv = dev->dev_private;
381         return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
382 }
383
384 void i915_gem_object_free(struct drm_i915_gem_object *obj)
385 {
386         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
387         kmem_cache_free(dev_priv->objects, obj);
388 }
389
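/* Common backend for the create and dumb_create ioctls: round the size up
 * to page granularity, allocate the GEM object and return a new handle for
 * it, dropping our allocation reference once the handle holds its own.
 */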
390 static int
391 i915_gem_create(struct drm_file *file,
392                 struct drm_device *dev,
393                 uint64_t size,
394                 uint32_t *handle_p)
395 {
396         struct drm_i915_gem_object *obj;
397         int ret;
398         u32 handle;
399
400         size = roundup(size, PAGE_SIZE);
401         if (size == 0)
402                 return -EINVAL;
403
404         /* Allocate the new object */
405         obj = i915_gem_alloc_object(dev, size);
406         if (obj == NULL)
407                 return -ENOMEM;
408
409         ret = drm_gem_handle_create(file, &obj->base, &handle);
410         /* drop reference from allocate - handle holds it now */
411         drm_gem_object_unreference_unlocked(&obj->base);
412         if (ret)
413                 return ret;
414
415         *handle_p = handle;
416         return 0;
417 }
418
419 int
420 i915_gem_dumb_create(struct drm_file *file,
421                      struct drm_device *dev,
422                      struct drm_mode_create_dumb *args)
423 {
424         /* have to work out size/pitch and return them */
425         args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
426         args->size = args->pitch * args->height;
427         return i915_gem_create(file, dev,
428                                args->size, &args->handle);
429 }
430
431 /**
432  * Creates a new mm object and returns a handle to it.
433  */
434 int
435 i915_gem_create_ioctl(struct drm_device *dev, void *data,
436                       struct drm_file *file)
437 {
438         struct drm_i915_gem_create *args = data;
439
440         return i915_gem_create(file, dev,
441                                args->size, &args->handle);
442 }
443
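/* Copy out of a bit-17 swizzled page: within every 128-byte span the two
 * 64-byte halves are swapped, so the source offset is XORed with 64 and the
 * copy proceeds in chunks that never cross a 64-byte boundary.
 */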
444 static inline int
445 __copy_to_user_swizzled(char __user *cpu_vaddr,
446                         const char *gpu_vaddr, int gpu_offset,
447                         int length)
448 {
449         int ret, cpu_offset = 0;
450
451         while (length > 0) {
452                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
453                 int this_length = min(cacheline_end - gpu_offset, length);
454                 int swizzled_gpu_offset = gpu_offset ^ 64;
455
456                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
457                                      gpu_vaddr + swizzled_gpu_offset,
458                                      this_length);
459                 if (ret)
460                         return ret + length;
461
462                 cpu_offset += this_length;
463                 gpu_offset += this_length;
464                 length -= this_length;
465         }
466
467         return 0;
468 }
469
470 static inline int
471 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
472                           const char __user *cpu_vaddr,
473                           int length)
474 {
475         int ret, cpu_offset = 0;
476
477         while (length > 0) {
478                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
479                 int this_length = min(cacheline_end - gpu_offset, length);
480                 int swizzled_gpu_offset = gpu_offset ^ 64;
481
482                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
483                                        cpu_vaddr + cpu_offset,
484                                        this_length);
485                 if (ret)
486                         return ret + length;
487
488                 cpu_offset += this_length;
489                 gpu_offset += this_length;
490                 length -= this_length;
491         }
492
493         return 0;
494 }
495
496 /*
497  * Pins the specified object's pages and synchronizes the object with
498  * GPU accesses. Sets needs_clflush to non-zero if the caller should
499  * flush the object from the CPU cache.
500  */
501 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
502                                     int *needs_clflush)
503 {
504         int ret;
505
506         *needs_clflush = 0;
507
508         if (!obj->base.filp)
509                 return -EINVAL;
510
511         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
512                 /* If we're not in the cpu read domain, set ourself into the gtt
513                  * read domain and manually flush cachelines (if required). This
514                  * optimizes for the case when the gpu will dirty the data
515                  * anyway again before the next pread happens. */
516                 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
517                                                         obj->cache_level);
518                 ret = i915_gem_object_wait_rendering(obj, true);
519                 if (ret)
520                         return ret;
521         }
522
523         ret = i915_gem_object_get_pages(obj);
524         if (ret)
525                 return ret;
526
527         i915_gem_object_pin_pages(obj);
528
529         return ret;
530 }
531
532 /* Per-page copy function for the shmem pread fastpath.
533  * Flushes invalid cachelines before reading the target if
534  * needs_clflush is set. */
535 static int
536 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
537                  char __user *user_data,
538                  bool page_do_bit17_swizzling, bool needs_clflush)
539 {
540         char *vaddr;
541         int ret;
542
543         if (unlikely(page_do_bit17_swizzling))
544                 return -EINVAL;
545
546         vaddr = kmap_atomic(page);
547         if (needs_clflush)
548                 drm_clflush_virt_range(vaddr + shmem_page_offset,
549                                        page_length);
550         ret = __copy_to_user_inatomic(user_data,
551                                       vaddr + shmem_page_offset,
552                                       page_length);
553         kunmap_atomic(vaddr);
554
555         return ret ? -EFAULT : 0;
556 }
557
558 static void
559 shmem_clflush_swizzled_range(char *addr, unsigned long length,
560                              bool swizzled)
561 {
562         if (unlikely(swizzled)) {
563                 unsigned long start = (unsigned long) addr;
564                 unsigned long end = (unsigned long) addr + length;
565
566                 /* For swizzling simply ensure that we always flush both
567                  * channels. Lame, but simple and it works. Swizzled
568                  * pwrite/pread is far from a hotpath - current userspace
569                  * doesn't use it at all. */
570                 start = round_down(start, 128);
571                 end = round_up(end, 128);
572
573                 drm_clflush_virt_range((void *)start, end - start);
574         } else {
575                 drm_clflush_virt_range(addr, length);
576         }
577
578 }
579
580 /* The only difference from the fast-path function is that this can handle
581  * bit17 swizzling and uses the non-atomic copy and kmap functions. */
582 static int
583 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
584                  char __user *user_data,
585                  bool page_do_bit17_swizzling, bool needs_clflush)
586 {
587         char *vaddr;
588         int ret;
589
590         vaddr = kmap(page);
591         if (needs_clflush)
592                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
593                                              page_length,
594                                              page_do_bit17_swizzling);
595
596         if (page_do_bit17_swizzling)
597                 ret = __copy_to_user_swizzled(user_data,
598                                               vaddr, shmem_page_offset,
599                                               page_length);
600         else
601                 ret = __copy_to_user(user_data,
602                                      vaddr + shmem_page_offset,
603                                      page_length);
604         kunmap(page);
605
606         return ret ? -EFAULT : 0;
607 }
608
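/* Main pread loop over the object's shmem pages: try the atomic kmap fast
 * path first and, if it faults or the page is bit-17 swizzled, drop
 * struct_mutex, prefault the user buffer and fall back to the sleeping
 * slow path.
 */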
609 static int
610 i915_gem_shmem_pread(struct drm_device *dev,
611                      struct drm_i915_gem_object *obj,
612                      struct drm_i915_gem_pread *args,
613                      struct drm_file *file)
614 {
615         char __user *user_data;
616         ssize_t remain;
617         loff_t offset;
618         int shmem_page_offset, page_length, ret = 0;
619         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
620         int prefaulted = 0;
621         int needs_clflush = 0;
622         struct sg_page_iter sg_iter;
623
624         user_data = to_user_ptr(args->data_ptr);
625         remain = args->size;
626
627         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
628
629         ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
630         if (ret)
631                 return ret;
632
633         offset = args->offset;
634
635         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
636                          offset >> PAGE_SHIFT) {
637                 struct page *page = sg_page_iter_page(&sg_iter);
638
639                 if (remain <= 0)
640                         break;
641
642                 /* Operation in this page
643                  *
644                  * shmem_page_offset = offset within page in shmem file
645                  * page_length = bytes to copy for this page
646                  */
647                 shmem_page_offset = offset_in_page(offset);
648                 page_length = remain;
649                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
650                         page_length = PAGE_SIZE - shmem_page_offset;
651
652                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
653                         (page_to_phys(page) & (1 << 17)) != 0;
654
655                 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
656                                        user_data, page_do_bit17_swizzling,
657                                        needs_clflush);
658                 if (ret == 0)
659                         goto next_page;
660
661                 mutex_unlock(&dev->struct_mutex);
662
663                 if (likely(!i915.prefault_disable) && !prefaulted) {
664                         ret = fault_in_multipages_writeable(user_data, remain);
665                         /* Userspace is tricking us, but we've already clobbered
666                          * its pages with the prefault and promised to write the
667                          * data up to the first fault. Hence ignore any errors
668                          * and just continue. */
669                         (void)ret;
670                         prefaulted = 1;
671                 }
672
673                 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
674                                        user_data, page_do_bit17_swizzling,
675                                        needs_clflush);
676
677                 mutex_lock(&dev->struct_mutex);
678
679                 if (ret)
680                         goto out;
681
682 next_page:
683                 remain -= page_length;
684                 user_data += page_length;
685                 offset += page_length;
686         }
687
688 out:
689         i915_gem_object_unpin_pages(obj);
690
691         return ret;
692 }
693
694 /**
695  * Reads data from the object referenced by handle.
696  *
697  * On error, the contents of *data are undefined.
698  */
699 int
700 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
701                      struct drm_file *file)
702 {
703         struct drm_i915_gem_pread *args = data;
704         struct drm_i915_gem_object *obj;
705         int ret = 0;
706
707         if (args->size == 0)
708                 return 0;
709
710         if (!access_ok(VERIFY_WRITE,
711                        to_user_ptr(args->data_ptr),
712                        args->size))
713                 return -EFAULT;
714
715         ret = i915_mutex_lock_interruptible(dev);
716         if (ret)
717                 return ret;
718
719         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
720         if (&obj->base == NULL) {
721                 ret = -ENOENT;
722                 goto unlock;
723         }
724
725         /* Bounds check source.  */
726         if (args->offset > obj->base.size ||
727             args->size > obj->base.size - args->offset) {
728                 ret = -EINVAL;
729                 goto out;
730         }
731
732         /* prime objects have no backing filp to GEM pread/pwrite
733          * pages from.
734          */
735         if (!obj->base.filp) {
736                 ret = -EINVAL;
737                 goto out;
738         }
739
740         trace_i915_gem_object_pread(obj, args->offset, args->size);
741
742         ret = i915_gem_shmem_pread(dev, obj, args, file);
743
744 out:
745         drm_gem_object_unreference(&obj->base);
746 unlock:
747         mutex_unlock(&dev->struct_mutex);
748         return ret;
749 }
750
751 /* This is the fast write path which cannot handle
752  * page faults in the source data
753  */
754
755 static inline int
756 fast_user_write(struct io_mapping *mapping,
757                 loff_t page_base, int page_offset,
758                 char __user *user_data,
759                 int length)
760 {
761         void __iomem *vaddr_atomic;
762         void *vaddr;
763         unsigned long unwritten;
764
765         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
766         /* We can use the cpu mem copy function because this is X86. */
767         vaddr = (void __force*)vaddr_atomic + page_offset;
768         unwritten = __copy_from_user_inatomic_nocache(vaddr,
769                                                       user_data, length);
770         io_mapping_unmap_atomic(vaddr_atomic);
771         return unwritten;
772 }
773
774 /**
775  * This is the fast pwrite path, where we copy the data directly from the
776  * user into the GTT, uncached.
777  */
778 static int
779 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
780                          struct drm_i915_gem_object *obj,
781                          struct drm_i915_gem_pwrite *args,
782                          struct drm_file *file)
783 {
784         struct drm_i915_private *dev_priv = dev->dev_private;
785         ssize_t remain;
786         loff_t offset, page_base;
787         char __user *user_data;
788         int page_offset, page_length, ret;
789
790         ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
791         if (ret)
792                 goto out;
793
794         ret = i915_gem_object_set_to_gtt_domain(obj, true);
795         if (ret)
796                 goto out_unpin;
797
798         ret = i915_gem_object_put_fence(obj);
799         if (ret)
800                 goto out_unpin;
801
802         user_data = to_user_ptr(args->data_ptr);
803         remain = args->size;
804
805         offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
806
807         intel_fb_obj_invalidate(obj, ORIGIN_GTT);
808
809         while (remain > 0) {
810                 /* Operation in this page
811                  *
812                  * page_base = page offset within aperture
813                  * page_offset = offset within page
814                  * page_length = bytes to copy for this page
815                  */
816                 page_base = offset & PAGE_MASK;
817                 page_offset = offset_in_page(offset);
818                 page_length = remain;
819                 if ((page_offset + remain) > PAGE_SIZE)
820                         page_length = PAGE_SIZE - page_offset;
821
822                 /* If we get a fault while copying data, then (presumably) our
823                  * source page isn't available.  Return the error and we'll
824                  * retry in the slow path.
825                  */
826                 if (fast_user_write(dev_priv->gtt.mappable, page_base,
827                                     page_offset, user_data, page_length)) {
828                         ret = -EFAULT;
829                         goto out_flush;
830                 }
831
832                 remain -= page_length;
833                 user_data += page_length;
834                 offset += page_length;
835         }
836
837 out_flush:
838         intel_fb_obj_flush(obj, false);
839 out_unpin:
840         i915_gem_object_ggtt_unpin(obj);
841 out:
842         return ret;
843 }
844
845 /* Per-page copy function for the shmem pwrite fastpath.
846  * Flushes invalid cachelines before writing to the target if
847  * needs_clflush_before is set and flushes out any written cachelines after
848  * writing if needs_clflush is set. */
849 static int
850 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
851                   char __user *user_data,
852                   bool page_do_bit17_swizzling,
853                   bool needs_clflush_before,
854                   bool needs_clflush_after)
855 {
856         char *vaddr;
857         int ret;
858
859         if (unlikely(page_do_bit17_swizzling))
860                 return -EINVAL;
861
862         vaddr = kmap_atomic(page);
863         if (needs_clflush_before)
864                 drm_clflush_virt_range(vaddr + shmem_page_offset,
865                                        page_length);
866         ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
867                                         user_data, page_length);
868         if (needs_clflush_after)
869                 drm_clflush_virt_range(vaddr + shmem_page_offset,
870                                        page_length);
871         kunmap_atomic(vaddr);
872
873         return ret ? -EFAULT : 0;
874 }
875
876 /* The only difference from the fast-path function is that this can handle
877  * bit17 swizzling and uses the non-atomic copy and kmap functions. */
878 static int
879 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
880                   char __user *user_data,
881                   bool page_do_bit17_swizzling,
882                   bool needs_clflush_before,
883                   bool needs_clflush_after)
884 {
885         char *vaddr;
886         int ret;
887
888         vaddr = kmap(page);
889         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
890                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
891                                              page_length,
892                                              page_do_bit17_swizzling);
893         if (page_do_bit17_swizzling)
894                 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
895                                                 user_data,
896                                                 page_length);
897         else
898                 ret = __copy_from_user(vaddr + shmem_page_offset,
899                                        user_data,
900                                        page_length);
901         if (needs_clflush_after)
902                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
903                                              page_length,
904                                              page_do_bit17_swizzling);
905         kunmap(page);
906
907         return ret ? -EFAULT : 0;
908 }
909
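/* Shmem pwrite counterpart of the pread loop above: per page, attempt the
 * atomic fast copy and fall back to the slow path outside struct_mutex.
 * Partially overwritten cachelines are clflushed before the copy, and the
 * written range is flushed afterwards when needs_clflush_after is set.
 */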
910 static int
911 i915_gem_shmem_pwrite(struct drm_device *dev,
912                       struct drm_i915_gem_object *obj,
913                       struct drm_i915_gem_pwrite *args,
914                       struct drm_file *file)
915 {
916         ssize_t remain;
917         loff_t offset;
918         char __user *user_data;
919         int shmem_page_offset, page_length, ret = 0;
920         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
921         int hit_slowpath = 0;
922         int needs_clflush_after = 0;
923         int needs_clflush_before = 0;
924         struct sg_page_iter sg_iter;
925
926         user_data = to_user_ptr(args->data_ptr);
927         remain = args->size;
928
929         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
930
931         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
932                 /* If we're not in the cpu write domain, set ourself into the gtt
933                  * write domain and manually flush cachelines (if required). This
934                  * optimizes for the case when the gpu will use the data
935                  * right away and we therefore have to clflush anyway. */
936                 needs_clflush_after = cpu_write_needs_clflush(obj);
937                 ret = i915_gem_object_wait_rendering(obj, false);
938                 if (ret)
939                         return ret;
940         }
941         /* The same trick applies to invalidating partially written
942          * cachelines that are read before being written. */
943         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
944                 needs_clflush_before =
945                         !cpu_cache_is_coherent(dev, obj->cache_level);
946
947         ret = i915_gem_object_get_pages(obj);
948         if (ret)
949                 return ret;
950
951         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
952
953         i915_gem_object_pin_pages(obj);
954
955         offset = args->offset;
956         obj->dirty = 1;
957
958         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
959                          offset >> PAGE_SHIFT) {
960                 struct page *page = sg_page_iter_page(&sg_iter);
961                 int partial_cacheline_write;
962
963                 if (remain <= 0)
964                         break;
965
966                 /* Operation in this page
967                  *
968                  * shmem_page_offset = offset within page in shmem file
969                  * page_length = bytes to copy for this page
970                  */
971                 shmem_page_offset = offset_in_page(offset);
972
973                 page_length = remain;
974                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
975                         page_length = PAGE_SIZE - shmem_page_offset;
976
977                 /* If we don't overwrite a cacheline completely we need to be
978                  * careful to have up-to-date data by first clflushing. Don't
979                  * overcomplicate things and flush the entire patch. */
980                 partial_cacheline_write = needs_clflush_before &&
981                         ((shmem_page_offset | page_length)
982                                 & (boot_cpu_data.x86_clflush_size - 1));
983
984                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
985                         (page_to_phys(page) & (1 << 17)) != 0;
986
987                 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
988                                         user_data, page_do_bit17_swizzling,
989                                         partial_cacheline_write,
990                                         needs_clflush_after);
991                 if (ret == 0)
992                         goto next_page;
993
994                 hit_slowpath = 1;
995                 mutex_unlock(&dev->struct_mutex);
996                 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
997                                         user_data, page_do_bit17_swizzling,
998                                         partial_cacheline_write,
999                                         needs_clflush_after);
1000
1001                 mutex_lock(&dev->struct_mutex);
1002
1003                 if (ret)
1004                         goto out;
1005
1006 next_page:
1007                 remain -= page_length;
1008                 user_data += page_length;
1009                 offset += page_length;
1010         }
1011
1012 out:
1013         i915_gem_object_unpin_pages(obj);
1014
1015         if (hit_slowpath) {
1016                 /*
1017                  * Fixup: Flush cpu caches in case we didn't flush the dirty
1018                  * cachelines in-line while writing and the object moved
1019                  * out of the cpu write domain while we've dropped the lock.
1020                  */
1021                 if (!needs_clflush_after &&
1022                     obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1023                         if (i915_gem_clflush_object(obj, obj->pin_display))
1024                                 i915_gem_chipset_flush(dev);
1025                 }
1026         }
1027
1028         if (needs_clflush_after)
1029                 i915_gem_chipset_flush(dev);
1030
1031         intel_fb_obj_flush(obj, false);
1032         return ret;
1033 }
1034
1035 /**
1036  * Writes data to the object referenced by handle.
1037  *
1038  * On error, the contents of the buffer that were to be modified are undefined.
1039  */
1040 int
1041 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1042                       struct drm_file *file)
1043 {
1044         struct drm_i915_private *dev_priv = dev->dev_private;
1045         struct drm_i915_gem_pwrite *args = data;
1046         struct drm_i915_gem_object *obj;
1047         int ret;
1048
1049         if (args->size == 0)
1050                 return 0;
1051
1052         if (!access_ok(VERIFY_READ,
1053                        to_user_ptr(args->data_ptr),
1054                        args->size))
1055                 return -EFAULT;
1056
1057         if (likely(!i915.prefault_disable)) {
1058                 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
1059                                                    args->size);
1060                 if (ret)
1061                         return -EFAULT;
1062         }
1063
1064         intel_runtime_pm_get(dev_priv);
1065
1066         ret = i915_mutex_lock_interruptible(dev);
1067         if (ret)
1068                 goto put_rpm;
1069
1070         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1071         if (&obj->base == NULL) {
1072                 ret = -ENOENT;
1073                 goto unlock;
1074         }
1075
1076         /* Bounds check destination. */
1077         if (args->offset > obj->base.size ||
1078             args->size > obj->base.size - args->offset) {
1079                 ret = -EINVAL;
1080                 goto out;
1081         }
1082
1083         /* prime objects have no backing filp to GEM pread/pwrite
1084          * pages from.
1085          */
1086         if (!obj->base.filp) {
1087                 ret = -EINVAL;
1088                 goto out;
1089         }
1090
1091         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1092
1093         ret = -EFAULT;
1094         /* We can only do the GTT pwrite on untiled buffers, as otherwise
1095          * it would end up going through the fenced access, and we'll get
1096          * different detiling behavior between reading and writing.
1097          * pread/pwrite currently are reading and writing from the CPU
1098          * perspective, requiring manual detiling by the client.
1099          */
1100         if (obj->tiling_mode == I915_TILING_NONE &&
1101             obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
1102             cpu_write_needs_clflush(obj)) {
1103                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
1104                 /* Note that the gtt paths might fail with non-page-backed user
1105                  * pointers (e.g. gtt mappings when moving data between
1106                  * textures). Fall back to the shmem path in that case. */
1107         }
1108
1109         if (ret == -EFAULT || ret == -ENOSPC) {
1110                 if (obj->phys_handle)
1111                         ret = i915_gem_phys_pwrite(obj, args, file);
1112                 else
1113                         ret = i915_gem_shmem_pwrite(dev, obj, args, file);
1114         }
1115
1116 out:
1117         drm_gem_object_unreference(&obj->base);
1118 unlock:
1119         mutex_unlock(&dev->struct_mutex);
1120 put_rpm:
1121         intel_runtime_pm_put(dev_priv);
1122
1123         return ret;
1124 }
1125
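/* Translate the current GPU reset state into an errno: -EIO if the GPU is
 * terminally wedged or the caller cannot cope with -EAGAIN, -EAGAIN while a
 * reset is still in progress (unless we are reinitialising the rings from
 * inside the reset handler itself), and 0 otherwise.
 */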
1126 int
1127 i915_gem_check_wedge(struct i915_gpu_error *error,
1128                      bool interruptible)
1129 {
1130         if (i915_reset_in_progress(error)) {
1131                 /* Non-interruptible callers can't handle -EAGAIN, hence return
1132                  * -EIO unconditionally for these. */
1133                 if (!interruptible)
1134                         return -EIO;
1135
1136                 /* Recovery complete, but the reset failed ... */
1137                 if (i915_terminally_wedged(error))
1138                         return -EIO;
1139
1140                 /*
1141                  * Check if GPU Reset is in progress - we need intel_ring_begin
1142                  * to work properly to reinit the hw state while the gpu is
1143                  * still marked as reset-in-progress. Handle this with a flag.
1144                  */
1145                 if (!error->reload_in_reset)
1146                         return -EAGAIN;
1147         }
1148
1149         return 0;
1150 }
1151
1152 static void fake_irq(unsigned long data)
1153 {
1154         wake_up_process((struct task_struct *)data);
1155 }
1156
1157 static bool missed_irq(struct drm_i915_private *dev_priv,
1158                        struct intel_engine_cs *ring)
1159 {
1160         return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
1161 }
1162
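/* Optimistically busy-wait for up to one jiffy in the hope that the request
 * completes before we pay the cost of setting up an interrupt-driven wait.
 * Bail out early if the ring's IRQ is already in use or we need to
 * reschedule.
 */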
1163 static int __i915_spin_request(struct drm_i915_gem_request *req)
1164 {
1165         unsigned long timeout;
1166
1167         if (i915_gem_request_get_ring(req)->irq_refcount)
1168                 return -EBUSY;
1169
1170         timeout = jiffies + 1;
1171         while (!need_resched()) {
1172                 if (i915_gem_request_completed(req, true))
1173                         return 0;
1174
1175                 if (time_after_eq(jiffies, timeout))
1176                         break;
1177
1178                 cpu_relax_lowlatency();
1179         }
1180         if (i915_gem_request_completed(req, false))
1181                 return 0;
1182
1183         return -EAGAIN;
1184 }
1185
1186 /**
1187  * __i915_wait_request - wait until execution of request has finished
1188  * @req: the request to wait upon
1189  * @reset_counter: reset sequence associated with the given request
1190  * @interruptible: do an interruptible wait (normally yes)
1191  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1192  *
1193  * Note: It is of utmost importance that the passed in seqno and reset_counter
1194  * values have been read by the caller in an smp safe manner. Where read-side
1195  * locks are involved, it is sufficient to read the reset_counter before
1196  * unlocking the lock that protects the seqno. For lockless tricks, the
1197  * reset_counter _must_ be read before, and an appropriate smp_rmb must be
1198  * inserted.
1199  *
1200  * Returns 0 if the request completed within the allotted time. Otherwise
1201  * returns the errno, with the remaining time filled into the timeout argument.
1202  */
1203 int __i915_wait_request(struct drm_i915_gem_request *req,
1204                         unsigned reset_counter,
1205                         bool interruptible,
1206                         s64 *timeout,
1207                         struct intel_rps_client *rps)
1208 {
1209         struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
1210         struct drm_device *dev = ring->dev;
1211         struct drm_i915_private *dev_priv = dev->dev_private;
1212         const bool irq_test_in_progress =
1213                 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
1214         DEFINE_WAIT(wait);
1215         unsigned long timeout_expire;
1216         s64 before, now;
1217         int ret;
1218
1219         WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
1220
1221         if (list_empty(&req->list))
1222                 return 0;
1223
1224         if (i915_gem_request_completed(req, true))
1225                 return 0;
1226
1227         timeout_expire = timeout ?
1228                 jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
1229
1230         if (INTEL_INFO(dev_priv)->gen >= 6)
1231                 gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
1232
1233         /* Record current time in case interrupted by signal, or wedged */
1234         trace_i915_gem_request_wait_begin(req);
1235         before = ktime_get_raw_ns();
1236
1237         /* Optimistic spin for the next jiffie before touching IRQs */
1238         ret = __i915_spin_request(req);
1239         if (ret == 0)
1240                 goto out;
1241
1242         if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) {
1243                 ret = -ENODEV;
1244                 goto out;
1245         }
1246
1247         for (;;) {
1248                 struct timer_list timer;
1249
1250                 prepare_to_wait(&ring->irq_queue, &wait,
1251                                 interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
1252
1253                 /* We need to check whether any gpu reset happened in between
1254                  * the caller grabbing the seqno and now ... */
1255                 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
1256                         /* ... but upgrade the -EAGAIN to an -EIO if the gpu
1257                          * is truly gone. */
1258                         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1259                         if (ret == 0)
1260                                 ret = -EAGAIN;
1261                         break;
1262                 }
1263
1264                 if (i915_gem_request_completed(req, false)) {
1265                         ret = 0;
1266                         break;
1267                 }
1268
1269                 if (interruptible && signal_pending(current)) {
1270                         ret = -ERESTARTSYS;
1271                         break;
1272                 }
1273
1274                 if (timeout && time_after_eq(jiffies, timeout_expire)) {
1275                         ret = -ETIME;
1276                         break;
1277                 }
1278
1279                 timer.function = NULL;
1280                 if (timeout || missed_irq(dev_priv, ring)) {
1281                         unsigned long expire;
1282
1283                         setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
1284                         expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
1285                         mod_timer(&timer, expire);
1286                 }
1287
1288                 io_schedule();
1289
1290                 if (timer.function) {
1291                         del_singleshot_timer_sync(&timer);
1292                         destroy_timer_on_stack(&timer);
1293                 }
1294         }
1295         if (!irq_test_in_progress)
1296                 ring->irq_put(ring);
1297
1298         finish_wait(&ring->irq_queue, &wait);
1299
1300 out:
1301         now = ktime_get_raw_ns();
1302         trace_i915_gem_request_wait_end(req);
1303
1304         if (timeout) {
1305                 s64 tres = *timeout - (now - before);
1306
1307                 *timeout = tres < 0 ? 0 : tres;
1308
1309                 /*
1310                  * Apparently ktime isn't accurate enough and occasionally has a
1311                  * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
1312                  * things up to make the test happy. We allow up to 1 jiffy.
1313                  *
1314                  * This is a regression from the timespec->ktime conversion.
1315                  */
1316                 if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
1317                         *timeout = 0;
1318         }
1319
1320         return ret;
1321 }
1322
1323 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
1324                                    struct drm_file *file)
1325 {
1326         struct drm_i915_private *dev_private;
1327         struct drm_i915_file_private *file_priv;
1328
1329         WARN_ON(!req || !file || req->file_priv);
1330
1331         if (!req || !file)
1332                 return -EINVAL;
1333
1334         if (req->file_priv)
1335                 return -EINVAL;
1336
1337         dev_private = req->ring->dev->dev_private;
1338         file_priv = file->driver_priv;
1339
1340         spin_lock(&file_priv->mm.lock);
1341         req->file_priv = file_priv;
1342         list_add_tail(&req->client_list, &file_priv->mm.request_list);
1343         spin_unlock(&file_priv->mm.lock);
1344
1345         req->pid = get_pid(task_pid(current));
1346
1347         return 0;
1348 }
1349
1350 static inline void
1351 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1352 {
1353         struct drm_i915_file_private *file_priv = request->file_priv;
1354
1355         if (!file_priv)
1356                 return;
1357
1358         spin_lock(&file_priv->mm.lock);
1359         list_del(&request->client_list);
1360         request->file_priv = NULL;
1361         spin_unlock(&file_priv->mm.lock);
1362
1363         put_pid(request->pid);
1364         request->pid = NULL;
1365 }
1366
1367 static void i915_gem_request_retire(struct drm_i915_gem_request *request)
1368 {
1369         trace_i915_gem_request_retire(request);
1370
1371         /* We know the GPU must have read the request to have
1372          * sent us the seqno + interrupt, so use the position
1373          * of tail of the request to update the last known position
1374          * of the GPU head.
1375          *
1376          * Note this requires that we are always called in request
1377          * completion order.
1378          */
1379         request->ringbuf->last_retired_head = request->postfix;
1380
1381         list_del_init(&request->list);
1382         i915_gem_request_remove_from_client(request);
1383
1384         i915_gem_request_unreference(request);
1385 }
1386
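/* Retire the given request and every older request queued on the same ring,
 * walking the ring's request list in submission order.
 */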
1387 static void
1388 __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
1389 {
1390         struct intel_engine_cs *engine = req->ring;
1391         struct drm_i915_gem_request *tmp;
1392
1393         lockdep_assert_held(&engine->dev->struct_mutex);
1394
1395         if (list_empty(&req->list))
1396                 return;
1397
1398         do {
1399                 tmp = list_first_entry(&engine->request_list,
1400                                        typeof(*tmp), list);
1401
1402                 i915_gem_request_retire(tmp);
1403         } while (tmp != req);
1404
1405         WARN_ON(i915_verify_lists(engine->dev));
1406 }
1407
1408 /**
1409  * Waits for a request to be signaled, and cleans up the
1410  * request and object lists appropriately for that event.
1411  */
1412 int
1413 i915_wait_request(struct drm_i915_gem_request *req)
1414 {
1415         struct drm_device *dev;
1416         struct drm_i915_private *dev_priv;
1417         bool interruptible;
1418         int ret;
1419
1420         BUG_ON(req == NULL);
1421
1422         dev = req->ring->dev;
1423         dev_priv = dev->dev_private;
1424         interruptible = dev_priv->mm.interruptible;
1425
1426         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1427
1428         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1429         if (ret)
1430                 return ret;
1431
1432         ret = __i915_wait_request(req,
1433                                   atomic_read(&dev_priv->gpu_error.reset_counter),
1434                                   interruptible, NULL, NULL);
1435         if (ret)
1436                 return ret;
1437
1438         __i915_gem_request_retire__upto(req);
1439         return 0;
1440 }
1441
1442 /**
1443  * Ensures that all rendering to the object has completed and the object is
1444  * safe to unbind from the GTT or access from the CPU.
1445  */
1446 int
1447 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1448                                bool readonly)
1449 {
1450         int ret, i;
1451
1452         if (!obj->active)
1453                 return 0;
1454
1455         if (readonly) {
1456                 if (obj->last_write_req != NULL) {
1457                         ret = i915_wait_request(obj->last_write_req);
1458                         if (ret)
1459                                 return ret;
1460
1461                         i = obj->last_write_req->ring->id;
1462                         if (obj->last_read_req[i] == obj->last_write_req)
1463                                 i915_gem_object_retire__read(obj, i);
1464                         else
1465                                 i915_gem_object_retire__write(obj);
1466                 }
1467         } else {
1468                 for (i = 0; i < I915_NUM_RINGS; i++) {
1469                         if (obj->last_read_req[i] == NULL)
1470                                 continue;
1471
1472                         ret = i915_wait_request(obj->last_read_req[i]);
1473                         if (ret)
1474                                 return ret;
1475
1476                         i915_gem_object_retire__read(obj, i);
1477                 }
1478                 RQ_BUG_ON(obj->active);
1479         }
1480
1481         return 0;
1482 }
1483
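/* Called once @req is known to have completed: drop the object's read
 * and/or write tracking for it and retire it, together with any older
 * requests on its ring.
 */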
1484 static void
1485 i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
1486                                struct drm_i915_gem_request *req)
1487 {
1488         int ring = req->ring->id;
1489
1490         if (obj->last_read_req[ring] == req)
1491                 i915_gem_object_retire__read(obj, ring);
1492         else if (obj->last_write_req == req)
1493                 i915_gem_object_retire__write(obj);
1494
1495         __i915_gem_request_retire__upto(req);
1496 }
1497
1498 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1499  * as the object state may change during this call.
1500  */
1501 static __must_check int
1502 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1503                                             struct intel_rps_client *rps,
1504                                             bool readonly)
1505 {
1506         struct drm_device *dev = obj->base.dev;
1507         struct drm_i915_private *dev_priv = dev->dev_private;
1508         struct drm_i915_gem_request *requests[I915_NUM_RINGS];
1509         unsigned reset_counter;
1510         int ret, i, n = 0;
1511
1512         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1513         BUG_ON(!dev_priv->mm.interruptible);
1514
1515         if (!obj->active)
1516                 return 0;
1517
1518         ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1519         if (ret)
1520                 return ret;
1521
1522         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1523
1524         if (readonly) {
1525                 struct drm_i915_gem_request *req;
1526
1527                 req = obj->last_write_req;
1528                 if (req == NULL)
1529                         return 0;
1530
1531                 requests[n++] = i915_gem_request_reference(req);
1532         } else {
1533                 for (i = 0; i < I915_NUM_RINGS; i++) {
1534                         struct drm_i915_gem_request *req;
1535
1536                         req = obj->last_read_req[i];
1537                         if (req == NULL)
1538                                 continue;
1539
1540                         requests[n++] = i915_gem_request_reference(req);
1541                 }
1542         }
1543
1544         mutex_unlock(&dev->struct_mutex);
1545         for (i = 0; ret == 0 && i < n; i++)
1546                 ret = __i915_wait_request(requests[i], reset_counter, true,
1547                                           NULL, rps);
1548         mutex_lock(&dev->struct_mutex);
1549
1550         for (i = 0; i < n; i++) {
1551                 if (ret == 0)
1552                         i915_gem_object_retire_request(obj, requests[i]);
1553                 i915_gem_request_unreference(requests[i]);
1554         }
1555
1556         return ret;
1557 }
1558
1559 static struct intel_rps_client *to_rps_client(struct drm_file *file)
1560 {
1561         struct drm_i915_file_private *fpriv = file->driver_priv;
1562         return &fpriv->rps;
1563 }
1564
1565 /**
1566  * Called when user space prepares to use an object with the CPU, either
1567  * through the mmap ioctl's mapping or a GTT mapping.
1568  */
1569 int
1570 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1571                           struct drm_file *file)
1572 {
1573         struct drm_i915_gem_set_domain *args = data;
1574         struct drm_i915_gem_object *obj;
1575         uint32_t read_domains = args->read_domains;
1576         uint32_t write_domain = args->write_domain;
1577         int ret;
1578
1579         /* Only handle setting domains to types used by the CPU. */
1580         if (write_domain & I915_GEM_GPU_DOMAINS)
1581                 return -EINVAL;
1582
1583         if (read_domains & I915_GEM_GPU_DOMAINS)
1584                 return -EINVAL;
1585
1586         /* Having something in the write domain implies it's in the read
1587          * domain, and only that read domain.  Enforce that in the request.
1588          */
1589         if (write_domain != 0 && read_domains != write_domain)
1590                 return -EINVAL;
1591
1592         ret = i915_mutex_lock_interruptible(dev);
1593         if (ret)
1594                 return ret;
1595
1596         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1597         if (&obj->base == NULL) {
1598                 ret = -ENOENT;
1599                 goto unlock;
1600         }
1601
1602         /* Try to flush the object off the GPU without holding the lock.
1603          * We will repeat the flush holding the lock in the normal manner
1604          * to catch cases where we are gazumped.
1605          */
1606         ret = i915_gem_object_wait_rendering__nonblocking(obj,
1607                                                           to_rps_client(file),
1608                                                           !write_domain);
1609         if (ret)
1610                 goto unref;
1611
1612         if (read_domains & I915_GEM_DOMAIN_GTT)
1613                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1614         else
1615                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1616
1617 unref:
1618         drm_gem_object_unreference(&obj->base);
1619 unlock:
1620         mutex_unlock(&dev->struct_mutex);
1621         return ret;
1622 }
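/*
 * Illustrative userspace sketch (not part of this file) of the set-domain
 * ioctl handled above, using the standard i915 uapi definitions; "fd" and
 * "handle" are hypothetical values owned by the caller.
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_GTT,
 *		.write_domain = I915_GEM_DOMAIN_GTT,
 *	};
 *
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 *
 * Requesting the GTT (or CPU) domain before access gives the kernel a chance
 * to wait for outstanding rendering and flush caches, as implemented above.
 */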
1623
1624 /**
1625  * Called when user space has done writes to this buffer
1626  */
1627 int
1628 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1629                          struct drm_file *file)
1630 {
1631         struct drm_i915_gem_sw_finish *args = data;
1632         struct drm_i915_gem_object *obj;
1633         int ret = 0;
1634
1635         ret = i915_mutex_lock_interruptible(dev);
1636         if (ret)
1637                 return ret;
1638
1639         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1640         if (&obj->base == NULL) {
1641                 ret = -ENOENT;
1642                 goto unlock;
1643         }
1644
1645         /* Pinned buffers may be scanout, so flush the cache */
1646         if (obj->pin_display)
1647                 i915_gem_object_flush_cpu_write_domain(obj);
1648
1649         drm_gem_object_unreference(&obj->base);
1650 unlock:
1651         mutex_unlock(&dev->struct_mutex);
1652         return ret;
1653 }
1654
1655 /**
1656  * Maps the contents of an object, returning the address it is mapped
1657  * into.
1658  *
1659  * While the mapping holds a reference on the contents of the object, it doesn't
1660  * imply a ref on the object itself.
1661  *
1662  * IMPORTANT:
1663  *
1664  * DRM driver writers who look at this function as an example for how to do GEM
1665  * mmap support, please don't implement mmap support like this. The modern way
1666  * to implement DRM mmap support is with an mmap offset ioctl (like
1667  * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1668  * That way debug tooling like valgrind will understand what's going on; hiding
1669  * the mmap call in a driver-private ioctl breaks that. The i915 driver only
1670  * does cpu mmaps this way because we didn't know better.
1671  */
1672 int
1673 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1674                     struct drm_file *file)
1675 {
1676         struct drm_i915_gem_mmap *args = data;
1677         struct drm_gem_object *obj;
1678         unsigned long addr;
1679
1680         if (args->flags & ~(I915_MMAP_WC))
1681                 return -EINVAL;
1682
1683         if (args->flags & I915_MMAP_WC && !cpu_has_pat)
1684                 return -ENODEV;
1685
1686         obj = drm_gem_object_lookup(dev, file, args->handle);
1687         if (obj == NULL)
1688                 return -ENOENT;
1689
1690         /* prime objects have no backing filp to GEM mmap
1691          * pages from.
1692          */
1693         if (!obj->filp) {
1694                 drm_gem_object_unreference_unlocked(obj);
1695                 return -EINVAL;
1696         }
1697
1698         addr = vm_mmap(obj->filp, 0, args->size,
1699                        PROT_READ | PROT_WRITE, MAP_SHARED,
1700                        args->offset);
1701         if (args->flags & I915_MMAP_WC) {
1702                 struct mm_struct *mm = current->mm;
1703                 struct vm_area_struct *vma;
1704
1705                 down_write(&mm->mmap_sem);
1706                 vma = find_vma(mm, addr);
1707                 if (vma)
1708                         vma->vm_page_prot =
1709                                 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1710                 else
1711                         addr = -ENOMEM;
1712                 up_write(&mm->mmap_sem);
1713         }
1714         drm_gem_object_unreference_unlocked(obj);
1715         if (IS_ERR((void *)addr))
1716                 return addr;
1717
1718         args->addr_ptr = (uint64_t) addr;
1719
1720         return 0;
1721 }
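/*
 * Illustrative userspace sketch (not part of this file) of the legacy CPU
 * mmap path handled above; "fd", "handle" and "len" are hypothetical. As the
 * comment above explains, new code should prefer the mmap-offset flow (see
 * i915_gem_mmap_gtt_ioctl() below).
 *
 *	struct drm_i915_gem_mmap arg = { 0 };
 *	void *ptr = MAP_FAILED;
 *
 *	arg.handle = handle;
 *	arg.size = len;
 *	arg.flags = I915_MMAP_WC;
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *		ptr = (void *)(uintptr_t)arg.addr_ptr;
 */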
1722
1723 /**
1724  * i915_gem_fault - fault a page into the GTT
1725  * @vma: VMA in question
1726  * @vmf: fault info
1727  *
1728  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1729  * from userspace.  The fault handler takes care of binding the object to
1730  * the GTT (if needed), allocating and programming a fence register (again,
1731  * only if needed based on whether the old reg is still valid or the object
1732  * is tiled) and inserting a new PTE into the faulting process.
1733  *
1734  * Note that the faulting process may involve evicting existing objects
1735  * from the GTT and/or fence registers to make room.  So performance may
1736  * suffer if the GTT working set is large or there are few fence registers
1737  * left.
1738  */
1739 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1740 {
1741         struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1742         struct drm_device *dev = obj->base.dev;
1743         struct drm_i915_private *dev_priv = dev->dev_private;
1744         struct i915_ggtt_view view = i915_ggtt_view_normal;
1745         pgoff_t page_offset;
1746         unsigned long pfn;
1747         int ret = 0;
1748         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1749
1750         intel_runtime_pm_get(dev_priv);
1751
1752         /* We don't use vmf->pgoff since that has the fake offset */
1753         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1754                 PAGE_SHIFT;
1755
1756         ret = i915_mutex_lock_interruptible(dev);
1757         if (ret)
1758                 goto out;
1759
1760         trace_i915_gem_object_fault(obj, page_offset, true, write);
1761
1762         /* Try to flush the object off the GPU first without holding the lock.
1763          * Upon reacquiring the lock, we will perform our sanity checks and then
1764          * repeat the flush holding the lock in the normal manner to catch cases
1765          * where we are gazumped.
1766          */
1767         ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1768         if (ret)
1769                 goto unlock;
1770
1771         /* Access to snoopable pages through the GTT is incoherent. */
1772         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1773                 ret = -EFAULT;
1774                 goto unlock;
1775         }
1776
1777         /* Use a partial view if the object is bigger than the aperture. */
1778         if (obj->base.size >= dev_priv->gtt.mappable_end &&
1779             obj->tiling_mode == I915_TILING_NONE) {
1780                 static const unsigned int chunk_size = 256; /* 1 MiB */
1781
1782                 memset(&view, 0, sizeof(view));
1783                 view.type = I915_GGTT_VIEW_PARTIAL;
1784                 view.params.partial.offset = rounddown(page_offset, chunk_size);
1785                 view.params.partial.size =
1786                         min_t(unsigned int,
1787                               chunk_size,
1788                               (vma->vm_end - vma->vm_start)/PAGE_SIZE -
1789                               view.params.partial.offset);
1790         }
1791
1792         /* Now pin it into the GTT if needed */
1793         ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
1794         if (ret)
1795                 goto unlock;
1796
1797         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1798         if (ret)
1799                 goto unpin;
1800
1801         ret = i915_gem_object_get_fence(obj);
1802         if (ret)
1803                 goto unpin;
1804
1805         /* Finally, remap it using the new GTT offset */
1806         pfn = dev_priv->gtt.mappable_base +
1807                 i915_gem_obj_ggtt_offset_view(obj, &view);
1808         pfn >>= PAGE_SHIFT;
1809
1810         if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
1811                 /* Overriding existing pages in a partial view does not cause
1812                  * us any trouble as the TLBs are still valid, because the fault
1813                  * is due to userspace losing part of the mapping or never
1814                  * having accessed it before (within this partial view's range).
1815                  */
1816                 unsigned long base = vma->vm_start +
1817                                      (view.params.partial.offset << PAGE_SHIFT);
1818                 unsigned int i;
1819
1820                 for (i = 0; i < view.params.partial.size; i++) {
1821                         ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
1822                         if (ret)
1823                                 break;
1824                 }
1825
1826                 obj->fault_mappable = true;
1827         } else {
1828                 if (!obj->fault_mappable) {
1829                         unsigned long size = min_t(unsigned long,
1830                                                    vma->vm_end - vma->vm_start,
1831                                                    obj->base.size);
1832                         int i;
1833
1834                         for (i = 0; i < size >> PAGE_SHIFT; i++) {
1835                                 ret = vm_insert_pfn(vma,
1836                                                     (unsigned long)vma->vm_start + i * PAGE_SIZE,
1837                                                     pfn + i);
1838                                 if (ret)
1839                                         break;
1840                         }
1841
1842                         obj->fault_mappable = true;
1843                 } else
1844                         ret = vm_insert_pfn(vma,
1845                                             (unsigned long)vmf->virtual_address,
1846                                             pfn + page_offset);
1847         }
1848 unpin:
1849         i915_gem_object_ggtt_unpin_view(obj, &view);
1850 unlock:
1851         mutex_unlock(&dev->struct_mutex);
1852 out:
1853         switch (ret) {
1854         case -EIO:
1855                 /*
1856                  * We eat errors when the gpu is terminally wedged to avoid
1857                  * userspace unduly crashing (gl has no provisions for mmaps to
1858                  * fail). But any other -EIO isn't ours (e.g. swap in failure)
1859                  * and so needs to be reported.
1860                  */
1861                 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1862                         ret = VM_FAULT_SIGBUS;
1863                         break;
1864                 }
1865         case -EAGAIN:
1866                 /*
1867                  * EAGAIN means the gpu is hung and we'll wait for the error
1868                  * handler to reset everything when re-faulting in
1869                  * i915_mutex_lock_interruptible.
1870                  */
1871         case 0:
1872         case -ERESTARTSYS:
1873         case -EINTR:
1874         case -EBUSY:
1875                 /*
1876                  * EBUSY is ok: this just means that another thread
1877                  * already did the job.
1878                  */
1879                 ret = VM_FAULT_NOPAGE;
1880                 break;
1881         case -ENOMEM:
1882                 ret = VM_FAULT_OOM;
1883                 break;
1884         case -ENOSPC:
1885         case -EFAULT:
1886                 ret = VM_FAULT_SIGBUS;
1887                 break;
1888         default:
1889                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1890                 ret = VM_FAULT_SIGBUS;
1891                 break;
1892         }
1893
1894         intel_runtime_pm_put(dev_priv);
1895         return ret;
1896 }
1897
1898 /**
1899  * i915_gem_release_mmap - remove physical page mappings
1900  * @obj: obj in question
1901  *
1902  * Preserve the reservation of the mmapping with the DRM core code, but
1903  * relinquish ownership of the pages back to the system.
1904  *
1905  * It is vital that we remove the page mapping if we have mapped a tiled
1906  * object through the GTT and then lose the fence register due to
1907  * resource pressure. Similarly if the object has been moved out of the
1908  * aperture, then pages mapped into userspace must be revoked. Removing the
1909  * mapping will then trigger a page fault on the next user access, allowing
1910  * fixup by i915_gem_fault().
1911  */
1912 void
1913 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1914 {
1915         if (!obj->fault_mappable)
1916                 return;
1917
1918         drm_vma_node_unmap(&obj->base.vma_node,
1919                            obj->base.dev->anon_inode->i_mapping);
1920         obj->fault_mappable = false;
1921 }
1922
1923 void
1924 i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1925 {
1926         struct drm_i915_gem_object *obj;
1927
1928         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1929                 i915_gem_release_mmap(obj);
1930 }
1931
1932 uint32_t
1933 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1934 {
1935         uint32_t gtt_size;
1936
1937         if (INTEL_INFO(dev)->gen >= 4 ||
1938             tiling_mode == I915_TILING_NONE)
1939                 return size;
1940
1941         /* Previous chips need a power-of-two fence region when tiling */
1942         if (INTEL_INFO(dev)->gen == 3)
1943                 gtt_size = 1024*1024;
1944         else
1945                 gtt_size = 512*1024;
1946
1947         while (gtt_size < size)
1948                 gtt_size <<= 1;
1949
1950         return gtt_size;
1951 }
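/*
 * Worked example for the rounding above (illustrative only): a 1.5 MiB
 * X-tiled object on gen3 starts from the 1 MiB minimum and is doubled once,
 * so it occupies a 2 MiB fence region; on gen2 it starts from 512 KiB and is
 * doubled twice to reach the same 2 MiB.
 */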
1952
1953 /**
1954  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1955  * @dev: DRM device
 * @size: size of the object
 * @tiling_mode: tiling mode of the object
 * @fenced: whether the object will be accessed through a fence register
1956  *
1957  * Return the required GTT alignment for an object, taking into account
1958  * potential fence register mapping.
1959  */
1960 uint32_t
1961 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1962                            int tiling_mode, bool fenced)
1963 {
1964         /*
1965          * Minimum alignment is 4k (GTT page size), but might be greater
1966          * if a fence register is needed for the object.
1967          */
1968         if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1969             tiling_mode == I915_TILING_NONE)
1970                 return 4096;
1971
1972         /*
1973          * Previous chips need to be aligned to the size of the smallest
1974          * fence register that can contain the object.
1975          */
1976         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1977 }
1978
1979 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1980 {
1981         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1982         int ret;
1983
1984         if (drm_vma_node_has_offset(&obj->base.vma_node))
1985                 return 0;
1986
1987         dev_priv->mm.shrinker_no_lock_stealing = true;
1988
1989         ret = drm_gem_create_mmap_offset(&obj->base);
1990         if (ret != -ENOSPC)
1991                 goto out;
1992
1993         /* Badly fragmented mmap space? The only way we can recover
1994          * space is by destroying unwanted objects. We can't randomly release
1995          * mmap_offsets as userspace expects them to be persistent for the
1996          * lifetime of the objects. The closest we can do is to release the
1997          * offsets on purgeable objects by truncating them and marking them purged,
1998          * which prevents userspace from ever using that object again.
1999          */
2000         i915_gem_shrink(dev_priv,
2001                         obj->base.size >> PAGE_SHIFT,
2002                         I915_SHRINK_BOUND |
2003                         I915_SHRINK_UNBOUND |
2004                         I915_SHRINK_PURGEABLE);
2005         ret = drm_gem_create_mmap_offset(&obj->base);
2006         if (ret != -ENOSPC)
2007                 goto out;
2008
2009         i915_gem_shrink_all(dev_priv);
2010         ret = drm_gem_create_mmap_offset(&obj->base);
2011 out:
2012         dev_priv->mm.shrinker_no_lock_stealing = false;
2013
2014         return ret;
2015 }
2016
2017 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2018 {
2019         drm_gem_free_mmap_offset(&obj->base);
2020 }
2021
2022 int
2023 i915_gem_mmap_gtt(struct drm_file *file,
2024                   struct drm_device *dev,
2025                   uint32_t handle,
2026                   uint64_t *offset)
2027 {
2028         struct drm_i915_gem_object *obj;
2029         int ret;
2030
2031         ret = i915_mutex_lock_interruptible(dev);
2032         if (ret)
2033                 return ret;
2034
2035         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
2036         if (&obj->base == NULL) {
2037                 ret = -ENOENT;
2038                 goto unlock;
2039         }
2040
2041         if (obj->madv != I915_MADV_WILLNEED) {
2042                 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
2043                 ret = -EFAULT;
2044                 goto out;
2045         }
2046
2047         ret = i915_gem_object_create_mmap_offset(obj);
2048         if (ret)
2049                 goto out;
2050
2051         *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2052
2053 out:
2054         drm_gem_object_unreference(&obj->base);
2055 unlock:
2056         mutex_unlock(&dev->struct_mutex);
2057         return ret;
2058 }
2059
2060 /**
2061  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2062  * @dev: DRM device
2063  * @data: GTT mapping ioctl data
2064  * @file: DRM file pointer for the caller
2065  *
2066  * Simply returns the fake offset to userspace so it can mmap it.
2067  * The mmap call will end up in drm_gem_mmap(), which will set things
2068  * up so we can get faults in the handler above.
2069  *
2070  * The fault handler will take care of binding the object into the GTT
2071  * (since it may have been evicted to make room for something), allocating
2072  * a fence register, and mapping the appropriate aperture address into
2073  * userspace.
2074  */
2075 int
2076 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2077                         struct drm_file *file)
2078 {
2079         struct drm_i915_gem_mmap_gtt *args = data;
2080
2081         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
2082 }
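/*
 * Illustrative userspace sketch (not part of this file) of the mmap-offset
 * flow documented above; "fd", "handle" and "len" are hypothetical.
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	void *ptr = MAP_FAILED;
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
 *		ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   fd, arg.offset);
 *
 * Subsequent CPU accesses to ptr fault through i915_gem_fault() above, which
 * binds the object into the GTT on demand.
 */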
2083
2084 /* Immediately discard the backing storage */
2085 static void
2086 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2087 {
2088         i915_gem_object_free_mmap_offset(obj);
2089
2090         if (obj->base.filp == NULL)
2091                 return;
2092
2093         /* Our goal here is to return as much of the memory as
2094          * is possible back to the system as we are called from OOM.
2095          * To do this we must instruct the shmfs to drop all of its
2096          * backing pages, *now*.
2097          */
2098         shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2099         obj->madv = __I915_MADV_PURGED;
2100 }
2101
2102 /* Try to discard unwanted pages */
2103 static void
2104 i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2105 {
2106         struct address_space *mapping;
2107
2108         switch (obj->madv) {
2109         case I915_MADV_DONTNEED:
2110                 i915_gem_object_truncate(obj);
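                /* fall through - a truncated object is already purged */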
2111         case __I915_MADV_PURGED:
2112                 return;
2113         }
2114
2115         if (obj->base.filp == NULL)
2116                 return;
2117
2118         mapping = file_inode(obj->base.filp)->i_mapping;
2119         invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2120 }
2121
2122 static void
2123 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2124 {
2125         struct sg_page_iter sg_iter;
2126         int ret;
2127
2128         BUG_ON(obj->madv == __I915_MADV_PURGED);
2129
2130         ret = i915_gem_object_set_to_cpu_domain(obj, true);
2131         if (ret) {
2132                 /* In the event of a disaster, abandon all caches and
2133                  * hope for the best.
2134                  */
2135                 WARN_ON(ret != -EIO);
2136                 i915_gem_clflush_object(obj, true);
2137                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2138         }
2139
2140         if (i915_gem_object_needs_bit17_swizzle(obj))
2141                 i915_gem_object_save_bit_17_swizzle(obj);
2142
2143         if (obj->madv == I915_MADV_DONTNEED)
2144                 obj->dirty = 0;
2145
2146         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
2147                 struct page *page = sg_page_iter_page(&sg_iter);
2148
2149                 if (obj->dirty)
2150                         set_page_dirty(page);
2151
2152                 if (obj->madv == I915_MADV_WILLNEED)
2153                         mark_page_accessed(page);
2154
2155                 page_cache_release(page);
2156         }
2157         obj->dirty = 0;
2158
2159         sg_free_table(obj->pages);
2160         kfree(obj->pages);
2161 }
2162
2163 int
2164 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2165 {
2166         const struct drm_i915_gem_object_ops *ops = obj->ops;
2167
2168         if (obj->pages == NULL)
2169                 return 0;
2170
2171         if (obj->pages_pin_count)
2172                 return -EBUSY;
2173
2174         BUG_ON(i915_gem_obj_bound_any(obj));
2175
2176         /* ->put_pages might need to allocate memory for the bit17 swizzle
2177          * array, hence protect them from being reaped by removing them from gtt
2178          * lists early. */
2179         list_del(&obj->global_list);
2180
2181         ops->put_pages(obj);
2182         obj->pages = NULL;
2183
2184         i915_gem_object_invalidate(obj);
2185
2186         return 0;
2187 }
2188
2189 static int
2190 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2191 {
2192         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2193         int page_count, i;
2194         struct address_space *mapping;
2195         struct sg_table *st;
2196         struct scatterlist *sg;
2197         struct sg_page_iter sg_iter;
2198         struct page *page;
2199         unsigned long last_pfn = 0;     /* suppress gcc warning */
2200         gfp_t gfp;
2201
2202         /* Assert that the object is not currently in any GPU domain. As it
2203          * wasn't in the GTT, there shouldn't be any way it could have been in
2204          * a GPU cache
2205          */
2206         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2207         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2208
2209         st = kmalloc(sizeof(*st), GFP_KERNEL);
2210         if (st == NULL)
2211                 return -ENOMEM;
2212
2213         page_count = obj->base.size / PAGE_SIZE;
2214         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2215                 kfree(st);
2216                 return -ENOMEM;
2217         }
2218
2219         /* Get the list of pages out of our struct file.  They'll be pinned
2220          * at this point until we release them.
2221          *
2222          * Fail silently without starting the shrinker
2223          */
2224         mapping = file_inode(obj->base.filp)->i_mapping;
2225         gfp = mapping_gfp_mask(mapping);
2226         gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
2227         gfp &= ~(__GFP_IO | __GFP_WAIT);
2228         sg = st->sgl;
2229         st->nents = 0;
2230         for (i = 0; i < page_count; i++) {
2231                 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2232                 if (IS_ERR(page)) {
2233                         i915_gem_shrink(dev_priv,
2234                                         page_count,
2235                                         I915_SHRINK_BOUND |
2236                                         I915_SHRINK_UNBOUND |
2237                                         I915_SHRINK_PURGEABLE);
2238                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2239                 }
2240                 if (IS_ERR(page)) {
2241                         /* We've tried hard to allocate the memory by reaping
2242                          * our own buffer, now let the real VM do its job and
2243                          * go down in flames if truly OOM.
2244                          */
2245                         i915_gem_shrink_all(dev_priv);
2246                         page = shmem_read_mapping_page(mapping, i);
2247                         if (IS_ERR(page))
2248                                 goto err_pages;
2249                 }
2250 #ifdef CONFIG_SWIOTLB
2251                 if (swiotlb_nr_tbl()) {
2252                         st->nents++;
2253                         sg_set_page(sg, page, PAGE_SIZE, 0);
2254                         sg = sg_next(sg);
2255                         continue;
2256                 }
2257 #endif
2258                 if (!i || page_to_pfn(page) != last_pfn + 1) {
2259                         if (i)
2260                                 sg = sg_next(sg);
2261                         st->nents++;
2262                         sg_set_page(sg, page, PAGE_SIZE, 0);
2263                 } else {
2264                         sg->length += PAGE_SIZE;
2265                 }
2266                 last_pfn = page_to_pfn(page);
2267
2268                 /* Check that the i965g/gm workaround works. */
2269                 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2270         }
2271 #ifdef CONFIG_SWIOTLB
2272         if (!swiotlb_nr_tbl())
2273 #endif
2274                 sg_mark_end(sg);
2275         obj->pages = st;
2276
2277         if (i915_gem_object_needs_bit17_swizzle(obj))
2278                 i915_gem_object_do_bit_17_swizzle(obj);
2279
2280         if (obj->tiling_mode != I915_TILING_NONE &&
2281             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2282                 i915_gem_object_pin_pages(obj);
2283
2284         return 0;
2285
2286 err_pages:
2287         sg_mark_end(sg);
2288         for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
2289                 page_cache_release(sg_page_iter_page(&sg_iter));
2290         sg_free_table(st);
2291         kfree(st);
2292
2293         /* shmemfs first checks if there is enough memory to allocate the page
2294          * and reports ENOSPC should there be insufficient memory, along with the usual
2295          * ENOMEM for a genuine allocation failure.
2296          *
2297          * We use ENOSPC in our driver to mean that we have run out of aperture
2298          * space and so want to translate the error from shmemfs back to our
2299          * usual understanding of ENOMEM.
2300          */
2301         if (PTR_ERR(page) == -ENOSPC)
2302                 return -ENOMEM;
2303         else
2304                 return PTR_ERR(page);
2305 }
2306
2307 /* Ensure that the associated pages are gathered from the backing storage
2308  * and pinned into our object. i915_gem_object_get_pages() may be called
2309  * multiple times before they are released by a single call to
2310  * i915_gem_object_put_pages() - once the pages are no longer referenced
2311  * either as a result of memory pressure (reaping pages under the shrinker)
2312  * or as the object is itself released.
2313  */
2314 int
2315 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2316 {
2317         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2318         const struct drm_i915_gem_object_ops *ops = obj->ops;
2319         int ret;
2320
2321         if (obj->pages)
2322                 return 0;
2323
2324         if (obj->madv != I915_MADV_WILLNEED) {
2325                 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2326                 return -EFAULT;
2327         }
2328
2329         BUG_ON(obj->pages_pin_count);
2330
2331         ret = ops->get_pages(obj);
2332         if (ret)
2333                 return ret;
2334
2335         list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2336
2337         obj->get_page.sg = obj->pages->sgl;
2338         obj->get_page.last = 0;
2339
2340         return 0;
2341 }
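/*
 * Minimal usage sketch (illustrative, assuming struct_mutex is held): callers
 * that need the backing store to stay resident pair get_pages with a pin and
 * drop the pin when done, so that the shrinker may reap the pages again.
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret == 0) {
 *		i915_gem_object_pin_pages(obj);
 *		...
 *		i915_gem_object_unpin_pages(obj);
 *	}
 */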
2342
2343 void i915_vma_move_to_active(struct i915_vma *vma,
2344                              struct drm_i915_gem_request *req)
2345 {
2346         struct drm_i915_gem_object *obj = vma->obj;
2347         struct intel_engine_cs *ring;
2348
2349         ring = i915_gem_request_get_ring(req);
2350
2351         /* Add a reference if we're newly entering the active list. */
2352         if (obj->active == 0)
2353                 drm_gem_object_reference(&obj->base);
2354         obj->active |= intel_ring_flag(ring);
2355
2356         list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
2357         i915_gem_request_assign(&obj->last_read_req[ring->id], req);
2358
2359         list_move_tail(&vma->mm_list, &vma->vm->active_list);
2360 }
2361
2362 static void
2363 i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
2364 {
2365         RQ_BUG_ON(obj->last_write_req == NULL);
2366         RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
2367
2368         i915_gem_request_assign(&obj->last_write_req, NULL);
2369         intel_fb_obj_flush(obj, true);
2370 }
2371
2372 static void
2373 i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
2374 {
2375         struct i915_vma *vma;
2376
2377         RQ_BUG_ON(obj->last_read_req[ring] == NULL);
2378         RQ_BUG_ON(!(obj->active & (1 << ring)));
2379
2380         list_del_init(&obj->ring_list[ring]);
2381         i915_gem_request_assign(&obj->last_read_req[ring], NULL);
2382
2383         if (obj->last_write_req && obj->last_write_req->ring->id == ring)
2384                 i915_gem_object_retire__write(obj);
2385
2386         obj->active &= ~(1 << ring);
2387         if (obj->active)
2388                 return;
2389
2390         list_for_each_entry(vma, &obj->vma_list, vma_link) {
2391                 if (!list_empty(&vma->mm_list))
2392                         list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
2393         }
2394
2395         i915_gem_request_assign(&obj->last_fenced_req, NULL);
2396         drm_gem_object_unreference(&obj->base);
2397 }
2398
2399 static int
2400 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2401 {
2402         struct drm_i915_private *dev_priv = dev->dev_private;
2403         struct intel_engine_cs *ring;
2404         int ret, i, j;
2405
2406         /* Carefully retire all requests without writing to the rings */
2407         for_each_ring(ring, dev_priv, i) {
2408                 ret = intel_ring_idle(ring);
2409                 if (ret)
2410                         return ret;
2411         }
2412         i915_gem_retire_requests(dev);
2413
2414         /* Finally reset hw state */
2415         for_each_ring(ring, dev_priv, i) {
2416                 intel_ring_init_seqno(ring, seqno);
2417
2418                 for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
2419                         ring->semaphore.sync_seqno[j] = 0;
2420         }
2421
2422         return 0;
2423 }
2424
2425 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2426 {
2427         struct drm_i915_private *dev_priv = dev->dev_private;
2428         int ret;
2429
2430         if (seqno == 0)
2431                 return -EINVAL;
2432
2433         /* The seqno in the HWS page needs to be set to a value less than
2434          * the one we will inject into the ring
2435          */
2436         ret = i915_gem_init_seqno(dev, seqno - 1);
2437         if (ret)
2438                 return ret;
2439
2440         /* Carefully set the last_seqno value so that wrap
2441          * detection still works
2442          */
2443         dev_priv->next_seqno = seqno;
2444         dev_priv->last_seqno = seqno - 1;
2445         if (dev_priv->last_seqno == 0)
2446                 dev_priv->last_seqno--;
2447
2448         return 0;
2449 }
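/*
 * Worked example (illustrative): i915_gem_set_seqno(dev, 0x1000) idles the
 * rings, initialises each status page to 0xfff via i915_gem_init_seqno(), and
 * leaves next_seqno == 0x1000 and last_seqno == 0xfff, so the next request
 * emitted uses 0x1000 and wrap detection keeps working.
 */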
2450
2451 int
2452 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2453 {
2454         struct drm_i915_private *dev_priv = dev->dev_private;
2455
2456         /* reserve 0 for non-seqno */
2457         if (dev_priv->next_seqno == 0) {
2458                 int ret = i915_gem_init_seqno(dev, 0);
2459                 if (ret)
2460                         return ret;
2461
2462                 dev_priv->next_seqno = 1;
2463         }
2464
2465         *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
2466         return 0;
2467 }
2468
2469 /*
2470  * NB: This function is not allowed to fail. Doing so would mean the
2471  * request is not being tracked for completion but the work itself is
2472  * going to happen on the hardware. This would be a Bad Thing(tm).
2473  */
2474 void __i915_add_request(struct drm_i915_gem_request *request,
2475                         struct drm_i915_gem_object *obj,
2476                         bool flush_caches)
2477 {
2478         struct intel_engine_cs *ring;
2479         struct drm_i915_private *dev_priv;
2480         struct intel_ringbuffer *ringbuf;
2481         u32 request_start;
2482         int ret;
2483
2484         if (WARN_ON(request == NULL))
2485                 return;
2486
2487         ring = request->ring;
2488         dev_priv = ring->dev->dev_private;
2489         ringbuf = request->ringbuf;
2490
2491         /*
2492          * To ensure that this call will not fail, space for its emissions
2493          * should already have been reserved in the ring buffer. Let the ring
2494          * know that it is time to use that space up.
2495          */
2496         intel_ring_reserved_space_use(ringbuf);
2497
2498         request_start = intel_ring_get_tail(ringbuf);
2499         /*
2500          * Emit any outstanding flushes - execbuf can fail to emit the flush
2501          * after having emitted the batchbuffer command. Hence we need to fix
2502          * things up similar to emitting the lazy request. The difference here
2503          * is that the flush _must_ happen before the next request, no matter
2504          * what.
2505          */
2506         if (flush_caches) {
2507                 if (i915.enable_execlists)
2508                         ret = logical_ring_flush_all_caches(request);
2509                 else
2510                         ret = intel_ring_flush_all_caches(request);
2511                 /* Not allowed to fail! */
2512                 WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
2513         }
2514
2515         /* Record the position of the start of the request so that
2516          * should we detect the updated seqno part-way through the
2517          * GPU processing the request, we never over-estimate the
2518          * position of the head.
2519          */
2520         request->postfix = intel_ring_get_tail(ringbuf);
2521
2522         if (i915.enable_execlists)
2523                 ret = ring->emit_request(request);
2524         else {
2525                 ret = ring->add_request(request);
2526
2527                 request->tail = intel_ring_get_tail(ringbuf);
2528         }
2529         /* Not allowed to fail! */
2530         WARN(ret, "emit|add_request failed: %d!\n", ret);
2531
2532         request->head = request_start;
2533
2534         /* Whilst this request exists, batch_obj will be on the
2535          * active_list, and so will hold the active reference. Only when this
2536          * request is retired will the batch_obj be moved onto the
2537          * inactive_list and lose its active reference. Hence we do not need
2538          * to explicitly hold another reference here.
2539          */
2540         request->batch_obj = obj;
2541
2542         request->emitted_jiffies = jiffies;
2543         list_add_tail(&request->list, &ring->request_list);
2544
2545         trace_i915_gem_request_add(request);
2546
2547         i915_queue_hangcheck(ring->dev);
2548
2549         queue_delayed_work(dev_priv->wq,
2550                            &dev_priv->mm.retire_work,
2551                            round_jiffies_up_relative(HZ));
2552         intel_mark_busy(dev_priv->dev);
2553
2554         /* Sanity check that the reserved size was large enough. */
2555         intel_ring_reserved_space_end(ringbuf);
2556 }
2557
2558 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2559                                    const struct intel_context *ctx)
2560 {
2561         unsigned long elapsed;
2562
2563         elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2564
2565         if (ctx->hang_stats.banned)
2566                 return true;
2567
2568         if (ctx->hang_stats.ban_period_seconds &&
2569             elapsed <= ctx->hang_stats.ban_period_seconds) {
2570                 if (!i915_gem_context_is_default(ctx)) {
2571                         DRM_DEBUG("context hanging too fast, banning!\n");
2572                         return true;
2573                 } else if (i915_stop_ring_allow_ban(dev_priv)) {
2574                         if (i915_stop_ring_allow_warn(dev_priv))
2575                                 DRM_ERROR("gpu hanging too fast, banning!\n");
2576                         return true;
2577                 }
2578         }
2579
2580         return false;
2581 }
2582
2583 static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2584                                   struct intel_context *ctx,
2585                                   const bool guilty)
2586 {
2587         struct i915_ctx_hang_stats *hs;
2588
2589         if (WARN_ON(!ctx))
2590                 return;
2591
2592         hs = &ctx->hang_stats;
2593
2594         if (guilty) {
2595                 hs->banned = i915_context_is_banned(dev_priv, ctx);
2596                 hs->batch_active++;
2597                 hs->guilty_ts = get_seconds();
2598         } else {
2599                 hs->batch_pending++;
2600         }
2601 }
2602
2603 void i915_gem_request_free(struct kref *req_ref)
2604 {
2605         struct drm_i915_gem_request *req = container_of(req_ref,
2606                                                  typeof(*req), ref);
2607         struct intel_context *ctx = req->ctx;
2608
2609         if (req->file_priv)
2610                 i915_gem_request_remove_from_client(req);
2611
2612         if (ctx) {
2613                 if (i915.enable_execlists) {
2614                         struct intel_engine_cs *ring = req->ring;
2615
2616                         if (ctx != ring->default_context)
2617                                 intel_lr_context_unpin(ring, ctx);
2618                 }
2619
2620                 i915_gem_context_unreference(ctx);
2621         }
2622
2623         kmem_cache_free(req->i915->requests, req);
2624 }
2625
2626 int i915_gem_request_alloc(struct intel_engine_cs *ring,
2627                            struct intel_context *ctx,
2628                            struct drm_i915_gem_request **req_out)
2629 {
2630         struct drm_i915_private *dev_priv = to_i915(ring->dev);
2631         struct drm_i915_gem_request *req;
2632         int ret;
2633
2634         if (!req_out)
2635                 return -EINVAL;
2636
2637         *req_out = NULL;
2638
2639         req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
2640         if (req == NULL)
2641                 return -ENOMEM;
2642
2643         ret = i915_gem_get_seqno(ring->dev, &req->seqno);
2644         if (ret)
2645                 goto err;
2646
2647         kref_init(&req->ref);
2648         req->i915 = dev_priv;
2649         req->ring = ring;
2650         req->ctx  = ctx;
2651         i915_gem_context_reference(req->ctx);
2652
2653         if (i915.enable_execlists)
2654                 ret = intel_logical_ring_alloc_request_extras(req);
2655         else
2656                 ret = intel_ring_alloc_request_extras(req);
2657         if (ret) {
2658                 i915_gem_context_unreference(req->ctx);
2659                 goto err;
2660         }
2661
2662         /*
2663          * Reserve space in the ring buffer for all the commands required to
2664          * eventually emit this request. This is to guarantee that the
2665          * i915_add_request() call can't fail. Note that the reserve may need
2666          * to be redone if the request is not actually submitted straight
2667          * away, e.g. because a GPU scheduler has deferred it.
2668          */
2669         if (i915.enable_execlists)
2670                 ret = intel_logical_ring_reserve_space(req);
2671         else
2672                 ret = intel_ring_reserve_space(req);
2673         if (ret) {
2674                 /*
2675                  * At this point, the request is fully allocated even if not
2676                  * fully prepared. Thus it can be cleaned up using the proper
2677                  * free code.
2678                  */
2679                 i915_gem_request_cancel(req);
2680                 return ret;
2681         }
2682
2683         *req_out = req;
2684         return 0;
2685
2686 err:
2687         kmem_cache_free(dev_priv->requests, req);
2688         return ret;
2689 }
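/*
 * Minimal lifecycle sketch (illustrative only) tying the reservation above to
 * __i915_add_request(): ring space is reserved at allocation time precisely so
 * that the final add cannot fail once commands have been emitted.
 *
 *	struct drm_i915_gem_request *req;
 *
 *	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
 *	if (ret)
 *		return ret;
 *	...
 *	__i915_add_request(req, NULL, true);
 *
 * Should the request not be submitted after all, i915_gem_request_cancel()
 * below releases both the reservation and the reference.
 */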
2690
2691 void i915_gem_request_cancel(struct drm_i915_gem_request *req)
2692 {
2693         intel_ring_reserved_space_cancel(req->ringbuf);
2694
2695         i915_gem_request_unreference(req);
2696 }
2697
2698 struct drm_i915_gem_request *
2699 i915_gem_find_active_request(struct intel_engine_cs *ring)
2700 {
2701         struct drm_i915_gem_request *request;
2702
2703         list_for_each_entry(request, &ring->request_list, list) {
2704                 if (i915_gem_request_completed(request, false))
2705                         continue;
2706
2707                 return request;
2708         }
2709
2710         return NULL;
2711 }
2712
2713 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2714                                        struct intel_engine_cs *ring)
2715 {
2716         struct drm_i915_gem_request *request;
2717         bool ring_hung;
2718
2719         request = i915_gem_find_active_request(ring);
2720
2721         if (request == NULL)
2722                 return;
2723
2724         ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2725
2726         i915_set_reset_status(dev_priv, request->ctx, ring_hung);
2727
2728         list_for_each_entry_continue(request, &ring->request_list, list)
2729                 i915_set_reset_status(dev_priv, request->ctx, false);
2730 }
2731
2732 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2733                                         struct intel_engine_cs *ring)
2734 {
2735         while (!list_empty(&ring->active_list)) {
2736                 struct drm_i915_gem_object *obj;
2737
2738                 obj = list_first_entry(&ring->active_list,
2739                                        struct drm_i915_gem_object,
2740                                        ring_list[ring->id]);
2741
2742                 i915_gem_object_retire__read(obj, ring->id);
2743         }
2744
2745         /*
2746          * Clear up the execlists queue before freeing the requests, as those
2747          * are the ones that keep the context and ringbuffer backing objects
2748          * pinned in place.
2749          */
2750         while (!list_empty(&ring->execlist_queue)) {
2751                 struct drm_i915_gem_request *submit_req;
2752
2753                 submit_req = list_first_entry(&ring->execlist_queue,
2754                                 struct drm_i915_gem_request,
2755                                 execlist_link);
2756                 list_del(&submit_req->execlist_link);
2757
2758                 if (submit_req->ctx != ring->default_context)
2759                         intel_lr_context_unpin(ring, submit_req->ctx);
2760
2761                 i915_gem_request_unreference(submit_req);
2762         }
2763
2764         /*
2765          * We must free the requests after all the corresponding objects have
2766          * been moved off the active lists, which is the same order the normal
2767          * retire_requests function uses. This is important if objects hold
2768          * implicit references on things like e.g. ppgtt address spaces through
2769          * the request.
2770          */
2771         while (!list_empty(&ring->request_list)) {
2772                 struct drm_i915_gem_request *request;
2773
2774                 request = list_first_entry(&ring->request_list,
2775                                            struct drm_i915_gem_request,
2776                                            list);
2777
2778                 i915_gem_request_retire(request);
2779         }
2780 }
2781
2782 void i915_gem_restore_fences(struct drm_device *dev)
2783 {
2784         struct drm_i915_private *dev_priv = dev->dev_private;
2785         int i;
2786
2787         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2788                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2789
2790                 /*
2791                  * Commit delayed tiling changes if we have an object still
2792                  * attached to the fence, otherwise just clear the fence.
2793                  */
2794                 if (reg->obj) {
2795                         i915_gem_object_update_fence(reg->obj, reg,
2796                                                      reg->obj->tiling_mode);
2797                 } else {
2798                         i915_gem_write_fence(dev, i, NULL);
2799                 }
2800         }
2801 }
2802
2803 void i915_gem_reset(struct drm_device *dev)
2804 {
2805         struct drm_i915_private *dev_priv = dev->dev_private;
2806         struct intel_engine_cs *ring;
2807         int i;
2808
2809         /*
2810          * Before we free the objects from the requests, we need to inspect
2811          * them for finding the guilty party. As the requests only borrow
2812          * their reference to the objects, the inspection must be done first.
2813          */
2814         for_each_ring(ring, dev_priv, i)
2815                 i915_gem_reset_ring_status(dev_priv, ring);
2816
2817         for_each_ring(ring, dev_priv, i)
2818                 i915_gem_reset_ring_cleanup(dev_priv, ring);
2819
2820         i915_gem_context_reset(dev);
2821
2822         i915_gem_restore_fences(dev);
2823
2824         WARN_ON(i915_verify_lists(dev));
2825 }
2826
2827 /**
2828  * This function clears the request list as sequence numbers are passed.
2829  */
2830 void
2831 i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2832 {
2833         WARN_ON(i915_verify_lists(ring->dev));
2834
2835         /* Retire requests first as we use it above for the early return.
2836          * If we retire requests last, we may use a later seqno and so clear
2837          * the requests lists without clearing the active list, leading to
2838          * confusion.
2839          */
2840         while (!list_empty(&ring->request_list)) {
2841                 struct drm_i915_gem_request *request;
2842
2843                 request = list_first_entry(&ring->request_list,
2844                                            struct drm_i915_gem_request,
2845                                            list);
2846
2847                 if (!i915_gem_request_completed(request, true))
2848                         break;
2849
2850                 i915_gem_request_retire(request);
2851         }
2852
2853         /* Move any buffers on the active list that are no longer referenced
2854          * by the ringbuffer to the flushing/inactive lists as appropriate,
2855          * before we free the context associated with the requests.
2856          */
2857         while (!list_empty(&ring->active_list)) {
2858                 struct drm_i915_gem_object *obj;
2859
2860                 obj = list_first_entry(&ring->active_list,
2861                                       struct drm_i915_gem_object,
2862                                       ring_list[ring->id]);
2863
2864                 if (!list_empty(&obj->last_read_req[ring->id]->list))
2865                         break;
2866
2867                 i915_gem_object_retire__read(obj, ring->id);
2868         }
2869
2870         if (unlikely(ring->trace_irq_req &&
2871                      i915_gem_request_completed(ring->trace_irq_req, true))) {
2872                 ring->irq_put(ring);
2873                 i915_gem_request_assign(&ring->trace_irq_req, NULL);
2874         }
2875
2876         WARN_ON(i915_verify_lists(ring->dev));
2877 }
2878
2879 bool
2880 i915_gem_retire_requests(struct drm_device *dev)
2881 {
2882         struct drm_i915_private *dev_priv = dev->dev_private;
2883         struct intel_engine_cs *ring;
2884         bool idle = true;
2885         int i;
2886
2887         for_each_ring(ring, dev_priv, i) {
2888                 i915_gem_retire_requests_ring(ring);
2889                 idle &= list_empty(&ring->request_list);
2890                 if (i915.enable_execlists) {
2891                         unsigned long flags;
2892
2893                         spin_lock_irqsave(&ring->execlist_lock, flags);
2894                         idle &= list_empty(&ring->execlist_queue);
2895                         spin_unlock_irqrestore(&ring->execlist_lock, flags);
2896
2897                         intel_execlists_retire_requests(ring);
2898                 }
2899         }
2900
2901         if (idle)
2902                 mod_delayed_work(dev_priv->wq,
2903                                    &dev_priv->mm.idle_work,
2904                                    msecs_to_jiffies(100));
2905
2906         return idle;
2907 }
2908
2909 static void
2910 i915_gem_retire_work_handler(struct work_struct *work)
2911 {
2912         struct drm_i915_private *dev_priv =
2913                 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2914         struct drm_device *dev = dev_priv->dev;
2915         bool idle;
2916
2917         /* Come back later if the device is busy... */
2918         idle = false;
2919         if (mutex_trylock(&dev->struct_mutex)) {
2920                 idle = i915_gem_retire_requests(dev);
2921                 mutex_unlock(&dev->struct_mutex);
2922         }
2923         if (!idle)
2924                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2925                                    round_jiffies_up_relative(HZ));
2926 }
2927
2928 static void
2929 i915_gem_idle_work_handler(struct work_struct *work)
2930 {
2931         struct drm_i915_private *dev_priv =
2932                 container_of(work, typeof(*dev_priv), mm.idle_work.work);
2933         struct drm_device *dev = dev_priv->dev;
2934         struct intel_engine_cs *ring;
2935         int i;
2936
2937         for_each_ring(ring, dev_priv, i)
2938                 if (!list_empty(&ring->request_list))
2939                         return;
2940
2941         intel_mark_idle(dev);
2942
2943         if (mutex_trylock(&dev->struct_mutex)) {
2944                 struct intel_engine_cs *ring;
2945                 int i;
2946
2947                 for_each_ring(ring, dev_priv, i)
2948                         i915_gem_batch_pool_fini(&ring->batch_pool);
2949
2950                 mutex_unlock(&dev->struct_mutex);
2951         }
2952 }
2953
2954 /**
2955  * Ensures that an object will eventually get non-busy by flushing any required
2956  * write domains and retiring any completed requests.
2958  */
2959 static int
2960 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2961 {
2962         int i;
2963
2964         if (!obj->active)
2965                 return 0;
2966
2967         for (i = 0; i < I915_NUM_RINGS; i++) {
2968                 struct drm_i915_gem_request *req;
2969
2970                 req = obj->last_read_req[i];
2971                 if (req == NULL)
2972                         continue;
2973
2974                 if (list_empty(&req->list))
2975                         goto retire;
2976
2977                 if (i915_gem_request_completed(req, true)) {
2978                         __i915_gem_request_retire__upto(req);
2979 retire:
2980                         i915_gem_object_retire__read(obj, i);
2981                 }
2982         }
2983
2984         return 0;
2985 }
2986
2987 /**
2988  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2989  * @DRM_IOCTL_ARGS: standard ioctl arguments
2990  *
2991  * Returns 0 if successful, else an error is returned with the remaining time in
2992  * the timeout parameter.
2993  *  -ETIME: object is still busy after timeout
2994  *  -ERESTARTSYS: signal interrupted the wait
2995  *  -ENOENT: object doesn't exist
2996  * Also possible, but rare:
2997  *  -EAGAIN: GPU wedged
2998  *  -ENOMEM: out of memory
2999  *  -ENODEV: Internal IRQ fail
3000  *  -E?: The add request failed
3001  *
3002  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
3003  * non-zero timeout parameter the wait ioctl will wait for the given number of
3004  * nanoseconds on an object becoming unbusy. Since the wait itself does so
3005  * without holding struct_mutex, the object may become re-busied before this
3006  * function completes. A similar but shorter race condition exists in the busy
3007  * ioctl.
3008  */
3009 int
3010 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3011 {
3012         struct drm_i915_private *dev_priv = dev->dev_private;
3013         struct drm_i915_gem_wait *args = data;
3014         struct drm_i915_gem_object *obj;
3015         struct drm_i915_gem_request *req[I915_NUM_RINGS];
3016         unsigned reset_counter;
3017         int i, n = 0;
3018         int ret;
3019
3020         if (args->flags != 0)
3021                 return -EINVAL;
3022
3023         ret = i915_mutex_lock_interruptible(dev);
3024         if (ret)
3025                 return ret;
3026
3027         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
3028         if (&obj->base == NULL) {
3029                 mutex_unlock(&dev->struct_mutex);
3030                 return -ENOENT;
3031         }
3032
3033         /* Need to make sure the object gets inactive eventually. */
3034         ret = i915_gem_object_flush_active(obj);
3035         if (ret)
3036                 goto out;
3037
3038         if (!obj->active)
3039                 goto out;
3040
3041         /* Do this after flushing active requests so that we still make forward
3042          * progress when polling on this ioctl with a timeout == 0 (like busy ioctl)
3043          */
3044         if (args->timeout_ns == 0) {
3045                 ret = -ETIME;
3046                 goto out;
3047         }
3048
3049         drm_gem_object_unreference(&obj->base);
3050         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3051
3052         for (i = 0; i < I915_NUM_RINGS; i++) {
3053                 if (obj->last_read_req[i] == NULL)
3054                         continue;
3055
3056                 req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
3057         }
3058
3059         mutex_unlock(&dev->struct_mutex);
3060
3061         for (i = 0; i < n; i++) {
3062                 if (ret == 0)
3063                         ret = __i915_wait_request(req[i], reset_counter, true,
3064                                                   args->timeout_ns > 0 ? &args->timeout_ns : NULL,
3065                                                   file->driver_priv);
3066                 i915_gem_request_unreference__unlocked(req[i]);
3067         }
3068         return ret;
3069
3070 out:
3071         drm_gem_object_unreference(&obj->base);
3072         mutex_unlock(&dev->struct_mutex);
3073         return ret;
3074 }
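
/*
 * Illustrative sketch, not part of the driver: how userspace might use this
 * ioctl to poll a buffer, assuming libdrm's drmIoctl(), an already-open DRM
 * fd and a valid GEM handle ("fd" and "handle" below are placeholders).
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.flags = 0,		// must be zero
 *		.timeout_ns = 0,	// 0 polls, like the busy ioctl
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0)
 *		;			// object is idle
 *	else if (errno == ETIME)
 *		;			// still busy; with a non-zero timeout,
 *					// wait.timeout_ns holds the time left
 */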
3075
3076 static int
3077 __i915_gem_object_sync(struct drm_i915_gem_object *obj,
3078                        struct intel_engine_cs *to,
3079                        struct drm_i915_gem_request *from_req,
3080                        struct drm_i915_gem_request **to_req)
3081 {
3082         struct intel_engine_cs *from;
3083         int ret;
3084
3085         from = i915_gem_request_get_ring(from_req);
3086         if (to == from)
3087                 return 0;
3088
3089         if (i915_gem_request_completed(from_req, true))
3090                 return 0;
3091
3092         if (!i915_semaphore_is_enabled(obj->base.dev)) {
3093                 struct drm_i915_private *i915 = to_i915(obj->base.dev);
3094                 ret = __i915_wait_request(from_req,
3095                                           atomic_read(&i915->gpu_error.reset_counter),
3096                                           i915->mm.interruptible,
3097                                           NULL,
3098                                           &i915->rps.semaphores);
3099                 if (ret)
3100                         return ret;
3101
3102                 i915_gem_object_retire_request(obj, from_req);
3103         } else {
3104                 int idx = intel_ring_sync_index(from, to);
3105                 u32 seqno = i915_gem_request_get_seqno(from_req);
3106
3107                 WARN_ON(!to_req);
3108
3109                 if (seqno <= from->semaphore.sync_seqno[idx])
3110                         return 0;
3111
3112                 if (*to_req == NULL) {
3113                         ret = i915_gem_request_alloc(to, to->default_context, to_req);
3114                         if (ret)
3115                                 return ret;
3116                 }
3117
3118                 trace_i915_gem_ring_sync_to(*to_req, from, from_req);
3119                 ret = to->semaphore.sync_to(*to_req, from, seqno);
3120                 if (ret)
3121                         return ret;
3122
3123                 /* We use last_read_req because sync_to()
3124                  * might have just caused seqno wrap under
3125                  * the radar.
3126                  */
3127                 from->semaphore.sync_seqno[idx] =
3128                         i915_gem_request_get_seqno(obj->last_read_req[from->id]);
3129         }
3130
3131         return 0;
3132 }
3133
3134 /**
3135  * i915_gem_object_sync - sync an object to a ring.
3136  *
3137  * @obj: object which may be in use on another ring.
3138  * @to: ring we wish to use the object on. May be NULL.
3139  * @to_req: request we wish to use the object for. See below.
3140  *          This will be allocated and returned if a request is
3141  *          required but not passed in.
3142  *
3143  * This code is meant to abstract object synchronization with the GPU.
3144  * Calling with NULL implies synchronizing the object with the CPU
3145  * rather than a particular GPU ring. Conceptually we serialise writes
3146  * between engines inside the GPU. We only allow one engine to write
3147  * into a buffer at any time, but multiple readers. To ensure each has
3148  * a coherent view of memory, we must:
3149  *
3150  * - If there is an outstanding write request to the object, the new
3151  *   request must wait for it to complete (either CPU or in hw, requests
3152  *   on the same ring will be naturally ordered).
3153  *
3154  * - If we are a write request (pending_write_domain is set), the new
3155  *   request must wait for outstanding read requests to complete.
3156  *
3157  * For CPU synchronisation (NULL to) no request is required. For syncing with
3158  * rings to_req must be non-NULL. However, a request does not have to be
3159  * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
3160  * request will be allocated automatically and returned through *to_req. Note
3161  * that it is not guaranteed that commands will be emitted (because the system
3162  * might already be idle). Hence there is no need to create a request that
3163  * might never have any work submitted. Note further that if a request is
3164  * returned in *to_req, it is the responsibility of the caller to submit
3165  * that request (after potentially adding more work to it).
3166  *
3167  * Returns 0 if successful, else propagates up the lower layer error.
3168  */
3169 int
3170 i915_gem_object_sync(struct drm_i915_gem_object *obj,
3171                      struct intel_engine_cs *to,
3172                      struct drm_i915_gem_request **to_req)
3173 {
3174         const bool readonly = obj->base.pending_write_domain == 0;
3175         struct drm_i915_gem_request *req[I915_NUM_RINGS];
3176         int ret, i, n;
3177
3178         if (!obj->active)
3179                 return 0;
3180
3181         if (to == NULL)
3182                 return i915_gem_object_wait_rendering(obj, readonly);
3183
3184         n = 0;
3185         if (readonly) {
3186                 if (obj->last_write_req)
3187                         req[n++] = obj->last_write_req;
3188         } else {
3189                 for (i = 0; i < I915_NUM_RINGS; i++)
3190                         if (obj->last_read_req[i])
3191                                 req[n++] = obj->last_read_req[i];
3192         }
3193         for (i = 0; i < n; i++) {
3194                 ret = __i915_gem_object_sync(obj, to, req[i], to_req);
3195                 if (ret)
3196                         return ret;
3197         }
3198
3199         return 0;
3200 }
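
/*
 * Illustrative sketch, not part of the driver: the caller contract for the
 * to_req parameter described above, assuming the caller has no further work
 * to add to the request. i915_add_request_no_flush() is used here simply as
 * the submission helper visible elsewhere in this file.
 *
 *	struct drm_i915_gem_request *to_req = NULL;
 *	int ret;
 *
 *	ret = i915_gem_object_sync(obj, to, &to_req);
 *	if (ret)
 *		return ret;
 *
 *	if (to_req)			// a request was allocated for us,
 *		i915_add_request_no_flush(to_req);	// so we must submit it
 */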
3201
3202 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
3203 {
3204         u32 old_write_domain, old_read_domains;
3205
3206         /* Force a pagefault for domain tracking on next user access */
3207         i915_gem_release_mmap(obj);
3208
3209         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3210                 return;
3211
3212         /* Wait for any direct GTT access to complete */
3213         mb();
3214
3215         old_read_domains = obj->base.read_domains;
3216         old_write_domain = obj->base.write_domain;
3217
3218         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
3219         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
3220
3221         trace_i915_gem_object_change_domain(obj,
3222                                             old_read_domains,
3223                                             old_write_domain);
3224 }
3225
3226 int i915_vma_unbind(struct i915_vma *vma)
3227 {
3228         struct drm_i915_gem_object *obj = vma->obj;
3229         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3230         int ret;
3231
3232         if (list_empty(&vma->vma_link))
3233                 return 0;
3234
3235         if (!drm_mm_node_allocated(&vma->node)) {
3236                 i915_gem_vma_destroy(vma);
3237                 return 0;
3238         }
3239
3240         if (vma->pin_count)
3241                 return -EBUSY;
3242
3243         BUG_ON(obj->pages == NULL);
3244
3245         ret = i915_gem_object_wait_rendering(obj, false);
3246         if (ret)
3247                 return ret;
3248         /* Continue on if we fail due to EIO, the GPU is hung so we
3249          * should be safe and we need to cleanup or else we might
3250          * cause memory corruption through use-after-free.
3251          */
3252
3253         if (i915_is_ggtt(vma->vm) &&
3254             vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
3255                 i915_gem_object_finish_gtt(obj);
3256
3257                 /* release the fence reg _after_ flushing */
3258                 ret = i915_gem_object_put_fence(obj);
3259                 if (ret)
3260                         return ret;
3261         }
3262
3263         trace_i915_vma_unbind(vma);
3264
3265         vma->vm->unbind_vma(vma);
3266         vma->bound = 0;
3267
3268         list_del_init(&vma->mm_list);
3269         if (i915_is_ggtt(vma->vm)) {
3270                 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
3271                         obj->map_and_fenceable = false;
3272                 } else if (vma->ggtt_view.pages) {
3273                         sg_free_table(vma->ggtt_view.pages);
3274                         kfree(vma->ggtt_view.pages);
3275                         vma->ggtt_view.pages = NULL;
3276                 }
3277         }
3278
3279         drm_mm_remove_node(&vma->node);
3280         i915_gem_vma_destroy(vma);
3281
3282         /* Since the unbound list is global, only move to that list if
3283          * no more VMAs exist. */
3284         if (list_empty(&obj->vma_list)) {
3285                 i915_gem_gtt_finish_object(obj);
3286                 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
3287         }
3288
3289         /* And finally now the object is completely decoupled from this vma,
3290          * we can drop its hold on the backing storage and allow it to be
3291          * reaped by the shrinker.
3292          */
3293         i915_gem_object_unpin_pages(obj);
3294
3295         return 0;
3296 }
3297
3298 int i915_gpu_idle(struct drm_device *dev)
3299 {
3300         struct drm_i915_private *dev_priv = dev->dev_private;
3301         struct intel_engine_cs *ring;
3302         int ret, i;
3303
3304         /* Flush everything onto the inactive list. */
3305         for_each_ring(ring, dev_priv, i) {
3306                 if (!i915.enable_execlists) {
3307                         struct drm_i915_gem_request *req;
3308
3309                         ret = i915_gem_request_alloc(ring, ring->default_context, &req);
3310                         if (ret)
3311                                 return ret;
3312
3313                         ret = i915_switch_context(req);
3314                         if (ret) {
3315                                 i915_gem_request_cancel(req);
3316                                 return ret;
3317                         }
3318
3319                         i915_add_request_no_flush(req);
3320                 }
3321
3322                 ret = intel_ring_idle(ring);
3323                 if (ret)
3324                         return ret;
3325         }
3326
3327         WARN_ON(i915_verify_lists(dev));
3328         return 0;
3329 }
3330
3331 static void i965_write_fence_reg(struct drm_device *dev, int reg,
3332                                  struct drm_i915_gem_object *obj)
3333 {
3334         struct drm_i915_private *dev_priv = dev->dev_private;
3335         int fence_reg;
3336         int fence_pitch_shift;
3337
3338         if (INTEL_INFO(dev)->gen >= 6) {
3339                 fence_reg = FENCE_REG_SANDYBRIDGE_0;
3340                 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
3341         } else {
3342                 fence_reg = FENCE_REG_965_0;
3343                 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
3344         }
3345
3346         fence_reg += reg * 8;
3347
3348         /* To work around incoherency with non-atomic 64-bit register updates,
3349          * we split the 64-bit update into two 32-bit writes. In order
3350          * for a partial fence not to be evaluated between writes, we
3351          * precede the update with a write to turn off the fence register,
3352          * and only enable the fence as the last step.
3353          *
3354          * For extra levels of paranoia, we make sure each step lands
3355          * before applying the next step.
3356          */
3357         I915_WRITE(fence_reg, 0);
3358         POSTING_READ(fence_reg);
3359
3360         if (obj) {
3361                 u32 size = i915_gem_obj_ggtt_size(obj);
3362                 uint64_t val;
3363
3364                 /* Adjust fence size to match tiled area */
3365                 if (obj->tiling_mode != I915_TILING_NONE) {
3366                         uint32_t row_size = obj->stride *
3367                                 (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
3368                         size = (size / row_size) * row_size;
3369                 }
3370
3371                 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
3372                                  0xfffff000) << 32;
3373                 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
3374                 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
3375                 if (obj->tiling_mode == I915_TILING_Y)
3376                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
3377                 val |= I965_FENCE_REG_VALID;
3378
3379                 I915_WRITE(fence_reg + 4, val >> 32);
3380                 POSTING_READ(fence_reg + 4);
3381
3382                 I915_WRITE(fence_reg + 0, val);
3383                 POSTING_READ(fence_reg);
3384         } else {
3385                 I915_WRITE(fence_reg + 4, 0);
3386                 POSTING_READ(fence_reg + 4);
3387         }
3388 }
3389
3390 static void i915_write_fence_reg(struct drm_device *dev, int reg,
3391                                  struct drm_i915_gem_object *obj)
3392 {
3393         struct drm_i915_private *dev_priv = dev->dev_private;
3394         u32 val;
3395
3396         if (obj) {
3397                 u32 size = i915_gem_obj_ggtt_size(obj);
3398                 int pitch_val;
3399                 int tile_width;
3400
3401                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
3402                      (size & -size) != size ||
3403                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3404                      "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
3405                      i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
3406
3407                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
3408                         tile_width = 128;
3409                 else
3410                         tile_width = 512;
3411
3412         /* Note: the pitch must be a power-of-two number of tile widths */
3413                 pitch_val = obj->stride / tile_width;
3414                 pitch_val = ffs(pitch_val) - 1;
3415
3416                 val = i915_gem_obj_ggtt_offset(obj);
3417                 if (obj->tiling_mode == I915_TILING_Y)
3418                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3419                 val |= I915_FENCE_SIZE_BITS(size);
3420                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3421                 val |= I830_FENCE_REG_VALID;
3422         } else
3423                 val = 0;
3424
3425         if (reg < 8)
3426                 reg = FENCE_REG_830_0 + reg * 4;
3427         else
3428                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
3429
3430         I915_WRITE(reg, val);
3431         POSTING_READ(reg);
3432 }
3433
3434 static void i830_write_fence_reg(struct drm_device *dev, int reg,
3435                                 struct drm_i915_gem_object *obj)
3436 {
3437         struct drm_i915_private *dev_priv = dev->dev_private;
3438         uint32_t val;
3439
3440         if (obj) {
3441                 u32 size = i915_gem_obj_ggtt_size(obj);
3442                 uint32_t pitch_val;
3443
3444                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
3445                      (size & -size) != size ||
3446                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3447                      "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
3448                      i915_gem_obj_ggtt_offset(obj), size);
3449
3450                 pitch_val = obj->stride / 128;
3451                 pitch_val = ffs(pitch_val) - 1;
3452
3453                 val = i915_gem_obj_ggtt_offset(obj);
3454                 if (obj->tiling_mode == I915_TILING_Y)
3455                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3456                 val |= I830_FENCE_SIZE_BITS(size);
3457                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3458                 val |= I830_FENCE_REG_VALID;
3459         } else
3460                 val = 0;
3461
3462         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
3463         POSTING_READ(FENCE_REG_830_0 + reg * 4);
3464 }
3465
3466 static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
3467 {
3468         return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
3469 }
3470
3471 static void i915_gem_write_fence(struct drm_device *dev, int reg,
3472                                  struct drm_i915_gem_object *obj)
3473 {
3474         struct drm_i915_private *dev_priv = dev->dev_private;
3475
3476         /* Ensure that all CPU reads are completed before installing a fence
3477          * and all writes before removing the fence.
3478          */
3479         if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
3480                 mb();
3481
3482         WARN(obj && (!obj->stride || !obj->tiling_mode),
3483              "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
3484              obj->stride, obj->tiling_mode);
3485
3486         if (IS_GEN2(dev))
3487                 i830_write_fence_reg(dev, reg, obj);
3488         else if (IS_GEN3(dev))
3489                 i915_write_fence_reg(dev, reg, obj);
3490         else if (INTEL_INFO(dev)->gen >= 4)
3491                 i965_write_fence_reg(dev, reg, obj);
3492
3493         /* And similarly be paranoid that no direct access to this region
3494          * is reordered to before the fence is installed.
3495          */
3496         if (i915_gem_object_needs_mb(obj))
3497                 mb();
3498 }
3499
3500 static inline int fence_number(struct drm_i915_private *dev_priv,
3501                                struct drm_i915_fence_reg *fence)
3502 {
3503         return fence - dev_priv->fence_regs;
3504 }
3505
3506 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
3507                                          struct drm_i915_fence_reg *fence,
3508                                          bool enable)
3509 {
3510         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3511         int reg = fence_number(dev_priv, fence);
3512
3513         i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
3514
3515         if (enable) {
3516                 obj->fence_reg = reg;
3517                 fence->obj = obj;
3518                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3519         } else {
3520                 obj->fence_reg = I915_FENCE_REG_NONE;
3521                 fence->obj = NULL;
3522                 list_del_init(&fence->lru_list);
3523         }
3524         obj->fence_dirty = false;
3525 }
3526
3527 static int
3528 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
3529 {
3530         if (obj->last_fenced_req) {
3531                 int ret = i915_wait_request(obj->last_fenced_req);
3532                 if (ret)
3533                         return ret;
3534
3535                 i915_gem_request_assign(&obj->last_fenced_req, NULL);
3536         }
3537
3538         return 0;
3539 }
3540
3541 int
3542 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3543 {
3544         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3545         struct drm_i915_fence_reg *fence;
3546         int ret;
3547
3548         ret = i915_gem_object_wait_fence(obj);
3549         if (ret)
3550                 return ret;
3551
3552         if (obj->fence_reg == I915_FENCE_REG_NONE)
3553                 return 0;
3554
3555         fence = &dev_priv->fence_regs[obj->fence_reg];
3556
3557         if (WARN_ON(fence->pin_count))
3558                 return -EBUSY;
3559
3560         i915_gem_object_fence_lost(obj);
3561         i915_gem_object_update_fence(obj, fence, false);
3562
3563         return 0;
3564 }
3565
3566 static struct drm_i915_fence_reg *
3567 i915_find_fence_reg(struct drm_device *dev)
3568 {
3569         struct drm_i915_private *dev_priv = dev->dev_private;
3570         struct drm_i915_fence_reg *reg, *avail;
3571         int i;
3572
3573         /* First try to find a free reg */
3574         avail = NULL;
3575         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3576                 reg = &dev_priv->fence_regs[i];
3577                 if (!reg->obj)
3578                         return reg;
3579
3580                 if (!reg->pin_count)
3581                         avail = reg;
3582         }
3583
3584         if (avail == NULL)
3585                 goto deadlock;
3586
3587         /* None available, try to steal one or wait for a user to finish */
3588         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3589                 if (reg->pin_count)
3590                         continue;
3591
3592                 return reg;
3593         }
3594
3595 deadlock:
3596         /* Wait for completion of pending flips which consume fences */
3597         if (intel_has_pending_fb_unpin(dev))
3598                 return ERR_PTR(-EAGAIN);
3599
3600         return ERR_PTR(-EDEADLK);
3601 }
3602
3603 /**
3604  * i915_gem_object_get_fence - set up fencing for an object
3605  * @obj: object to map through a fence reg
3606  *
3607  * When mapping objects through the GTT, userspace wants to be able to write
3608  * to them without having to worry about swizzling if the object is tiled.
3609  * This function walks the fence regs looking for a free one for @obj,
3610  * stealing one if it can't find any.
3611  *
3612  * It then sets up the reg based on the object's properties: address, pitch
3613  * and tiling format.
3614  *
3615  * For an untiled surface, this removes any existing fence.
3616  */
3617 int
3618 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3619 {
3620         struct drm_device *dev = obj->base.dev;
3621         struct drm_i915_private *dev_priv = dev->dev_private;
3622         bool enable = obj->tiling_mode != I915_TILING_NONE;
3623         struct drm_i915_fence_reg *reg;
3624         int ret;
3625
3626         /* Have we updated the tiling parameters upon the object and so
3627          * will need to serialise the write to the associated fence register?
3628          */
3629         if (obj->fence_dirty) {
3630                 ret = i915_gem_object_wait_fence(obj);
3631                 if (ret)
3632                         return ret;
3633         }
3634
3635         /* Just update our place in the LRU if our fence is getting reused. */
3636         if (obj->fence_reg != I915_FENCE_REG_NONE) {
3637                 reg = &dev_priv->fence_regs[obj->fence_reg];
3638                 if (!obj->fence_dirty) {
3639                         list_move_tail(&reg->lru_list,
3640                                        &dev_priv->mm.fence_list);
3641                         return 0;
3642                 }
3643         } else if (enable) {
3644                 if (WARN_ON(!obj->map_and_fenceable))
3645                         return -EINVAL;
3646
3647                 reg = i915_find_fence_reg(dev);
3648                 if (IS_ERR(reg))
3649                         return PTR_ERR(reg);
3650
3651                 if (reg->obj) {
3652                         struct drm_i915_gem_object *old = reg->obj;
3653
3654                         ret = i915_gem_object_wait_fence(old);
3655                         if (ret)
3656                                 return ret;
3657
3658                         i915_gem_object_fence_lost(old);
3659                 }
3660         } else
3661                 return 0;
3662
3663         i915_gem_object_update_fence(obj, reg, enable);
3664
3665         return 0;
3666 }
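
/*
 * Illustrative sketch, not part of the driver: a typical fence sequence for
 * CPU access through the aperture, assuming the object has already been
 * pinned into the mappable GGTT by the caller.
 *
 *	ret = i915_gem_object_get_fence(obj);	// find, steal or reuse a reg
 *	if (ret)
 *		return ret;
 *
 *	if (i915_gem_object_pin_fence(obj)) {	// keep it from being stolen
 *		// ... access the object through the fenced GTT mapping ...
 *		i915_gem_object_unpin_fence(obj);
 *	}
 */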
3667
3668 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
3669                                      unsigned long cache_level)
3670 {
3671         struct drm_mm_node *gtt_space = &vma->node;
3672         struct drm_mm_node *other;
3673
3674         /*
3675          * On some machines we have to be careful when putting differing types
3676          * of snoopable memory together to avoid the prefetcher crossing memory
3677          * domains and dying. During vm initialisation, we decide whether or not
3678          * these constraints apply and set the drm_mm.color_adjust
3679          * appropriately.
3680          */
3681         if (vma->vm->mm.color_adjust == NULL)
3682                 return true;
3683
3684         if (!drm_mm_node_allocated(gtt_space))
3685                 return true;
3686
3687         if (list_empty(&gtt_space->node_list))
3688                 return true;
3689
3690         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3691         if (other->allocated && !other->hole_follows && other->color != cache_level)
3692                 return false;
3693
3694         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3695         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3696                 return false;
3697
3698         return true;
3699 }
3700
3701 /**
3702  * Finds free space in the GTT aperture and binds the object or a view of it
3703  * there.
3704  */
3705 static struct i915_vma *
3706 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3707                            struct i915_address_space *vm,
3708                            const struct i915_ggtt_view *ggtt_view,
3709                            unsigned alignment,
3710                            uint64_t flags)
3711 {
3712         struct drm_device *dev = obj->base.dev;
3713         struct drm_i915_private *dev_priv = dev->dev_private;
3714         u32 size, fence_size, fence_alignment, unfenced_alignment;
3715         unsigned long start =
3716                 flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3717         unsigned long end =
3718                 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
3719         struct i915_vma *vma;
3720         int ret;
3721
3722         if (i915_is_ggtt(vm)) {
3723                 u32 view_size;
3724
3725                 if (WARN_ON(!ggtt_view))
3726                         return ERR_PTR(-EINVAL);
3727
3728                 view_size = i915_ggtt_view_size(obj, ggtt_view);
3729
3730                 fence_size = i915_gem_get_gtt_size(dev,
3731                                                    view_size,
3732                                                    obj->tiling_mode);
3733                 fence_alignment = i915_gem_get_gtt_alignment(dev,
3734                                                              view_size,
3735                                                              obj->tiling_mode,
3736                                                              true);
3737                 unfenced_alignment = i915_gem_get_gtt_alignment(dev,
3738                                                                 view_size,
3739                                                                 obj->tiling_mode,
3740                                                                 false);
3741                 size = flags & PIN_MAPPABLE ? fence_size : view_size;
3742         } else {
3743                 fence_size = i915_gem_get_gtt_size(dev,
3744                                                    obj->base.size,
3745                                                    obj->tiling_mode);
3746                 fence_alignment = i915_gem_get_gtt_alignment(dev,
3747                                                              obj->base.size,
3748                                                              obj->tiling_mode,
3749                                                              true);
3750                 unfenced_alignment =
3751                         i915_gem_get_gtt_alignment(dev,
3752                                                    obj->base.size,
3753                                                    obj->tiling_mode,
3754                                                    false);
3755                 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3756         }
3757
3758         if (alignment == 0)
3759                 alignment = flags & PIN_MAPPABLE ? fence_alignment :
3760                                                 unfenced_alignment;
3761         if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3762                 DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
3763                           ggtt_view ? ggtt_view->type : 0,
3764                           alignment);
3765                 return ERR_PTR(-EINVAL);
3766         }
3767
3768         /* If binding the object/GGTT view requires more space than the entire
3769          * aperture has, reject it early before evicting everything in a vain
3770          * attempt to find space.
3771          */
3772         if (size > end) {
3773                 DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%lu\n",
3774                           ggtt_view ? ggtt_view->type : 0,
3775                           size,
3776                           flags & PIN_MAPPABLE ? "mappable" : "total",
3777                           end);
3778                 return ERR_PTR(-E2BIG);
3779         }
3780
3781         ret = i915_gem_object_get_pages(obj);
3782         if (ret)
3783                 return ERR_PTR(ret);
3784
3785         i915_gem_object_pin_pages(obj);
3786
3787         vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
3788                           i915_gem_obj_lookup_or_create_vma(obj, vm);
3789
3790         if (IS_ERR(vma))
3791                 goto err_unpin;
3792
3793 search_free:
3794         ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3795                                                   size, alignment,
3796                                                   obj->cache_level,
3797                                                   start, end,
3798                                                   DRM_MM_SEARCH_DEFAULT,
3799                                                   DRM_MM_CREATE_DEFAULT);
3800         if (ret) {
3801                 ret = i915_gem_evict_something(dev, vm, size, alignment,
3802                                                obj->cache_level,
3803                                                start, end,
3804                                                flags);
3805                 if (ret == 0)
3806                         goto search_free;
3807
3808                 goto err_free_vma;
3809         }
3810         if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
3811                 ret = -EINVAL;
3812                 goto err_remove_node;
3813         }
3814
3815         ret = i915_gem_gtt_prepare_object(obj);
3816         if (ret)
3817                 goto err_remove_node;
3818
3819         trace_i915_vma_bind(vma, flags);
3820         ret = i915_vma_bind(vma, obj->cache_level, flags);
3821         if (ret)
3822                 goto err_finish_gtt;
3823
3824         list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3825         list_add_tail(&vma->mm_list, &vm->inactive_list);
3826
3827         return vma;
3828
3829 err_finish_gtt:
3830         i915_gem_gtt_finish_object(obj);
3831 err_remove_node:
3832         drm_mm_remove_node(&vma->node);
3833 err_free_vma:
3834         i915_gem_vma_destroy(vma);
3835         vma = ERR_PTR(ret);
3836 err_unpin:
3837         i915_gem_object_unpin_pages(obj);
3838         return vma;
3839 }
3840
3841 bool
3842 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3843                         bool force)
3844 {
3845         /* If we don't have a page list set up, then we're not pinned
3846          * to GPU, and we can ignore the cache flush because it'll happen
3847          * again at bind time.
3848          */
3849         if (obj->pages == NULL)
3850                 return false;
3851
3852         /*
3853          * Stolen memory is always coherent with the GPU as it is explicitly
3854          * marked as wc by the system, or the system is cache-coherent.
3855          */
3856         if (obj->stolen || obj->phys_handle)
3857                 return false;
3858
3859         /* If the GPU is snooping the contents of the CPU cache,
3860          * we do not need to manually clear the CPU cache lines.  However,
3861          * the caches are only snooped when the render cache is
3862          * flushed/invalidated.  As we always have to emit invalidations
3863          * and flushes when moving into and out of the RENDER domain, correct
3864          * snooping behaviour occurs naturally as the result of our domain
3865          * tracking.
3866          */
3867         if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3868                 obj->cache_dirty = true;
3869                 return false;
3870         }
3871
3872         trace_i915_gem_object_clflush(obj);
3873         drm_clflush_sg(obj->pages);
3874         obj->cache_dirty = false;
3875
3876         return true;
3877 }
3878
3879 /** Flushes the GTT write domain for the object if it's dirty. */
3880 static void
3881 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3882 {
3883         uint32_t old_write_domain;
3884
3885         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3886                 return;
3887
3888         /* No actual flushing is required for the GTT write domain.  Writes
3889          * to it immediately go to main memory as far as we know, so there's
3890          * no chipset flush.  It also doesn't land in render cache.
3891          *
3892          * However, we do have to enforce the order so that all writes through
3893          * the GTT land before any writes to the device, such as updates to
3894          * the GATT itself.
3895          */
3896         wmb();
3897
3898         old_write_domain = obj->base.write_domain;
3899         obj->base.write_domain = 0;
3900
3901         intel_fb_obj_flush(obj, false);
3902
3903         trace_i915_gem_object_change_domain(obj,
3904                                             obj->base.read_domains,
3905                                             old_write_domain);
3906 }
3907
3908 /** Flushes the CPU write domain for the object if it's dirty. */
3909 static void
3910 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3911 {
3912         uint32_t old_write_domain;
3913
3914         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3915                 return;
3916
3917         if (i915_gem_clflush_object(obj, obj->pin_display))
3918                 i915_gem_chipset_flush(obj->base.dev);
3919
3920         old_write_domain = obj->base.write_domain;
3921         obj->base.write_domain = 0;
3922
3923         intel_fb_obj_flush(obj, false);
3924
3925         trace_i915_gem_object_change_domain(obj,
3926                                             obj->base.read_domains,
3927                                             old_write_domain);
3928 }
3929
3930 /**
3931  * Moves a single object to the GTT read, and possibly write domain.
3932  *
3933  * This function returns when the move is complete, including waiting on
3934  * flushes to occur.
3935  */
3936 int
3937 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3938 {
3939         uint32_t old_write_domain, old_read_domains;
3940         struct i915_vma *vma;
3941         int ret;
3942
3943         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3944                 return 0;
3945
3946         ret = i915_gem_object_wait_rendering(obj, !write);
3947         if (ret)
3948                 return ret;
3949
3950         /* Flush and acquire obj->pages so that we are coherent through
3951          * direct access in memory with previous cached writes through
3952          * shmemfs and that our cache domain tracking remains valid.
3953          * For example, if the obj->filp was moved to swap without us
3954          * being notified and releasing the pages, we would mistakenly
3955          * continue to assume that the obj remained out of the CPU cached
3956          * domain.
3957          */
3958         ret = i915_gem_object_get_pages(obj);
3959         if (ret)
3960                 return ret;
3961
3962         i915_gem_object_flush_cpu_write_domain(obj);
3963
3964         /* Serialise direct access to this object with the barriers for
3965          * coherent writes from the GPU, by effectively invalidating the
3966          * GTT domain upon first access.
3967          */
3968         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3969                 mb();
3970
3971         old_write_domain = obj->base.write_domain;
3972         old_read_domains = obj->base.read_domains;
3973
3974         /* It should now be out of any other write domains, and we can update
3975          * the domain values for our changes.
3976          */
3977         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3978         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3979         if (write) {
3980                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3981                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3982                 obj->dirty = 1;
3983         }
3984
3985         if (write)
3986                 intel_fb_obj_invalidate(obj, ORIGIN_GTT);
3987
3988         trace_i915_gem_object_change_domain(obj,
3989                                             old_read_domains,
3990                                             old_write_domain);
3991
3992         /* And bump the LRU for this access */
3993         vma = i915_gem_obj_to_ggtt(obj);
3994         if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
3995                 list_move_tail(&vma->mm_list,
3996                                &to_i915(obj->base.dev)->gtt.base.inactive_list);
3997
3998         return 0;
3999 }
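
/*
 * Illustrative sketch, not part of the driver: pinning an object into the
 * mappable GGTT and moving it to the GTT write domain before writing through
 * the aperture. Assumes struct_mutex is held and the normal GGTT view.
 *
 *	ret = i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal,
 *				       0, PIN_MAPPABLE);
 *	if (ret)
 *		return ret;
 *
 *	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *	if (ret == 0) {
 *		// ... write via the GTT mapping; obj->dirty is now set ...
 *	}
 *
 *	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
 */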
4000
4001 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
4002                                     enum i915_cache_level cache_level)
4003 {
4004         struct drm_device *dev = obj->base.dev;
4005         struct i915_vma *vma, *next;
4006         int ret;
4007
4008         if (obj->cache_level == cache_level)
4009                 return 0;
4010
4011         if (i915_gem_obj_is_pinned(obj)) {
4012                 DRM_DEBUG("can not change the cache level of pinned objects\n");
4013                 return -EBUSY;
4014         }
4015
4016         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4017                 if (!i915_gem_valid_gtt_space(vma, cache_level)) {
4018                         ret = i915_vma_unbind(vma);
4019                         if (ret)
4020                                 return ret;
4021                 }
4022         }
4023
4024         if (i915_gem_obj_bound_any(obj)) {
4025                 ret = i915_gem_object_wait_rendering(obj, false);
4026                 if (ret)
4027                         return ret;
4028
4029                 i915_gem_object_finish_gtt(obj);
4030
4031                 /* Before SandyBridge, you could not use tiling or fence
4032                  * registers with snooped memory, so relinquish any fences
4033                  * currently pointing to our region in the aperture.
4034                  */
4035                 if (INTEL_INFO(dev)->gen < 6) {
4036                         ret = i915_gem_object_put_fence(obj);
4037                         if (ret)
4038                                 return ret;
4039                 }
4040
4041                 list_for_each_entry(vma, &obj->vma_list, vma_link)
4042                         if (drm_mm_node_allocated(&vma->node)) {
4043                                 ret = i915_vma_bind(vma, cache_level,
4044                                                     PIN_UPDATE);
4045                                 if (ret)
4046                                         return ret;
4047                         }
4048         }
4049
4050         list_for_each_entry(vma, &obj->vma_list, vma_link)
4051                 vma->node.color = cache_level;
4052         obj->cache_level = cache_level;
4053
4054         if (obj->cache_dirty &&
4055             obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
4056             cpu_write_needs_clflush(obj)) {
4057                 if (i915_gem_clflush_object(obj, true))
4058                         i915_gem_chipset_flush(obj->base.dev);
4059         }
4060
4061         return 0;
4062 }
4063
4064 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
4065                                struct drm_file *file)
4066 {
4067         struct drm_i915_gem_caching *args = data;
4068         struct drm_i915_gem_object *obj;
4069
4070         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4071         if (&obj->base == NULL)
4072                 return -ENOENT;
4073
4074         switch (obj->cache_level) {
4075         case I915_CACHE_LLC:
4076         case I915_CACHE_L3_LLC:
4077                 args->caching = I915_CACHING_CACHED;
4078                 break;
4079
4080         case I915_CACHE_WT:
4081                 args->caching = I915_CACHING_DISPLAY;
4082                 break;
4083
4084         default:
4085                 args->caching = I915_CACHING_NONE;
4086                 break;
4087         }
4088
4089         drm_gem_object_unreference_unlocked(&obj->base);
4090         return 0;
4091 }
4092
4093 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
4094                                struct drm_file *file)
4095 {
4096         struct drm_i915_gem_caching *args = data;
4097         struct drm_i915_gem_object *obj;
4098         enum i915_cache_level level;
4099         int ret;
4100
4101         switch (args->caching) {
4102         case I915_CACHING_NONE:
4103                 level = I915_CACHE_NONE;
4104                 break;
4105         case I915_CACHING_CACHED:
4106                 level = I915_CACHE_LLC;
4107                 break;
4108         case I915_CACHING_DISPLAY:
4109                 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
4110                 break;
4111         default:
4112                 return -EINVAL;
4113         }
4114
4115         ret = i915_mutex_lock_interruptible(dev);
4116         if (ret)
4117                 return ret;
4118
4119         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4120         if (&obj->base == NULL) {
4121                 ret = -ENOENT;
4122                 goto unlock;
4123         }
4124
4125         ret = i915_gem_object_set_cache_level(obj, level);
4126
4127         drm_gem_object_unreference(&obj->base);
4128 unlock:
4129         mutex_unlock(&dev->struct_mutex);
4130         return ret;
4131 }
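
/*
 * Illustrative sketch, not part of the driver: how userspace might request
 * LLC caching for a buffer via the ioctl above, assuming libdrm's drmIoctl()
 * and placeholder "fd"/"handle" values.
 *
 *	struct drm_i915_gem_caching caching = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,	// maps to I915_CACHE_LLC
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &caching))
 *		return -errno;
 */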
4132
4133 /*
4134  * Prepare buffer for display plane (scanout, cursors, etc).
4135  * Can be called from an uninterruptible phase (modesetting) and allows
4136  * any flushes to be pipelined (for pageflips).
4137  */
4138 int
4139 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
4140                                      u32 alignment,
4141                                      struct intel_engine_cs *pipelined,
4142                                      struct drm_i915_gem_request **pipelined_request,
4143                                      const struct i915_ggtt_view *view)
4144 {
4145         u32 old_read_domains, old_write_domain;
4146         int ret;
4147
4148         ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
4149         if (ret)
4150                 return ret;
4151
4152         /* Mark the pin_display early so that we account for the
4153          * display coherency whilst setting up the cache domains.
4154          */
4155         obj->pin_display++;
4156
4157         /* The display engine is not coherent with the LLC cache on gen6.  As
4158          * a result, we make sure that the pinning that is about to occur is
4159          * done with uncached PTEs. This is the lowest common denominator for
4160          * all chipsets.
4161          *
4162          * However for gen6+, we could do better by using the GFDT bit instead
4163          * of uncaching, which would allow us to flush all the LLC-cached data
4164          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
4165          */
4166         ret = i915_gem_object_set_cache_level(obj,
4167                                               HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
4168         if (ret)
4169                 goto err_unpin_display;
4170
4171         /* As the user may map the buffer once pinned in the display plane
4172          * (e.g. libkms for the bootup splash), we have to ensure that we
4173          * always use map_and_fenceable for all scanout buffers.
4174          */
4175         ret = i915_gem_object_ggtt_pin(obj, view, alignment,
4176                                        view->type == I915_GGTT_VIEW_NORMAL ?
4177                                        PIN_MAPPABLE : 0);
4178         if (ret)
4179                 goto err_unpin_display;
4180
4181         i915_gem_object_flush_cpu_write_domain(obj);
4182
4183         old_write_domain = obj->base.write_domain;
4184         old_read_domains = obj->base.read_domains;
4185
4186         /* It should now be out of any other write domains, and we can update
4187          * the domain values for our changes.
4188          */
4189         obj->base.write_domain = 0;
4190         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
4191
4192         trace_i915_gem_object_change_domain(obj,
4193                                             old_read_domains,
4194                                             old_write_domain);
4195
4196         return 0;
4197
4198 err_unpin_display:
4199         obj->pin_display--;
4200         return ret;
4201 }
4202
4203 void
4204 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
4205                                          const struct i915_ggtt_view *view)
4206 {
4207         if (WARN_ON(obj->pin_display == 0))
4208                 return;
4209
4210         i915_gem_object_ggtt_unpin_view(obj, view);
4211
4212         obj->pin_display--;
4213 }
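
/*
 * Illustrative sketch, not part of the driver: how a scanout path might pair
 * the two helpers above. Any request returned through the pipelined_request
 * pointer must still be submitted by the caller, as with i915_gem_object_sync().
 *
 *	struct drm_i915_gem_request *req = NULL;
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, alignment, ring, &req,
 *						   &i915_ggtt_view_normal);
 *	if (ret)
 *		return ret;
 *	// ... scan out from the object ...
 *
 *	// once the buffer is no longer being scanned out:
 *	i915_gem_object_unpin_from_display_plane(obj, &i915_ggtt_view_normal);
 */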
4214
4215 /**
4216  * Moves a single object to the CPU read, and possibly write domain.
4217  *
4218  * This function returns when the move is complete, including waiting on
4219  * flushes to occur.
4220  */
4221 int
4222 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
4223 {
4224         uint32_t old_write_domain, old_read_domains;
4225         int ret;
4226
4227         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
4228                 return 0;
4229
4230         ret = i915_gem_object_wait_rendering(obj, !write);
4231         if (ret)
4232                 return ret;
4233
4234         i915_gem_object_flush_gtt_write_domain(obj);
4235
4236         old_write_domain = obj->base.write_domain;
4237         old_read_domains = obj->base.read_domains;
4238
4239         /* Flush the CPU cache if it's still invalid. */
4240         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
4241                 i915_gem_clflush_object(obj, false);
4242
4243                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
4244         }
4245
4246         /* It should now be out of any other write domains, and we can update
4247          * the domain values for our changes.
4248          */
4249         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
4250
4251         /* If we're writing through the CPU, then the GPU read domains will
4252          * need to be invalidated at next use.
4253          */
4254         if (write) {
4255                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4256                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4257         }
4258
4259         if (write)
4260                 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
4261
4262         trace_i915_gem_object_change_domain(obj,
4263                                             old_read_domains,
4264                                             old_write_domain);
4265
4266         return 0;
4267 }
4268
4269 /* Throttle our rendering by waiting until the ring has completed our requests
4270  * emitted over 20 msec ago.
4271  *
4272  * Note that if we were to use the current jiffies each time around the loop,
4273  * we wouldn't escape the function with any frames outstanding if the time to
4274  * render a frame was over 20ms.
4275  *
4276  * This should get us reasonable parallelism between CPU and GPU but also
4277  * relatively low latency when blocking on a particular request to finish.
4278  */
4279 static int
4280 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4281 {
4282         struct drm_i915_private *dev_priv = dev->dev_private;
4283         struct drm_i915_file_private *file_priv = file->driver_priv;
4284         unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
4285         struct drm_i915_gem_request *request, *target = NULL;
4286         unsigned reset_counter;
4287         int ret;
4288
4289         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
4290         if (ret)
4291                 return ret;
4292
4293         ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
4294         if (ret)
4295                 return ret;
4296
4297         spin_lock(&file_priv->mm.lock);
4298         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
4299                 if (time_after_eq(request->emitted_jiffies, recent_enough))
4300                         break;
4301
4302                 /*
4303                  * Note that the request might not have been submitted yet, in
4304                  * which case emitted_jiffies will be zero.
4305                  */
4306                 if (!request->emitted_jiffies)
4307                         continue;
4308
4309                 target = request;
4310         }
4311         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
4312         if (target)
4313                 i915_gem_request_reference(target);
4314         spin_unlock(&file_priv->mm.lock);
4315
4316         if (target == NULL)
4317                 return 0;
4318
4319         ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
4320         if (ret == 0)
4321                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
4322
4323         i915_gem_request_unreference__unlocked(target);
4324
4325         return ret;
4326 }
4327
4328 static bool
4329 i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
4330 {
4331         struct drm_i915_gem_object *obj = vma->obj;
4332
4333         if (alignment &&
4334             vma->node.start & (alignment - 1))
4335                 return true;
4336
4337         if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
4338                 return true;
4339
4340         if (flags & PIN_OFFSET_BIAS &&
4341             vma->node.start < (flags & PIN_OFFSET_MASK))
4342                 return true;
4343
4344         return false;
4345 }
4346
4347 static int
4348 i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
4349                        struct i915_address_space *vm,
4350                        const struct i915_ggtt_view *ggtt_view,
4351                        uint32_t alignment,
4352                        uint64_t flags)
4353 {
4354         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4355         struct i915_vma *vma;
4356         unsigned bound;
4357         int ret;
4358
4359         if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
4360                 return -ENODEV;
4361
4362         if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
4363                 return -EINVAL;
4364
4365         if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
4366                 return -EINVAL;
4367
4368         if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
4369                 return -EINVAL;
4370
4371         vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
4372                           i915_gem_obj_to_vma(obj, vm);
4373
4374         if (IS_ERR(vma))
4375                 return PTR_ERR(vma);
4376
4377         if (vma) {
4378                 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
4379                         return -EBUSY;
4380
4381                 if (i915_vma_misplaced(vma, alignment, flags)) {
4382                         unsigned long offset;
4383                         offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) :
4384                                              i915_gem_obj_offset(obj, vm);
4385                         WARN(vma->pin_count,
4386                              "bo is already pinned in %s with incorrect alignment:"
4387                              " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
4388                              " obj->map_and_fenceable=%d\n",
4389                              ggtt_view ? "ggtt" : "ppgtt",
4390                              offset,
4391                              alignment,
4392                              !!(flags & PIN_MAPPABLE),
4393                              obj->map_and_fenceable);
4394                         ret = i915_vma_unbind(vma);
4395                         if (ret)
4396                                 return ret;
4397
4398                         vma = NULL;
4399                 }
4400         }
4401
4402         bound = vma ? vma->bound : 0;
4403         if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
4404                 vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
4405                                                  flags);
4406                 if (IS_ERR(vma))
4407                         return PTR_ERR(vma);
4408         } else {
4409                 ret = i915_vma_bind(vma, obj->cache_level, flags);
4410                 if (ret)
4411                         return ret;
4412         }
4413
4414         if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
4415             (bound ^ vma->bound) & GLOBAL_BIND) {
4416                 bool mappable, fenceable;
4417                 u32 fence_size, fence_alignment;
4418
4419                 fence_size = i915_gem_get_gtt_size(obj->base.dev,
4420                                                    obj->base.size,
4421                                                    obj->tiling_mode);
4422                 fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
4423                                                              obj->base.size,
4424                                                              obj->tiling_mode,
4425                                                              true);
4426
4427                 fenceable = (vma->node.size == fence_size &&
4428                              (vma->node.start & (fence_alignment - 1)) == 0);
4429
4430                 mappable = (vma->node.start + fence_size <=
4431                             dev_priv->gtt.mappable_end);
4432
4433                 obj->map_and_fenceable = mappable && fenceable;
4434
4435                 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
4436         }
4437
4438         vma->pin_count++;
4439         return 0;
4440 }
4441
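/*
 * i915_gem_object_pin - pin an object into the given address space, using
 * the normal GGTT view when @vm is the global GTT and no view otherwise.
 * Thin wrapper around i915_gem_object_do_pin().
 */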
4442 int
4443 i915_gem_object_pin(struct drm_i915_gem_object *obj,
4444                     struct i915_address_space *vm,
4445                     uint32_t alignment,
4446                     uint64_t flags)
4447 {
4448         return i915_gem_object_do_pin(obj, vm,
4449                                       i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
4450                                       alignment, flags);
4451 }
4452
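/*
 * i915_gem_object_ggtt_pin - pin an object into the global GTT through the
 * requested @view. PIN_GLOBAL is added to @flags and the pin count of the
 * resulting VMA is raised; drop it again with
 * i915_gem_object_ggtt_unpin_view().
 *
 * Purely illustrative (hypothetical) caller sketch:
 *
 *	ret = i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal,
 *				       0, PIN_MAPPABLE);
 *	if (ret)
 *		return ret;
 *	... access the object through the mappable aperture ...
 *	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
 */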
4453 int
4454 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
4455                          const struct i915_ggtt_view *view,
4456                          uint32_t alignment,
4457                          uint64_t flags)
4458 {
4459         if (WARN_ONCE(!view, "no view specified"))
4460                 return -EINVAL;
4461
4462         return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
4463                                       alignment, flags | PIN_GLOBAL);
4464 }
4465
4466 void
4467 i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
4468                                 const struct i915_ggtt_view *view)
4469 {
4470         struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
4471
4472         BUG_ON(!vma);
4473         WARN_ON(vma->pin_count == 0);
4474         WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
4475
4476         --vma->pin_count;
4477 }
4478
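/*
 * Pin the fence register currently backing @obj, if one is allocated.
 * Returns true and raises the fence pin count when a register is held,
 * false otherwise. A typical (hypothetical) usage pattern:
 *
 *	if (i915_gem_object_pin_fence(obj)) {
 *		... perform tiled/fenced GTT access ...
 *		i915_gem_object_unpin_fence(obj);
 *	}
 */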
4479 bool
4480 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
4481 {
4482         if (obj->fence_reg != I915_FENCE_REG_NONE) {
4483                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4484                 struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
4485
4486                 WARN_ON(!ggtt_vma ||
4487                         dev_priv->fence_regs[obj->fence_reg].pin_count >
4488                         ggtt_vma->pin_count);
4489                 dev_priv->fence_regs[obj->fence_reg].pin_count++;
4490                 return true;
4491         } else
4492                 return false;
4493 }
4494
4495 void
4496 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
4497 {
4498         if (obj->fence_reg != I915_FENCE_REG_NONE) {
4499                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4500                 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
4501                 dev_priv->fence_regs[obj->fence_reg].pin_count--;
4502         }
4503 }
4504
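/*
 * i915_gem_busy_ioctl - report whether an object is still in use by the GPU.
 * The mask of rings on which the object is active is packed into the upper
 * 16 bits of args->busy; if there is an outstanding write, the id of the
 * last ring to write the object is placed in the low bits.
 */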
4505 int
4506 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4507                     struct drm_file *file)
4508 {
4509         struct drm_i915_gem_busy *args = data;
4510         struct drm_i915_gem_object *obj;
4511         int ret;
4512
4513         ret = i915_mutex_lock_interruptible(dev);
4514         if (ret)
4515                 return ret;
4516
4517         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4518         if (&obj->base == NULL) {
4519                 ret = -ENOENT;
4520                 goto unlock;
4521         }
4522
4523         /* Count all active objects as busy, even if they are currently not used
4524          * by the gpu. Users of this interface expect objects to eventually
4525          * become non-busy without any further actions, therefore emit any
4526          * necessary flushes here.
4527          */
4528         ret = i915_gem_object_flush_active(obj);
4529         if (ret)
4530                 goto unref;
4531
4532         BUILD_BUG_ON(I915_NUM_RINGS > 16);
4533         args->busy = obj->active << 16;
4534         if (obj->last_write_req)
4535                 args->busy |= obj->last_write_req->ring->id;
4536
4537 unref:
4538         drm_gem_object_unreference(&obj->base);
4539 unlock:
4540         mutex_unlock(&dev->struct_mutex);
4541         return ret;
4542 }
4543
4544 int
4545 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4546                         struct drm_file *file_priv)
4547 {
4548         return i915_gem_ring_throttle(dev, file_priv);
4549 }
4550
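/*
 * i915_gem_madvise_ioctl - let userspace hint whether an object's backing
 * storage may be discarded under memory pressure (I915_MADV_DONTNEED) or
 * must be preserved (I915_MADV_WILLNEED). Pinned objects are rejected, and
 * a DONTNEED object that has already dropped its pages has its backing
 * storage truncated immediately.
 */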
4551 int
4552 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4553                        struct drm_file *file_priv)
4554 {
4555         struct drm_i915_private *dev_priv = dev->dev_private;
4556         struct drm_i915_gem_madvise *args = data;
4557         struct drm_i915_gem_object *obj;
4558         int ret;
4559
4560         switch (args->madv) {
4561         case I915_MADV_DONTNEED:
4562         case I915_MADV_WILLNEED:
4563             break;
4564         default:
4565             return -EINVAL;
4566         }
4567
4568         ret = i915_mutex_lock_interruptible(dev);
4569         if (ret)
4570                 return ret;
4571
4572         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
4573         if (&obj->base == NULL) {
4574                 ret = -ENOENT;
4575                 goto unlock;
4576         }
4577
4578         if (i915_gem_obj_is_pinned(obj)) {
4579                 ret = -EINVAL;
4580                 goto out;
4581         }
4582
4583         if (obj->pages &&
4584             obj->tiling_mode != I915_TILING_NONE &&
4585             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4586                 if (obj->madv == I915_MADV_WILLNEED)
4587                         i915_gem_object_unpin_pages(obj);
4588                 if (args->madv == I915_MADV_WILLNEED)
4589                         i915_gem_object_pin_pages(obj);
4590         }
4591
4592         if (obj->madv != __I915_MADV_PURGED)
4593                 obj->madv = args->madv;
4594
4595         /* if the object is no longer attached, discard its backing storage */
4596         if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
4597                 i915_gem_object_truncate(obj);
4598
4599         args->retained = obj->madv != __I915_MADV_PURGED;
4600
4601 out:
4602         drm_gem_object_unreference(&obj->base);
4603 unlock:
4604         mutex_unlock(&dev->struct_mutex);
4605         return ret;
4606 }
4607
4608 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4609                           const struct drm_i915_gem_object_ops *ops)
4610 {
4611         int i;
4612
4613         INIT_LIST_HEAD(&obj->global_list);
4614         for (i = 0; i < I915_NUM_RINGS; i++)
4615                 INIT_LIST_HEAD(&obj->ring_list[i]);
4616         INIT_LIST_HEAD(&obj->obj_exec_link);
4617         INIT_LIST_HEAD(&obj->vma_list);
4618         INIT_LIST_HEAD(&obj->batch_pool_link);
4619
4620         obj->ops = ops;
4621
4622         obj->fence_reg = I915_FENCE_REG_NONE;
4623         obj->madv = I915_MADV_WILLNEED;
4624
4625         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4626 }
4627
4628 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4629         .get_pages = i915_gem_object_get_pages_gtt,
4630         .put_pages = i915_gem_object_put_pages_gtt,
4631 };
4632
4633 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4634                                                   size_t size)
4635 {
4636         struct drm_i915_gem_object *obj;
4637         struct address_space *mapping;
4638         gfp_t mask;
4639
4640         obj = i915_gem_object_alloc(dev);
4641         if (obj == NULL)
4642                 return NULL;
4643
4644         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4645                 i915_gem_object_free(obj);
4646                 return NULL;
4647         }
4648
4649         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4650         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4651                 /* 965gm cannot relocate objects above 4GiB. */
4652                 mask &= ~__GFP_HIGHMEM;
4653                 mask |= __GFP_DMA32;
4654         }
4655
4656         mapping = file_inode(obj->base.filp)->i_mapping;
4657         mapping_set_gfp_mask(mapping, mask);
4658
4659         i915_gem_object_init(obj, &i915_gem_object_ops);
4660
4661         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4662         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4663
4664         if (HAS_LLC(dev)) {
4665                 /* On some devices, we can have the GPU use the LLC (the CPU
4666                  * cache) for about a 10% performance improvement
4667                  * compared to uncached.  Graphics requests other than
4668                  * display scanout are coherent with the CPU in
4669                  * accessing this cache.  This means in this mode we
4670                  * don't need to clflush on the CPU side, and on the
4671                  * GPU side we only need to flush internal caches to
4672                  * get data visible to the CPU.
4673                  *
4674                  * However, we maintain the display planes as UC, and so
4675                  * need to rebind when first used as such.
4676                  */
4677                 obj->cache_level = I915_CACHE_LLC;
4678         } else
4679                 obj->cache_level = I915_CACHE_NONE;
4680
4681         trace_i915_gem_object_create(obj);
4682
4683         return obj;
4684 }
4685
4686 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4687 {
4688         /* If we are the last user of the backing storage (be it shmemfs
4689          * pages or stolen etc), we know that the pages are going to be
4690          * immediately released. In this case, we can then skip copying
4691          * back the contents from the GPU.
4692          */
4693
4694         if (obj->madv != I915_MADV_WILLNEED)
4695                 return false;
4696
4697         if (obj->base.filp == NULL)
4698                 return true;
4699
4700         /* At first glance, this looks racy, but then again so would be
4701          * userspace racing mmap against close. However, the first external
4702          * reference to the filp can only be obtained through the
4703          * i915_gem_mmap_ioctl() which safeguards us against the user
4704          * acquiring such a reference whilst we are in the middle of
4705          * freeing the object.
4706          */
4707         return atomic_long_read(&obj->base.filp->f_count) == 1;
4708 }
4709
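/*
 * Final unreference for a GEM object: force-unbind every VMA (dropping any
 * leftover pin counts), release the backing pages and mmap offset, and hand
 * the memory back to the object slab. Runtime PM is held across the
 * teardown since unbinding can touch the GTT.
 */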
4710 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4711 {
4712         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4713         struct drm_device *dev = obj->base.dev;
4714         struct drm_i915_private *dev_priv = dev->dev_private;
4715         struct i915_vma *vma, *next;
4716
4717         intel_runtime_pm_get(dev_priv);
4718
4719         trace_i915_gem_object_destroy(obj);
4720
4721         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4722                 int ret;
4723
4724                 vma->pin_count = 0;
4725                 ret = i915_vma_unbind(vma);
4726                 if (WARN_ON(ret == -ERESTARTSYS)) {
4727                         bool was_interruptible;
4728
4729                         was_interruptible = dev_priv->mm.interruptible;
4730                         dev_priv->mm.interruptible = false;
4731
4732                         WARN_ON(i915_vma_unbind(vma));
4733
4734                         dev_priv->mm.interruptible = was_interruptible;
4735                 }
4736         }
4737
4738         /* Stolen objects don't hold a ref, but do hold a pin count. Fix that
4739          * up before progressing. */
4740         if (obj->stolen)
4741                 i915_gem_object_unpin_pages(obj);
4742
4743         WARN_ON(obj->frontbuffer_bits);
4744
4745         if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
4746             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
4747             obj->tiling_mode != I915_TILING_NONE)
4748                 i915_gem_object_unpin_pages(obj);
4749
4750         if (WARN_ON(obj->pages_pin_count))
4751                 obj->pages_pin_count = 0;
4752         if (discard_backing_storage(obj))
4753                 obj->madv = I915_MADV_DONTNEED;
4754         i915_gem_object_put_pages(obj);
4755         i915_gem_object_free_mmap_offset(obj);
4756
4757         BUG_ON(obj->pages);
4758
4759         if (obj->base.import_attach)
4760                 drm_prime_gem_destroy(&obj->base, NULL);
4761
4762         if (obj->ops->release)
4763                 obj->ops->release(obj);
4764
4765         drm_gem_object_release(&obj->base);
4766         i915_gem_info_remove_obj(dev_priv, obj->base.size);
4767
4768         kfree(obj->bit_17);
4769         i915_gem_object_free(obj);
4770
4771         intel_runtime_pm_put(dev_priv);
4772 }
4773
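/*
 * Look up the VMA for @obj in @vm. GGTT VMAs created for non-normal views
 * are skipped; use i915_gem_obj_to_ggtt_view() to find those.
 */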
4774 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4775                                      struct i915_address_space *vm)
4776 {
4777         struct i915_vma *vma;
4778         list_for_each_entry(vma, &obj->vma_list, vma_link) {
4779                 if (i915_is_ggtt(vma->vm) &&
4780                     vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
4781                         continue;
4782                 if (vma->vm == vm)
4783                         return vma;
4784         }
4785         return NULL;
4786 }
4787
4788 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
4789                                            const struct i915_ggtt_view *view)
4790 {
4791         struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
4792         struct i915_vma *vma;
4793
4794         if (WARN_ONCE(!view, "no view specified"))
4795                 return ERR_PTR(-EINVAL);
4796
4797         list_for_each_entry(vma, &obj->vma_list, vma_link)
4798                 if (vma->vm == ggtt &&
4799                     i915_ggtt_view_equal(&vma->ggtt_view, view))
4800                         return vma;
4801         return NULL;
4802 }
4803
4804 void i915_gem_vma_destroy(struct i915_vma *vma)
4805 {
4806         struct i915_address_space *vm = NULL;
4807         WARN_ON(vma->node.allocated);
4808
4809         /* Keep the vma as a placeholder in the execbuffer reservation lists */
4810         if (!list_empty(&vma->exec_list))
4811                 return;
4812
4813         vm = vma->vm;
4814
4815         if (!i915_is_ggtt(vm))
4816                 i915_ppgtt_put(i915_vm_to_ppgtt(vm));
4817
4818         list_del(&vma->vma_link);
4819
4820         kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
4821 }
4822
4823 static void
4824 i915_gem_stop_ringbuffers(struct drm_device *dev)
4825 {
4826         struct drm_i915_private *dev_priv = dev->dev_private;
4827         struct intel_engine_cs *ring;
4828         int i;
4829
4830         for_each_ring(ring, dev_priv, i)
4831                 dev_priv->gt.stop_ring(ring);
4832 }
4833
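/*
 * Quiesce the GPU before suspend: wait for it to go idle, retire all
 * outstanding requests, stop the rings and flush the deferred retire/idle
 * work so no GEM activity is left running behind our back.
 */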
4834 int
4835 i915_gem_suspend(struct drm_device *dev)
4836 {
4837         struct drm_i915_private *dev_priv = dev->dev_private;
4838         int ret = 0;
4839
4840         mutex_lock(&dev->struct_mutex);
4841         ret = i915_gpu_idle(dev);
4842         if (ret)
4843                 goto err;
4844
4845         i915_gem_retire_requests(dev);
4846
4847         i915_gem_stop_ringbuffers(dev);
4848         mutex_unlock(&dev->struct_mutex);
4849
4850         cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4851         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4852         flush_delayed_work(&dev_priv->mm.idle_work);
4853
4854         /* Assert that we successfully flushed all the work and
4855          * reset the GPU back to its idle, low power state.
4856          */
4857         WARN_ON(dev_priv->mm.busy);
4858
4859         return 0;
4860
4861 err:
4862         mutex_unlock(&dev->struct_mutex);
4863         return ret;
4864 }
4865
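/*
 * Re-emit the L3 remapping registers for @slice into @req's ring, one
 * MI_LOAD_REGISTER_IMM per DWORD of saved remap information. A no-op on
 * hardware without L3 DPF or when no remap data has been recorded.
 */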
4866 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
4867 {
4868         struct intel_engine_cs *ring = req->ring;
4869         struct drm_device *dev = ring->dev;
4870         struct drm_i915_private *dev_priv = dev->dev_private;
4871         u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4872         u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4873         int i, ret;
4874
4875         if (!HAS_L3_DPF(dev) || !remap_info)
4876                 return 0;
4877
4878         ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
4879         if (ret)
4880                 return ret;
4881
4882         /*
4883          * Note: We do not worry about the concurrent register cacheline hang
4884          * here because no other code should access these registers other than
4885          * at initialization time.
4886          */
4887         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4888                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4889                 intel_ring_emit(ring, reg_base + i);
4890                 intel_ring_emit(ring, remap_info[i/4]);
4891         }
4892
4893         intel_ring_advance(ring);
4894
4895         return ret;
4896 }
4897
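/*
 * Program the display and memory arbiter swizzling controls on gen5+ when
 * bit-6 swizzling is in use; gen6/7/8 additionally need TILECTL and
 * ARB_MODE/GAMTARBMODE setup.
 */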
4898 void i915_gem_init_swizzling(struct drm_device *dev)
4899 {
4900         struct drm_i915_private *dev_priv = dev->dev_private;
4901
4902         if (INTEL_INFO(dev)->gen < 5 ||
4903             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4904                 return;
4905
4906         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4907                                  DISP_TILE_SURFACE_SWIZZLING);
4908
4909         if (IS_GEN5(dev))
4910                 return;
4911
4912         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4913         if (IS_GEN6(dev))
4914                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4915         else if (IS_GEN7(dev))
4916                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4917         else if (IS_GEN8(dev))
4918                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4919         else
4920                 BUG();
4921 }
4922
4923 static bool
4924 intel_enable_blt(struct drm_device *dev)
4925 {
4926         if (!HAS_BLT(dev))
4927                 return false;
4928
4929         /* The blitter was dysfunctional on early prototypes */
4930         if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4931                 DRM_INFO("BLT not supported on this pre-production hardware;"
4932                          " graphics performance will be degraded.\n");
4933                 return false;
4934         }
4935
4936         return true;
4937 }
4938
4939 static void init_unused_ring(struct drm_device *dev, u32 base)
4940 {
4941         struct drm_i915_private *dev_priv = dev->dev_private;
4942
4943         I915_WRITE(RING_CTL(base), 0);
4944         I915_WRITE(RING_HEAD(base), 0);
4945         I915_WRITE(RING_TAIL(base), 0);
4946         I915_WRITE(RING_START(base), 0);
4947 }
4948
4949 static void init_unused_rings(struct drm_device *dev)
4950 {
4951         if (IS_I830(dev)) {
4952                 init_unused_ring(dev, PRB1_BASE);
4953                 init_unused_ring(dev, SRB0_BASE);
4954                 init_unused_ring(dev, SRB1_BASE);
4955                 init_unused_ring(dev, SRB2_BASE);
4956                 init_unused_ring(dev, SRB3_BASE);
4957         } else if (IS_GEN2(dev)) {
4958                 init_unused_ring(dev, SRB0_BASE);
4959                 init_unused_ring(dev, SRB1_BASE);
4960         } else if (IS_GEN3(dev)) {
4961                 init_unused_ring(dev, PRB1_BASE);
4962                 init_unused_ring(dev, PRB2_BASE);
4963         }
4964 }
4965
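/*
 * Create the legacy ringbuffers for every engine present on this platform
 * (render always, then BSD, BLT, VEBOX and BSD2 where supported), tearing
 * down the already-initialised rings on failure.
 */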
4966 int i915_gem_init_rings(struct drm_device *dev)
4967 {
4968         struct drm_i915_private *dev_priv = dev->dev_private;
4969         int ret;
4970
4971         ret = intel_init_render_ring_buffer(dev);
4972         if (ret)
4973                 return ret;
4974
4975         if (HAS_BSD(dev)) {
4976                 ret = intel_init_bsd_ring_buffer(dev);
4977                 if (ret)
4978                         goto cleanup_render_ring;
4979         }
4980
4981         if (intel_enable_blt(dev)) {
4982                 ret = intel_init_blt_ring_buffer(dev);
4983                 if (ret)
4984                         goto cleanup_bsd_ring;
4985         }
4986
4987         if (HAS_VEBOX(dev)) {
4988                 ret = intel_init_vebox_ring_buffer(dev);
4989                 if (ret)
4990                         goto cleanup_blt_ring;
4991         }
4992
4993         if (HAS_BSD2(dev)) {
4994                 ret = intel_init_bsd2_ring_buffer(dev);
4995                 if (ret)
4996                         goto cleanup_vebox_ring;
4997         }
4998
4999         ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
5000         if (ret)
5001                 goto cleanup_bsd2_ring;
5002
5003         return 0;
5004
5005 cleanup_bsd2_ring:
5006         intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
5007 cleanup_vebox_ring:
5008         intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
5009 cleanup_blt_ring:
5010         intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
5011 cleanup_bsd_ring:
5012         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
5013 cleanup_render_ring:
5014         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
5015
5016         return ret;
5017 }
5018
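/*
 * Bring the GEM side of the hardware back up: program swizzling, quiesce
 * the unused legacy rings, enable PPGTT, run each engine's init_hw hook and
 * then submit per-ring requests to restore the L3 remapping, PPGTT and
 * context state.
 */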
5019 int
5020 i915_gem_init_hw(struct drm_device *dev)
5021 {
5022         struct drm_i915_private *dev_priv = dev->dev_private;
5023         struct intel_engine_cs *ring;
5024         int ret, i, j;
5025
5026         if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
5027                 return -EIO;
5028
5029         /* Double layer security blanket, see i915_gem_init() */
5030         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5031
5032         if (dev_priv->ellc_size)
5033                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
5034
5035         if (IS_HASWELL(dev))
5036                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
5037                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
5038
5039         if (HAS_PCH_NOP(dev)) {
5040                 if (IS_IVYBRIDGE(dev)) {
5041                         u32 temp = I915_READ(GEN7_MSG_CTL);
5042                         temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
5043                         I915_WRITE(GEN7_MSG_CTL, temp);
5044                 } else if (INTEL_INFO(dev)->gen >= 7) {
5045                         u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
5046                         temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
5047                         I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
5048                 }
5049         }
5050
5051         i915_gem_init_swizzling(dev);
5052
5053         /*
5054          * At least 830 can leave some of the unused rings
5055          * "active" (ie. head != tail) after resume which
5056          * will prevent c3 entry. Makes sure all unused rings
5057          * are totally idle.
5058          */
5059         init_unused_rings(dev);
5060
5061         BUG_ON(!dev_priv->ring[RCS].default_context);
5062
5063         ret = i915_ppgtt_init_hw(dev);
5064         if (ret) {
5065                 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
5066                 goto out;
5067         }
5068
5069         /* Need to do basic initialisation of all rings first: */
5070         for_each_ring(ring, dev_priv, i) {
5071                 ret = ring->init_hw(ring);
5072                 if (ret)
5073                         goto out;
5074         }
5075
5076         /* Now it is safe to go back round and do everything else: */
5077         for_each_ring(ring, dev_priv, i) {
5078                 struct drm_i915_gem_request *req;
5079
5080                 WARN_ON(!ring->default_context);
5081
5082                 ret = i915_gem_request_alloc(ring, ring->default_context, &req);
5083                 if (ret) {
5084                         i915_gem_cleanup_ringbuffer(dev);
5085                         goto out;
5086                 }
5087
5088                 if (ring->id == RCS) {
5089                         for (j = 0; j < NUM_L3_SLICES(dev); j++)
5090                                 i915_gem_l3_remap(req, j);
5091                 }
5092
5093                 ret = i915_ppgtt_init_ring(req);
5094                 if (ret && ret != -EIO) {
5095                         DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
5096                         i915_gem_request_cancel(req);
5097                         i915_gem_cleanup_ringbuffer(dev);
5098                         goto out;
5099                 }
5100
5101                 ret = i915_gem_context_enable(req);
5102                 if (ret && ret != -EIO) {
5103                         DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
5104                         i915_gem_request_cancel(req);
5105                         i915_gem_cleanup_ringbuffer(dev);
5106                         goto out;
5107                 }
5108
5109                 i915_add_request_no_flush(req);
5110         }
5111
5112 out:
5113         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5114         return ret;
5115 }
5116
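/*
 * One-time GEM init at driver load: pick the submission backend (legacy
 * ringbuffers vs. execlists), set up the global GTT, contexts and rings,
 * and finally initialise the hardware. An -EIO from the hardware bring-up
 * only marks the GPU as wedged instead of failing the load.
 */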
5117 int i915_gem_init(struct drm_device *dev)
5118 {
5119         struct drm_i915_private *dev_priv = dev->dev_private;
5120         int ret;
5121
5122         i915.enable_execlists = intel_sanitize_enable_execlists(dev,
5123                         i915.enable_execlists);
5124
5125         mutex_lock(&dev->struct_mutex);
5126
5127         if (IS_VALLEYVIEW(dev)) {
5128                 /* VLVA0 (potential hack), BIOS isn't actually waking us */
5129                 I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
5130                 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
5131                               VLV_GTLC_ALLOWWAKEACK), 10))
5132                         DRM_DEBUG_DRIVER("allow wake ack timed out\n");
5133         }
5134
5135         if (!i915.enable_execlists) {
5136                 dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
5137                 dev_priv->gt.init_rings = i915_gem_init_rings;
5138                 dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
5139                 dev_priv->gt.stop_ring = intel_stop_ring_buffer;
5140         } else {
5141                 dev_priv->gt.execbuf_submit = intel_execlists_submission;
5142                 dev_priv->gt.init_rings = intel_logical_rings_init;
5143                 dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
5144                 dev_priv->gt.stop_ring = intel_logical_ring_stop;
5145         }
5146
5147         /* This is just a security blanket to placate dragons.
5148          * On some systems, we very sporadically observe that the first TLBs
5149          * used by the CS may be stale, despite us poking the TLB reset. If
5150          * we hold the forcewake during initialisation these problems
5151          * just magically go away.
5152          */
5153         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5154
5155         ret = i915_gem_init_userptr(dev);
5156         if (ret)
5157                 goto out_unlock;
5158
5159         i915_gem_init_global_gtt(dev);
5160
5161         ret = i915_gem_context_init(dev);
5162         if (ret)
5163                 goto out_unlock;
5164
5165         ret = dev_priv->gt.init_rings(dev);
5166         if (ret)
5167                 goto out_unlock;
5168
5169         ret = i915_gem_init_hw(dev);
5170         if (ret == -EIO) {
5171                 /* Allow ring initialisation to fail by marking the GPU as
5172                  * wedged. But we only want to do this where the GPU is angry;
5173                  * for all other failures, such as an allocation failure, bail.
5174                  */
5175                 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
5176                 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
5177                 ret = 0;
5178         }
5179
5180 out_unlock:
5181         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5182         mutex_unlock(&dev->struct_mutex);
5183
5184         return ret;
5185 }
5186
5187 void
5188 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
5189 {
5190         struct drm_i915_private *dev_priv = dev->dev_private;
5191         struct intel_engine_cs *ring;
5192         int i;
5193
5194         for_each_ring(ring, dev_priv, i)
5195                 dev_priv->gt.cleanup_ring(ring);
5196 }
5197
5198 static void
5199 init_ring_lists(struct intel_engine_cs *ring)
5200 {
5201         INIT_LIST_HEAD(&ring->active_list);
5202         INIT_LIST_HEAD(&ring->request_list);
5203 }
5204
5205 void i915_init_vm(struct drm_i915_private *dev_priv,
5206                   struct i915_address_space *vm)
5207 {
5208         if (!i915_is_ggtt(vm))
5209                 drm_mm_init(&vm->mm, vm->start, vm->total);
5210         vm->dev = dev_priv->dev;
5211         INIT_LIST_HEAD(&vm->active_list);
5212         INIT_LIST_HEAD(&vm->inactive_list);
5213         INIT_LIST_HEAD(&vm->global_link);
5214         list_add_tail(&vm->global_link, &dev_priv->vm_list);
5215 }
5216
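/*
 * i915_gem_load - set up the driver-wide GEM state at load time: slab
 * caches for objects/VMAs/requests, the VM and LRU lists, the retire and
 * idle workers, and the number of fence registers for this platform.
 */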
5217 void
5218 i915_gem_load(struct drm_device *dev)
5219 {
5220         struct drm_i915_private *dev_priv = dev->dev_private;
5221         int i;
5222
5223         dev_priv->objects =
5224                 kmem_cache_create("i915_gem_object",
5225                                   sizeof(struct drm_i915_gem_object), 0,
5226                                   SLAB_HWCACHE_ALIGN,
5227                                   NULL);
5228         dev_priv->vmas =
5229                 kmem_cache_create("i915_gem_vma",
5230                                   sizeof(struct i915_vma), 0,
5231                                   SLAB_HWCACHE_ALIGN,
5232                                   NULL);
5233         dev_priv->requests =
5234                 kmem_cache_create("i915_gem_request",
5235                                   sizeof(struct drm_i915_gem_request), 0,
5236                                   SLAB_HWCACHE_ALIGN,
5237                                   NULL);
5238
5239         INIT_LIST_HEAD(&dev_priv->vm_list);
5240         i915_init_vm(dev_priv, &dev_priv->gtt.base);
5241
5242         INIT_LIST_HEAD(&dev_priv->context_list);
5243         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
5244         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
5245         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
5246         for (i = 0; i < I915_NUM_RINGS; i++)
5247                 init_ring_lists(&dev_priv->ring[i]);
5248         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
5249                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
5250         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
5251                           i915_gem_retire_work_handler);
5252         INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
5253                           i915_gem_idle_work_handler);
5254         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
5255
5256         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
5257
5258         if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
5259                 dev_priv->num_fence_regs = 32;
5260         else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
5261                 dev_priv->num_fence_regs = 16;
5262         else
5263                 dev_priv->num_fence_regs = 8;
5264
5265         if (intel_vgpu_active(dev))
5266                 dev_priv->num_fence_regs =
5267                                 I915_READ(vgtif_reg(avail_rs.fence_num));
5268
5269         /* Initialize fence registers to zero */
5270         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
5271         i915_gem_restore_fences(dev);
5272
5273         i915_gem_detect_bit_6_swizzle(dev);
5274         init_waitqueue_head(&dev_priv->pending_flip_queue);
5275
5276         dev_priv->mm.interruptible = true;
5277
5278         i915_gem_shrinker_init(dev_priv);
5279
5280         mutex_init(&dev_priv->fb_tracking.lock);
5281 }
5282
5283 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
5284 {
5285         struct drm_i915_file_private *file_priv = file->driver_priv;
5286
5287         /* Clean up our request list when the client is going away, so that
5288          * later retire_requests won't dereference our soon-to-be-gone
5289          * file_priv.
5290          */
5291         spin_lock(&file_priv->mm.lock);
5292         while (!list_empty(&file_priv->mm.request_list)) {
5293                 struct drm_i915_gem_request *request;
5294
5295                 request = list_first_entry(&file_priv->mm.request_list,
5296                                            struct drm_i915_gem_request,
5297                                            client_list);
5298                 list_del(&request->client_list);
5299                 request->file_priv = NULL;
5300         }
5301         spin_unlock(&file_priv->mm.lock);
5302
5303         if (!list_empty(&file_priv->rps.link)) {
5304                 spin_lock(&to_i915(dev)->rps.client_lock);
5305                 list_del(&file_priv->rps.link);
5306                 spin_unlock(&to_i915(dev)->rps.client_lock);
5307         }
5308 }
5309
5310 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
5311 {
5312         struct drm_i915_file_private *file_priv;
5313         int ret;
5314
5315         DRM_DEBUG_DRIVER("\n");
5316
5317         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
5318         if (!file_priv)
5319                 return -ENOMEM;
5320
5321         file->driver_priv = file_priv;
5322         file_priv->dev_priv = dev->dev_private;
5323         file_priv->file = file;
5324         INIT_LIST_HEAD(&file_priv->rps.link);
5325
5326         spin_lock_init(&file_priv->mm.lock);
5327         INIT_LIST_HEAD(&file_priv->mm.request_list);
5328
5329         ret = i915_gem_context_open(dev, file);
5330         if (ret)
5331                 kfree(file_priv);
5332
5333         return ret;
5334 }
5335
5336 /**
5337  * i915_gem_track_fb - update frontbuffer tracking
5338  * @old: current GEM buffer for the frontbuffer slots
5339  * @new: new GEM buffer for the frontbuffer slots
5340  * @frontbuffer_bits: bitmask of frontbuffer slots
5341  *
5342  * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
5343  * from @old and setting them in @new. Both @old and @new can be NULL.
5344  */
5345 void i915_gem_track_fb(struct drm_i915_gem_object *old,
5346                        struct drm_i915_gem_object *new,
5347                        unsigned frontbuffer_bits)
5348 {
5349         if (old) {
5350                 WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
5351                 WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
5352                 old->frontbuffer_bits &= ~frontbuffer_bits;
5353         }
5354
5355         if (new) {
5356                 WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
5357                 WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
5358                 new->frontbuffer_bits |= frontbuffer_bits;
5359         }
5360 }
5361
5362 /* All the new VM stuff */
5363 unsigned long
5364 i915_gem_obj_offset(struct drm_i915_gem_object *o,
5365                     struct i915_address_space *vm)
5366 {
5367         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5368         struct i915_vma *vma;
5369
5370         WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5371
5372         list_for_each_entry(vma, &o->vma_list, vma_link) {
5373                 if (i915_is_ggtt(vma->vm) &&
5374                     vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5375                         continue;
5376                 if (vma->vm == vm)
5377                         return vma->node.start;
5378         }
5379
5380         WARN(1, "%s vma for this object not found.\n",
5381              i915_is_ggtt(vm) ? "global" : "ppgtt");
5382         return -1;
5383 }
5384
5385 unsigned long
5386 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
5387                               const struct i915_ggtt_view *view)
5388 {
5389         struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
5390         struct i915_vma *vma;
5391
5392         list_for_each_entry(vma, &o->vma_list, vma_link)
5393                 if (vma->vm == ggtt &&
5394                     i915_ggtt_view_equal(&vma->ggtt_view, view))
5395                         return vma->node.start;
5396
5397         WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
5398         return -1;
5399 }
5400
5401 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
5402                         struct i915_address_space *vm)
5403 {
5404         struct i915_vma *vma;
5405
5406         list_for_each_entry(vma, &o->vma_list, vma_link) {
5407                 if (i915_is_ggtt(vma->vm) &&
5408                     vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5409                         continue;
5410                 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
5411                         return true;
5412         }
5413
5414         return false;
5415 }
5416
5417 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
5418                                   const struct i915_ggtt_view *view)
5419 {
5420         struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
5421         struct i915_vma *vma;
5422
5423         list_for_each_entry(vma, &o->vma_list, vma_link)
5424                 if (vma->vm == ggtt &&
5425                     i915_ggtt_view_equal(&vma->ggtt_view, view) &&
5426                     drm_mm_node_allocated(&vma->node))
5427                         return true;
5428
5429         return false;
5430 }
5431
5432 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
5433 {
5434         struct i915_vma *vma;
5435
5436         list_for_each_entry(vma, &o->vma_list, vma_link)
5437                 if (drm_mm_node_allocated(&vma->node))
5438                         return true;
5439
5440         return false;
5441 }
5442
5443 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
5444                                 struct i915_address_space *vm)
5445 {
5446         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5447         struct i915_vma *vma;
5448
5449         WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5450
5451         BUG_ON(list_empty(&o->vma_list));
5452
5453         list_for_each_entry(vma, &o->vma_list, vma_link) {
5454                 if (i915_is_ggtt(vma->vm) &&
5455                     vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5456                         continue;
5457                 if (vma->vm == vm)
5458                         return vma->node.size;
5459         }
5460         return 0;
5461 }
5462
5463 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
5464 {
5465         struct i915_vma *vma;
5466         list_for_each_entry(vma, &obj->vma_list, vma_link)
5467                 if (vma->pin_count > 0)
5468                         return true;
5469
5470         return false;
5471 }
5472