89255917d9920f9d3510053f524336fe4e81f830
[linux-block.git] drivers/gpu/drm/i915/i915_gem.c
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include <drm/drmP.h>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_gem_dmabuf.h"
33 #include "i915_vgpu.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36 #include "intel_mocs.h"
37 #include <linux/reservation.h>
38 #include <linux/shmem_fs.h>
39 #include <linux/slab.h>
40 #include <linux/swap.h>
41 #include <linux/pci.h>
42 #include <linux/dma-buf.h>
43
44 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
45 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
46
47 static bool cpu_cache_is_coherent(struct drm_device *dev,
48                                   enum i915_cache_level level)
49 {
50         return HAS_LLC(dev) || level != I915_CACHE_NONE;
51 }
52
53 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
54 {
55         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
56                 return false;
57
58         if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
59                 return true;
60
61         return obj->pin_display;
62 }
63
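/* Helpers for the pread/pwrite slow paths below: when an object cannot be
 * pinned into the mappable aperture, we temporarily reserve a single
 * page-sized node anywhere in the mappable range of the GGTT and point it
 * at the object's pages one at a time.
 */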
64 static int
65 insert_mappable_node(struct drm_i915_private *i915,
66                      struct drm_mm_node *node, u32 size)
67 {
68         memset(node, 0, sizeof(*node));
69         return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
70                                                    size, 0, 0, 0,
71                                                    i915->ggtt.mappable_end,
72                                                    DRM_MM_SEARCH_DEFAULT,
73                                                    DRM_MM_CREATE_DEFAULT);
74 }
75
76 static void
77 remove_mappable_node(struct drm_mm_node *node)
78 {
79         drm_mm_remove_node(node);
80 }
81
82 /* some bookkeeping */
83 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
84                                   size_t size)
85 {
86         spin_lock(&dev_priv->mm.object_stat_lock);
87         dev_priv->mm.object_count++;
88         dev_priv->mm.object_memory += size;
89         spin_unlock(&dev_priv->mm.object_stat_lock);
90 }
91
92 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
93                                      size_t size)
94 {
95         spin_lock(&dev_priv->mm.object_stat_lock);
96         dev_priv->mm.object_count--;
97         dev_priv->mm.object_memory -= size;
98         spin_unlock(&dev_priv->mm.object_stat_lock);
99 }
100
101 static int
102 i915_gem_wait_for_error(struct i915_gpu_error *error)
103 {
104         int ret;
105
106         if (!i915_reset_in_progress(error))
107                 return 0;
108
109         /*
110          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
111          * userspace. If it takes that long something really bad is going on and
112          * we should simply try to bail out and fail as gracefully as possible.
113          */
114         ret = wait_event_interruptible_timeout(error->reset_queue,
115                                                !i915_reset_in_progress(error),
116                                                10*HZ);
117         if (ret == 0) {
118                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
119                 return -EIO;
120         } else if (ret < 0) {
121                 return ret;
122         } else {
123                 return 0;
124         }
125 }
126
127 int i915_mutex_lock_interruptible(struct drm_device *dev)
128 {
129         struct drm_i915_private *dev_priv = to_i915(dev);
130         int ret;
131
132         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
133         if (ret)
134                 return ret;
135
136         ret = mutex_lock_interruptible(&dev->struct_mutex);
137         if (ret)
138                 return ret;
139
140         return 0;
141 }
142
143 int
144 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
145                             struct drm_file *file)
146 {
147         struct drm_i915_private *dev_priv = to_i915(dev);
148         struct i915_ggtt *ggtt = &dev_priv->ggtt;
149         struct drm_i915_gem_get_aperture *args = data;
150         struct i915_vma *vma;
151         size_t pinned;
152
153         pinned = 0;
154         mutex_lock(&dev->struct_mutex);
155         list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
156                 if (i915_vma_is_pinned(vma))
157                         pinned += vma->node.size;
158         list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
159                 if (i915_vma_is_pinned(vma))
160                         pinned += vma->node.size;
161         mutex_unlock(&dev->struct_mutex);
162
163         args->aper_size = ggtt->base.total;
164         args->aper_available_size = args->aper_size - pinned;
165
166         return 0;
167 }
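/* Illustrative userspace call into the aperture ioctl above (sketch only;
 * assumes libdrm's drmIoctl() wrapper, error handling omitted):
 *
 *	struct drm_i915_gem_get_aperture ap = { 0 };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &ap);
 *
 * Note that ap.aper_available_size is only a snapshot; other clients may
 * pin or unpin buffers immediately afterwards.
 */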
168
169 static int
170 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
171 {
172         struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
173         char *vaddr = obj->phys_handle->vaddr;
174         struct sg_table *st;
175         struct scatterlist *sg;
176         int i;
177
178         if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
179                 return -EINVAL;
180
181         for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
182                 struct page *page;
183                 char *src;
184
185                 page = shmem_read_mapping_page(mapping, i);
186                 if (IS_ERR(page))
187                         return PTR_ERR(page);
188
189                 src = kmap_atomic(page);
190                 memcpy(vaddr, src, PAGE_SIZE);
191                 drm_clflush_virt_range(vaddr, PAGE_SIZE);
192                 kunmap_atomic(src);
193
194                 put_page(page);
195                 vaddr += PAGE_SIZE;
196         }
197
198         i915_gem_chipset_flush(to_i915(obj->base.dev));
199
200         st = kmalloc(sizeof(*st), GFP_KERNEL);
201         if (st == NULL)
202                 return -ENOMEM;
203
204         if (sg_alloc_table(st, 1, GFP_KERNEL)) {
205                 kfree(st);
206                 return -ENOMEM;
207         }
208
209         sg = st->sgl;
210         sg->offset = 0;
211         sg->length = obj->base.size;
212
213         sg_dma_address(sg) = obj->phys_handle->busaddr;
214         sg_dma_len(sg) = obj->base.size;
215
216         obj->pages = st;
217         return 0;
218 }
219
220 static void
221 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
222 {
223         int ret;
224
225         BUG_ON(obj->madv == __I915_MADV_PURGED);
226
227         ret = i915_gem_object_set_to_cpu_domain(obj, true);
228         if (WARN_ON(ret)) {
229                 /* In the event of a disaster, abandon all caches and
230                  * hope for the best.
231                  */
232                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
233         }
234
235         if (obj->madv == I915_MADV_DONTNEED)
236                 obj->dirty = 0;
237
238         if (obj->dirty) {
239                 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
240                 char *vaddr = obj->phys_handle->vaddr;
241                 int i;
242
243                 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
244                         struct page *page;
245                         char *dst;
246
247                         page = shmem_read_mapping_page(mapping, i);
248                         if (IS_ERR(page))
249                                 continue;
250
251                         dst = kmap_atomic(page);
252                         drm_clflush_virt_range(vaddr, PAGE_SIZE);
253                         memcpy(dst, vaddr, PAGE_SIZE);
254                         kunmap_atomic(dst);
255
256                         set_page_dirty(page);
257                         if (obj->madv == I915_MADV_WILLNEED)
258                                 mark_page_accessed(page);
259                         put_page(page);
260                         vaddr += PAGE_SIZE;
261                 }
262                 obj->dirty = 0;
263         }
264
265         sg_free_table(obj->pages);
266         kfree(obj->pages);
267 }
268
269 static void
270 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
271 {
272         drm_pci_free(obj->base.dev, obj->phys_handle);
273 }
274
275 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
276         .get_pages = i915_gem_object_get_pages_phys,
277         .put_pages = i915_gem_object_put_pages_phys,
278         .release = i915_gem_object_release_phys,
279 };
280
281 int
282 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
283 {
284         struct i915_vma *vma;
285         LIST_HEAD(still_in_list);
286         int ret = 0;
287
288         /* The vma will only be freed if it is marked as closed, and if we wait
289          * upon rendering to the vma, we may unbind anything in the list.
290          */
291         while ((vma = list_first_entry_or_null(&obj->vma_list,
292                                                struct i915_vma,
293                                                obj_link))) {
294                 list_move_tail(&vma->obj_link, &still_in_list);
295                 ret = i915_vma_unbind(vma);
296                 if (ret)
297                         break;
298         }
299         list_splice(&still_in_list, &obj->vma_list);
300
301         return ret;
302 }
303
304 int
305 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
306                             int align)
307 {
308         drm_dma_handle_t *phys;
309         int ret;
310
311         if (obj->phys_handle) {
312                 if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
313                         return -EBUSY;
314
315                 return 0;
316         }
317
318         if (obj->madv != I915_MADV_WILLNEED)
319                 return -EFAULT;
320
321         if (obj->base.filp == NULL)
322                 return -EINVAL;
323
324         ret = i915_gem_object_unbind(obj);
325         if (ret)
326                 return ret;
327
328         ret = i915_gem_object_put_pages(obj);
329         if (ret)
330                 return ret;
331
332         /* create a new object */
333         phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
334         if (!phys)
335                 return -ENOMEM;
336
337         obj->phys_handle = phys;
338         obj->ops = &i915_gem_phys_ops;
339
340         return i915_gem_object_get_pages(obj);
341 }
342
343 static int
344 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
345                      struct drm_i915_gem_pwrite *args,
346                      struct drm_file *file_priv)
347 {
348         struct drm_device *dev = obj->base.dev;
349         void *vaddr = obj->phys_handle->vaddr + args->offset;
350         char __user *user_data = u64_to_user_ptr(args->data_ptr);
351         int ret = 0;
352
353         /* We manually control the domain here and pretend that it
354          * remains coherent i.e. in the GTT domain, like shmem_pwrite.
355          */
356         ret = i915_gem_object_wait_rendering(obj, false);
357         if (ret)
358                 return ret;
359
360         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
361         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
362                 unsigned long unwritten;
363
364                 /* The physical object once assigned is fixed for the lifetime
365                  * of the obj, so we can safely drop the lock and continue
366                  * to access vaddr.
367                  */
368                 mutex_unlock(&dev->struct_mutex);
369                 unwritten = copy_from_user(vaddr, user_data, args->size);
370                 mutex_lock(&dev->struct_mutex);
371                 if (unwritten) {
372                         ret = -EFAULT;
373                         goto out;
374                 }
375         }
376
377         drm_clflush_virt_range(vaddr, args->size);
378         i915_gem_chipset_flush(to_i915(dev));
379
380 out:
381         intel_fb_obj_flush(obj, false, ORIGIN_CPU);
382         return ret;
383 }
384
385 void *i915_gem_object_alloc(struct drm_device *dev)
386 {
387         struct drm_i915_private *dev_priv = to_i915(dev);
388         return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
389 }
390
391 void i915_gem_object_free(struct drm_i915_gem_object *obj)
392 {
393         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
394         kmem_cache_free(dev_priv->objects, obj);
395 }
396
397 static int
398 i915_gem_create(struct drm_file *file,
399                 struct drm_device *dev,
400                 uint64_t size,
401                 uint32_t *handle_p)
402 {
403         struct drm_i915_gem_object *obj;
404         int ret;
405         u32 handle;
406
407         size = roundup(size, PAGE_SIZE);
408         if (size == 0)
409                 return -EINVAL;
410
411         /* Allocate the new object */
412         obj = i915_gem_object_create(dev, size);
413         if (IS_ERR(obj))
414                 return PTR_ERR(obj);
415
416         ret = drm_gem_handle_create(file, &obj->base, &handle);
417         /* drop reference from allocate - handle holds it now */
418         i915_gem_object_put_unlocked(obj);
419         if (ret)
420                 return ret;
421
422         *handle_p = handle;
423         return 0;
424 }
425
426 int
427 i915_gem_dumb_create(struct drm_file *file,
428                      struct drm_device *dev,
429                      struct drm_mode_create_dumb *args)
430 {
431         /* have to work out size/pitch and return them */
432         args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
433         args->size = args->pitch * args->height;
434         return i915_gem_create(file, dev,
435                                args->size, &args->handle);
436 }
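/* For example, i915_gem_dumb_create() above sizes a 1920x1080, 32 bpp
 * buffer as:
 *
 *	pitch = ALIGN(1920 * DIV_ROUND_UP(32, 8), 64) = ALIGN(7680, 64) = 7680
 *	size  = 7680 * 1080 = 8294400 bytes (~7.9 MiB)
 *
 * i.e. each scanline is padded out to a 64-byte multiple before the object
 * is allocated.
 */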
437
438 /**
439  * Creates a new mm object and returns a handle to it.
440  * @dev: drm device pointer
441  * @data: ioctl data blob
442  * @file: drm file pointer
443  */
444 int
445 i915_gem_create_ioctl(struct drm_device *dev, void *data,
446                       struct drm_file *file)
447 {
448         struct drm_i915_gem_create *args = data;
449
450         return i915_gem_create(file, dev,
451                                args->size, &args->handle);
452 }
453
454 static inline int
455 __copy_to_user_swizzled(char __user *cpu_vaddr,
456                         const char *gpu_vaddr, int gpu_offset,
457                         int length)
458 {
459         int ret, cpu_offset = 0;
460
461         while (length > 0) {
462                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
463                 int this_length = min(cacheline_end - gpu_offset, length);
464                 int swizzled_gpu_offset = gpu_offset ^ 64;
465
466                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
467                                      gpu_vaddr + swizzled_gpu_offset,
468                                      this_length);
469                 if (ret)
470                         return ret + length;
471
472                 cpu_offset += this_length;
473                 gpu_offset += this_length;
474                 length -= this_length;
475         }
476
477         return 0;
478 }
479
480 static inline int
481 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
482                           const char __user *cpu_vaddr,
483                           int length)
484 {
485         int ret, cpu_offset = 0;
486
487         while (length > 0) {
488                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
489                 int this_length = min(cacheline_end - gpu_offset, length);
490                 int swizzled_gpu_offset = gpu_offset ^ 64;
491
492                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
493                                        cpu_vaddr + cpu_offset,
494                                        this_length);
495                 if (ret)
496                         return ret + length;
497
498                 cpu_offset += this_length;
499                 gpu_offset += this_length;
500                 length -= this_length;
501         }
502
503         return 0;
504 }
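/* In both swizzled copy helpers above, "gpu_offset ^ 64" flips bit 6 of the
 * offset, i.e. swaps each pair of adjacent 64-byte cachelines within a
 * 128-byte block. The callers only take this path for pages whose physical
 * address has bit 17 set, undoing the hardware's bit-17 swizzling so the
 * CPU sees the data in linear order.
 */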
505
506 /*
507  * Pins the specified object's pages and synchronizes the object with
508  * GPU accesses. Sets needs_clflush to non-zero if the caller should
509  * flush the object from the CPU cache.
510  */
511 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
512                                     int *needs_clflush)
513 {
514         int ret;
515
516         *needs_clflush = 0;
517
518         if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
519                 return -EINVAL;
520
521         ret = i915_gem_object_wait_rendering(obj, true);
522         if (ret)
523                 return ret;
524
525         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
526                 /* If we're not in the cpu read domain, set ourselves into the gtt
527                  * read domain and manually flush cachelines (if required). This
528                  * optimizes for the case when the gpu will dirty the data
529                  * anyway again before the next pread happens. */
530                 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
531                                                         obj->cache_level);
532         }
533
534         ret = i915_gem_object_get_pages(obj);
535         if (ret)
536                 return ret;
537
538         i915_gem_object_pin_pages(obj);
539
540         return ret;
541 }
542
543 /* Per-page copy function for the shmem pread fastpath.
544  * Flushes invalid cachelines before reading the target if
545  * needs_clflush is set. */
546 static int
547 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
548                  char __user *user_data,
549                  bool page_do_bit17_swizzling, bool needs_clflush)
550 {
551         char *vaddr;
552         int ret;
553
554         if (unlikely(page_do_bit17_swizzling))
555                 return -EINVAL;
556
557         vaddr = kmap_atomic(page);
558         if (needs_clflush)
559                 drm_clflush_virt_range(vaddr + shmem_page_offset,
560                                        page_length);
561         ret = __copy_to_user_inatomic(user_data,
562                                       vaddr + shmem_page_offset,
563                                       page_length);
564         kunmap_atomic(vaddr);
565
566         return ret ? -EFAULT : 0;
567 }
568
569 static void
570 shmem_clflush_swizzled_range(char *addr, unsigned long length,
571                              bool swizzled)
572 {
573         if (unlikely(swizzled)) {
574                 unsigned long start = (unsigned long) addr;
575                 unsigned long end = (unsigned long) addr + length;
576
577                 /* For swizzling simply ensure that we always flush both
578                  * channels. Lame, but simple and it works. Swizzled
579                  * pwrite/pread is far from a hotpath - current userspace
580                  * doesn't use it at all. */
581                 start = round_down(start, 128);
582                 end = round_up(end, 128);
583
584                 drm_clflush_virt_range((void *)start, end - start);
585         } else {
586                 drm_clflush_virt_range(addr, length);
587         }
588
589 }
590
591 /* Only difference to the fast-path function is that this can handle bit17
592  * and uses non-atomic copy and kmap functions. */
593 static int
594 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
595                  char __user *user_data,
596                  bool page_do_bit17_swizzling, bool needs_clflush)
597 {
598         char *vaddr;
599         int ret;
600
601         vaddr = kmap(page);
602         if (needs_clflush)
603                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
604                                              page_length,
605                                              page_do_bit17_swizzling);
606
607         if (page_do_bit17_swizzling)
608                 ret = __copy_to_user_swizzled(user_data,
609                                               vaddr, shmem_page_offset,
610                                               page_length);
611         else
612                 ret = __copy_to_user(user_data,
613                                      vaddr + shmem_page_offset,
614                                      page_length);
615         kunmap(page);
616
617         return ret ? -EFAULT : 0;
618 }
619
620 static inline unsigned long
621 slow_user_access(struct io_mapping *mapping,
622                  uint64_t page_base, int page_offset,
623                  char __user *user_data,
624                  unsigned long length, bool pwrite)
625 {
626         void __iomem *ioaddr;
627         void *vaddr;
628         uint64_t unwritten;
629
630         ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
631         /* We can use the cpu mem copy function because this is X86. */
632         vaddr = (void __force *)ioaddr + page_offset;
633         if (pwrite)
634                 unwritten = __copy_from_user(vaddr, user_data, length);
635         else
636                 unwritten = __copy_to_user(user_data, vaddr, length);
637
638         io_mapping_unmap(ioaddr);
639         return unwritten;
640 }
641
642 static int
643 i915_gem_gtt_pread(struct drm_device *dev,
644                    struct drm_i915_gem_object *obj, uint64_t size,
645                    uint64_t data_offset, uint64_t data_ptr)
646 {
647         struct drm_i915_private *dev_priv = to_i915(dev);
648         struct i915_ggtt *ggtt = &dev_priv->ggtt;
649         struct drm_mm_node node;
650         char __user *user_data;
651         uint64_t remain;
652         uint64_t offset;
653         int ret;
654
655         ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
656         if (ret) {
657                 ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
658                 if (ret)
659                         goto out;
660
661                 ret = i915_gem_object_get_pages(obj);
662                 if (ret) {
663                         remove_mappable_node(&node);
664                         goto out;
665                 }
666
667                 i915_gem_object_pin_pages(obj);
668         } else {
669                 node.start = i915_gem_obj_ggtt_offset(obj);
670                 node.allocated = false;
671                 ret = i915_gem_object_put_fence(obj);
672                 if (ret)
673                         goto out_unpin;
674         }
675
676         ret = i915_gem_object_set_to_gtt_domain(obj, false);
677         if (ret)
678                 goto out_unpin;
679
680         user_data = u64_to_user_ptr(data_ptr);
681         remain = size;
682         offset = data_offset;
683
684         mutex_unlock(&dev->struct_mutex);
685         if (likely(!i915.prefault_disable)) {
686                 ret = fault_in_multipages_writeable(user_data, remain);
687                 if (ret) {
688                         mutex_lock(&dev->struct_mutex);
689                         goto out_unpin;
690                 }
691         }
692
693         while (remain > 0) {
694                 /* Operation in this page
695                  *
696                  * page_base = page offset within aperture
697                  * page_offset = offset within page
698                  * page_length = bytes to copy for this page
699                  */
700                 u32 page_base = node.start;
701                 unsigned page_offset = offset_in_page(offset);
702                 unsigned page_length = PAGE_SIZE - page_offset;
703                 page_length = remain < page_length ? remain : page_length;
704                 if (node.allocated) {
705                         wmb();
706                         ggtt->base.insert_page(&ggtt->base,
707                                                i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
708                                                node.start,
709                                                I915_CACHE_NONE, 0);
710                         wmb();
711                 } else {
712                         page_base += offset & PAGE_MASK;
713                 }
714                 /* This is a slow read/write as it tries to read from
715                  * and write to user memory which may result in page
716                  * faults, and so we cannot perform this under struct_mutex.
717                  */
718                 if (slow_user_access(ggtt->mappable, page_base,
719                                      page_offset, user_data,
720                                      page_length, false)) {
721                         ret = -EFAULT;
722                         break;
723                 }
724
725                 remain -= page_length;
726                 user_data += page_length;
727                 offset += page_length;
728         }
729
730         mutex_lock(&dev->struct_mutex);
731         if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
732                 /* The user has modified the object whilst we tried
733                  * reading from it, and we now have no idea what domain
734                  * the pages should be in. As we have just been touching
735                  * them directly, flush everything back to the GTT
736                  * domain.
737                  */
738                 ret = i915_gem_object_set_to_gtt_domain(obj, false);
739         }
740
741 out_unpin:
742         if (node.allocated) {
743                 wmb();
744                 ggtt->base.clear_range(&ggtt->base,
745                                        node.start, node.size,
746                                        true);
747                 i915_gem_object_unpin_pages(obj);
748                 remove_mappable_node(&node);
749         } else {
750                 i915_gem_object_ggtt_unpin(obj);
751         }
752 out:
753         return ret;
754 }
755
756 static int
757 i915_gem_shmem_pread(struct drm_device *dev,
758                      struct drm_i915_gem_object *obj,
759                      struct drm_i915_gem_pread *args,
760                      struct drm_file *file)
761 {
762         char __user *user_data;
763         ssize_t remain;
764         loff_t offset;
765         int shmem_page_offset, page_length, ret = 0;
766         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
767         int prefaulted = 0;
768         int needs_clflush = 0;
769         struct sg_page_iter sg_iter;
770
771         if (!i915_gem_object_has_struct_page(obj))
772                 return -ENODEV;
773
774         user_data = u64_to_user_ptr(args->data_ptr);
775         remain = args->size;
776
777         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
778
779         ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
780         if (ret)
781                 return ret;
782
783         offset = args->offset;
784
785         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
786                          offset >> PAGE_SHIFT) {
787                 struct page *page = sg_page_iter_page(&sg_iter);
788
789                 if (remain <= 0)
790                         break;
791
792                 /* Operation in this page
793                  *
794                  * shmem_page_offset = offset within page in shmem file
795                  * page_length = bytes to copy for this page
796                  */
797                 shmem_page_offset = offset_in_page(offset);
798                 page_length = remain;
799                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
800                         page_length = PAGE_SIZE - shmem_page_offset;
801
802                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
803                         (page_to_phys(page) & (1 << 17)) != 0;
804
805                 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
806                                        user_data, page_do_bit17_swizzling,
807                                        needs_clflush);
808                 if (ret == 0)
809                         goto next_page;
810
811                 mutex_unlock(&dev->struct_mutex);
812
813                 if (likely(!i915.prefault_disable) && !prefaulted) {
814                         ret = fault_in_multipages_writeable(user_data, remain);
815                         /* Userspace is tricking us, but we've already clobbered
816                          * its pages with the prefault and promised to write the
817                          * data up to the first fault. Hence ignore any errors
818                          * and just continue. */
819                         (void)ret;
820                         prefaulted = 1;
821                 }
822
823                 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
824                                        user_data, page_do_bit17_swizzling,
825                                        needs_clflush);
826
827                 mutex_lock(&dev->struct_mutex);
828
829                 if (ret)
830                         goto out;
831
832 next_page:
833                 remain -= page_length;
834                 user_data += page_length;
835                 offset += page_length;
836         }
837
838 out:
839         i915_gem_object_unpin_pages(obj);
840
841         return ret;
842 }
843
844 /**
845  * Reads data from the object referenced by handle.
846  * @dev: drm device pointer
847  * @data: ioctl data blob
848  * @file: drm file pointer
849  *
850  * On error, the contents of *data are undefined.
851  */
852 int
853 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
854                      struct drm_file *file)
855 {
856         struct drm_i915_gem_pread *args = data;
857         struct drm_i915_gem_object *obj;
858         int ret = 0;
859
860         if (args->size == 0)
861                 return 0;
862
863         if (!access_ok(VERIFY_WRITE,
864                        u64_to_user_ptr(args->data_ptr),
865                        args->size))
866                 return -EFAULT;
867
868         ret = i915_mutex_lock_interruptible(dev);
869         if (ret)
870                 return ret;
871
872         obj = i915_gem_object_lookup(file, args->handle);
873         if (!obj) {
874                 ret = -ENOENT;
875                 goto unlock;
876         }
877
878         /* Bounds check source.  */
879         if (args->offset > obj->base.size ||
880             args->size > obj->base.size - args->offset) {
881                 ret = -EINVAL;
882                 goto out;
883         }
884
885         trace_i915_gem_object_pread(obj, args->offset, args->size);
886
887         ret = i915_gem_shmem_pread(dev, obj, args, file);
888
889         /* pread for non shmem backed objects */
890         if (ret == -EFAULT || ret == -ENODEV) {
891                 intel_runtime_pm_get(to_i915(dev));
892                 ret = i915_gem_gtt_pread(dev, obj, args->size,
893                                         args->offset, args->data_ptr);
894                 intel_runtime_pm_put(to_i915(dev));
895         }
896
897 out:
898         i915_gem_object_put(obj);
899 unlock:
900         mutex_unlock(&dev->struct_mutex);
901         return ret;
902 }
903
904 /* This is the fast write path which cannot handle
905  * page faults in the source data
906  */
907
908 static inline int
909 fast_user_write(struct io_mapping *mapping,
910                 loff_t page_base, int page_offset,
911                 char __user *user_data,
912                 int length)
913 {
914         void __iomem *vaddr_atomic;
915         void *vaddr;
916         unsigned long unwritten;
917
918         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
919         /* We can use the cpu mem copy function because this is X86. */
920         vaddr = (void __force *)vaddr_atomic + page_offset;
921         unwritten = __copy_from_user_inatomic_nocache(vaddr,
922                                                       user_data, length);
923         io_mapping_unmap_atomic(vaddr_atomic);
924         return unwritten;
925 }
926
927 /**
928  * This is the fast pwrite path, where we copy the data directly from the
929  * user into the GTT, uncached.
930  * @i915: i915 device private data
931  * @obj: i915 gem object
932  * @args: pwrite arguments structure
933  * @file: drm file pointer
934  */
935 static int
936 i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
937                          struct drm_i915_gem_object *obj,
938                          struct drm_i915_gem_pwrite *args,
939                          struct drm_file *file)
940 {
941         struct i915_ggtt *ggtt = &i915->ggtt;
942         struct drm_device *dev = obj->base.dev;
943         struct drm_mm_node node;
944         uint64_t remain, offset;
945         char __user *user_data;
946         int ret;
947         bool hit_slow_path = false;
948
949         if (obj->tiling_mode != I915_TILING_NONE)
950                 return -EFAULT;
951
952         ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
953         if (ret) {
954                 ret = insert_mappable_node(i915, &node, PAGE_SIZE);
955                 if (ret)
956                         goto out;
957
958                 ret = i915_gem_object_get_pages(obj);
959                 if (ret) {
960                         remove_mappable_node(&node);
961                         goto out;
962                 }
963
964                 i915_gem_object_pin_pages(obj);
965         } else {
966                 node.start = i915_gem_obj_ggtt_offset(obj);
967                 node.allocated = false;
968                 ret = i915_gem_object_put_fence(obj);
969                 if (ret)
970                         goto out_unpin;
971         }
972
973         ret = i915_gem_object_set_to_gtt_domain(obj, true);
974         if (ret)
975                 goto out_unpin;
976
977         intel_fb_obj_invalidate(obj, ORIGIN_GTT);
978         obj->dirty = true;
979
980         user_data = u64_to_user_ptr(args->data_ptr);
981         offset = args->offset;
982         remain = args->size;
983         while (remain) {
984                 /* Operation in this page
985                  *
986                  * page_base = page offset within aperture
987                  * page_offset = offset within page
988                  * page_length = bytes to copy for this page
989                  */
990                 u32 page_base = node.start;
991                 unsigned page_offset = offset_in_page(offset);
992                 unsigned page_length = PAGE_SIZE - page_offset;
993                 page_length = remain < page_length ? remain : page_length;
994                 if (node.allocated) {
995                         wmb(); /* flush the write before we modify the GGTT */
996                         ggtt->base.insert_page(&ggtt->base,
997                                                i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
998                                                node.start, I915_CACHE_NONE, 0);
999                         wmb(); /* flush modifications to the GGTT (insert_page) */
1000                 } else {
1001                         page_base += offset & PAGE_MASK;
1002                 }
1003                 /* If we get a fault while copying data, then (presumably) our
1004                  * source page isn't available.  Return the error and we'll
1005                  * retry in the slow path.
1006                  * If the object is non-shmem backed, we retry again with the
1007                  * If the object is non-shmem backed, we retry with the
1008                  * path that handles page faults.
1009                 if (fast_user_write(ggtt->mappable, page_base,
1010                                     page_offset, user_data, page_length)) {
1011                         hit_slow_path = true;
1012                         mutex_unlock(&dev->struct_mutex);
1013                         if (slow_user_access(ggtt->mappable,
1014                                              page_base,
1015                                              page_offset, user_data,
1016                                              page_length, true)) {
1017                                 ret = -EFAULT;
1018                                 mutex_lock(&dev->struct_mutex);
1019                                 goto out_flush;
1020                         }
1021
1022                         mutex_lock(&dev->struct_mutex);
1023                 }
1024
1025                 remain -= page_length;
1026                 user_data += page_length;
1027                 offset += page_length;
1028         }
1029
1030 out_flush:
1031         if (hit_slow_path) {
1032                 if (ret == 0 &&
1033                     (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
1034                         /* The user has modified the object whilst we tried
1035                          * reading from it, and we now have no idea what domain
1036                          * the pages should be in. As we have just been touching
1037                          * them directly, flush everything back to the GTT
1038                          * domain.
1039                          */
1040                         ret = i915_gem_object_set_to_gtt_domain(obj, false);
1041                 }
1042         }
1043
1044         intel_fb_obj_flush(obj, false, ORIGIN_GTT);
1045 out_unpin:
1046         if (node.allocated) {
1047                 wmb();
1048                 ggtt->base.clear_range(&ggtt->base,
1049                                        node.start, node.size,
1050                                        true);
1051                 i915_gem_object_unpin_pages(obj);
1052                 remove_mappable_node(&node);
1053         } else {
1054                 i915_gem_object_ggtt_unpin(obj);
1055         }
1056 out:
1057         return ret;
1058 }
1059
1060 /* Per-page copy function for the shmem pwrite fastpath.
1061  * Flushes invalid cachelines before writing to the target if
1062  * needs_clflush_before is set and flushes out any written cachelines after
1063  * writing if needs_clflush is set. */
1064 static int
1065 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
1066                   char __user *user_data,
1067                   bool page_do_bit17_swizzling,
1068                   bool needs_clflush_before,
1069                   bool needs_clflush_after)
1070 {
1071         char *vaddr;
1072         int ret;
1073
1074         if (unlikely(page_do_bit17_swizzling))
1075                 return -EINVAL;
1076
1077         vaddr = kmap_atomic(page);
1078         if (needs_clflush_before)
1079                 drm_clflush_virt_range(vaddr + shmem_page_offset,
1080                                        page_length);
1081         ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
1082                                         user_data, page_length);
1083         if (needs_clflush_after)
1084                 drm_clflush_virt_range(vaddr + shmem_page_offset,
1085                                        page_length);
1086         kunmap_atomic(vaddr);
1087
1088         return ret ? -EFAULT : 0;
1089 }
1090
1091 /* Only difference to the fast-path function is that this can handle bit17
1092  * and uses non-atomic copy and kmap functions. */
1093 static int
1094 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
1095                   char __user *user_data,
1096                   bool page_do_bit17_swizzling,
1097                   bool needs_clflush_before,
1098                   bool needs_clflush_after)
1099 {
1100         char *vaddr;
1101         int ret;
1102
1103         vaddr = kmap(page);
1104         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
1105                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
1106                                              page_length,
1107                                              page_do_bit17_swizzling);
1108         if (page_do_bit17_swizzling)
1109                 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
1110                                                 user_data,
1111                                                 page_length);
1112         else
1113                 ret = __copy_from_user(vaddr + shmem_page_offset,
1114                                        user_data,
1115                                        page_length);
1116         if (needs_clflush_after)
1117                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
1118                                              page_length,
1119                                              page_do_bit17_swizzling);
1120         kunmap(page);
1121
1122         return ret ? -EFAULT : 0;
1123 }
1124
1125 static int
1126 i915_gem_shmem_pwrite(struct drm_device *dev,
1127                       struct drm_i915_gem_object *obj,
1128                       struct drm_i915_gem_pwrite *args,
1129                       struct drm_file *file)
1130 {
1131         ssize_t remain;
1132         loff_t offset;
1133         char __user *user_data;
1134         int shmem_page_offset, page_length, ret = 0;
1135         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
1136         int hit_slowpath = 0;
1137         int needs_clflush_after = 0;
1138         int needs_clflush_before = 0;
1139         struct sg_page_iter sg_iter;
1140
1141         user_data = u64_to_user_ptr(args->data_ptr);
1142         remain = args->size;
1143
1144         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
1145
1146         ret = i915_gem_object_wait_rendering(obj, false);
1147         if (ret)
1148                 return ret;
1149
1150         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1151                 /* If we're not in the cpu write domain, set ourselves into the gtt
1152                  * write domain and manually flush cachelines (if required). This
1153                  * optimizes for the case when the gpu will use the data
1154                  * right away and we therefore have to clflush anyway. */
1155                 needs_clflush_after = cpu_write_needs_clflush(obj);
1156         }
1157         /* Same trick applies to invalidate partially written cachelines read
1158          * before writing. */
1159         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
1160                 needs_clflush_before =
1161                         !cpu_cache_is_coherent(dev, obj->cache_level);
1162
1163         ret = i915_gem_object_get_pages(obj);
1164         if (ret)
1165                 return ret;
1166
1167         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1168
1169         i915_gem_object_pin_pages(obj);
1170
1171         offset = args->offset;
1172         obj->dirty = 1;
1173
1174         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
1175                          offset >> PAGE_SHIFT) {
1176                 struct page *page = sg_page_iter_page(&sg_iter);
1177                 int partial_cacheline_write;
1178
1179                 if (remain <= 0)
1180                         break;
1181
1182                 /* Operation in this page
1183                  *
1184                  * shmem_page_offset = offset within page in shmem file
1185                  * page_length = bytes to copy for this page
1186                  */
1187                 shmem_page_offset = offset_in_page(offset);
1188
1189                 page_length = remain;
1190                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
1191                         page_length = PAGE_SIZE - shmem_page_offset;
1192
1193                 /* If we don't overwrite a cacheline completely we need to be
1194                  * careful to have up-to-date data by first clflushing. Don't
1195                  * overcomplicate things and flush the entire range being written. */
1196                 partial_cacheline_write = needs_clflush_before &&
1197                         ((shmem_page_offset | page_length)
1198                                 & (boot_cpu_data.x86_clflush_size - 1));
1199
1200                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
1201                         (page_to_phys(page) & (1 << 17)) != 0;
1202
1203                 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
1204                                         user_data, page_do_bit17_swizzling,
1205                                         partial_cacheline_write,
1206                                         needs_clflush_after);
1207                 if (ret == 0)
1208                         goto next_page;
1209
1210                 hit_slowpath = 1;
1211                 mutex_unlock(&dev->struct_mutex);
1212                 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
1213                                         user_data, page_do_bit17_swizzling,
1214                                         partial_cacheline_write,
1215                                         needs_clflush_after);
1216
1217                 mutex_lock(&dev->struct_mutex);
1218
1219                 if (ret)
1220                         goto out;
1221
1222 next_page:
1223                 remain -= page_length;
1224                 user_data += page_length;
1225                 offset += page_length;
1226         }
1227
1228 out:
1229         i915_gem_object_unpin_pages(obj);
1230
1231         if (hit_slowpath) {
1232                 /*
1233                  * Fixup: Flush cpu caches in case we didn't flush the dirty
1234                  * cachelines in-line while writing and the object moved
1235                  * out of the cpu write domain while we've dropped the lock.
1236                  */
1237                 if (!needs_clflush_after &&
1238                     obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1239                         if (i915_gem_clflush_object(obj, obj->pin_display))
1240                                 needs_clflush_after = true;
1241                 }
1242         }
1243
1244         if (needs_clflush_after)
1245                 i915_gem_chipset_flush(to_i915(dev));
1246         else
1247                 obj->cache_dirty = true;
1248
1249         intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1250         return ret;
1251 }
1252
1253 /**
1254  * Writes data to the object referenced by handle.
1255  * @dev: drm device
1256  * @data: ioctl data blob
1257  * @file: drm file
1258  *
1259  * On error, the contents of the buffer that were to be modified are undefined.
1260  */
1261 int
1262 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1263                       struct drm_file *file)
1264 {
1265         struct drm_i915_private *dev_priv = to_i915(dev);
1266         struct drm_i915_gem_pwrite *args = data;
1267         struct drm_i915_gem_object *obj;
1268         int ret;
1269
1270         if (args->size == 0)
1271                 return 0;
1272
1273         if (!access_ok(VERIFY_READ,
1274                        u64_to_user_ptr(args->data_ptr),
1275                        args->size))
1276                 return -EFAULT;
1277
1278         if (likely(!i915.prefault_disable)) {
1279                 ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
1280                                                    args->size);
1281                 if (ret)
1282                         return -EFAULT;
1283         }
1284
1285         intel_runtime_pm_get(dev_priv);
1286
1287         ret = i915_mutex_lock_interruptible(dev);
1288         if (ret)
1289                 goto put_rpm;
1290
1291         obj = i915_gem_object_lookup(file, args->handle);
1292         if (!obj) {
1293                 ret = -ENOENT;
1294                 goto unlock;
1295         }
1296
1297         /* Bounds check destination. */
1298         if (args->offset > obj->base.size ||
1299             args->size > obj->base.size - args->offset) {
1300                 ret = -EINVAL;
1301                 goto out;
1302         }
1303
1304         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1305
1306         ret = -EFAULT;
1307         /* We can only do the GTT pwrite on untiled buffers, as otherwise
1308          * it would end up going through the fenced access, and we'll get
1309          * different detiling behavior between reading and writing.
1310          * pread/pwrite currently are reading and writing from the CPU
1311          * perspective, requiring manual detiling by the client.
1312          */
1313         if (!i915_gem_object_has_struct_page(obj) ||
1314             cpu_write_needs_clflush(obj)) {
1315                 ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
1316                 /* Note that the gtt paths might fail with non-page-backed user
1317                  * pointers (e.g. gtt mappings when moving data between
1318                  * textures). Fall back to the shmem path in that case. */
1319         }
1320
1321         if (ret == -EFAULT || ret == -ENOSPC) {
1322                 if (obj->phys_handle)
1323                         ret = i915_gem_phys_pwrite(obj, args, file);
1324                 else if (i915_gem_object_has_struct_page(obj))
1325                         ret = i915_gem_shmem_pwrite(dev, obj, args, file);
1326                 else
1327                         ret = -ENODEV;
1328         }
1329
1330 out:
1331         i915_gem_object_put(obj);
1332 unlock:
1333         mutex_unlock(&dev->struct_mutex);
1334 put_rpm:
1335         intel_runtime_pm_put(dev_priv);
1336
1337         return ret;
1338 }
1339
1340 /**
1341  * Ensures that all rendering to the object has completed and the object is
1342  * safe to unbind from the GTT or access from the CPU.
1343  * @obj: i915 gem object
1344  * @readonly: waiting for read access or write
1345  */
1346 int
1347 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1348                                bool readonly)
1349 {
1350         struct reservation_object *resv;
1351         struct i915_gem_active *active;
1352         unsigned long active_mask;
1353         int idx, ret;
1354
1355         lockdep_assert_held(&obj->base.dev->struct_mutex);
1356
1357         if (!readonly) {
1358                 active = obj->last_read;
1359                 active_mask = obj->active;
1360         } else {
1361                 active_mask = 1;
1362                 active = &obj->last_write;
1363         }
1364
1365         for_each_active(active_mask, idx) {
1366                 ret = i915_gem_active_wait(&active[idx],
1367                                            &obj->base.dev->struct_mutex);
1368                 if (ret)
1369                         return ret;
1370         }
1371
1372         resv = i915_gem_object_get_dmabuf_resv(obj);
1373         if (resv) {
1374                 long err;
1375
1376                 err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
1377                                                           MAX_SCHEDULE_TIMEOUT);
1378                 if (err < 0)
1379                         return err;
1380         }
1381
1382         return 0;
1383 }
1384
1385 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1386  * as the object state may change during this call.
1387  */
1388 static __must_check int
1389 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1390                                             struct intel_rps_client *rps,
1391                                             bool readonly)
1392 {
1393         struct drm_device *dev = obj->base.dev;
1394         struct drm_i915_private *dev_priv = to_i915(dev);
1395         struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
1396         struct i915_gem_active *active;
1397         unsigned long active_mask;
1398         int ret, i, n = 0;
1399
1400         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1401         BUG_ON(!dev_priv->mm.interruptible);
1402
1403         active_mask = obj->active;
1404         if (!active_mask)
1405                 return 0;
1406
1407         if (!readonly) {
1408                 active = obj->last_read;
1409         } else {
1410                 active_mask = 1;
1411                 active = &obj->last_write;
1412         }
1413
1414         for_each_active(active_mask, i) {
1415                 struct drm_i915_gem_request *req;
1416
1417                 req = i915_gem_active_get(&active[i],
1418                                           &obj->base.dev->struct_mutex);
1419                 if (req)
1420                         requests[n++] = req;
1421         }
1422
1423         mutex_unlock(&dev->struct_mutex);
1424         ret = 0;
1425         for (i = 0; ret == 0 && i < n; i++)
1426                 ret = i915_wait_request(requests[i], true, NULL, rps);
1427         mutex_lock(&dev->struct_mutex);
1428
1429         for (i = 0; i < n; i++)
1430                 i915_gem_request_put(requests[i]);
1431
1432         return ret;
1433 }
1434
1435 static struct intel_rps_client *to_rps_client(struct drm_file *file)
1436 {
1437         struct drm_i915_file_private *fpriv = file->driver_priv;
1438         return &fpriv->rps;
1439 }
1440
1441 static enum fb_op_origin
1442 write_origin(struct drm_i915_gem_object *obj, unsigned domain)
1443 {
1444         return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
1445                ORIGIN_GTT : ORIGIN_CPU;
1446 }
1447
1448 /**
1449  * Called when user space prepares to use an object with the CPU, either
1450  * through the mmap ioctl's mapping or a GTT mapping.
1451  * @dev: drm device
1452  * @data: ioctl data blob
1453  * @file: drm file
1454  */
1455 int
1456 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1457                           struct drm_file *file)
1458 {
1459         struct drm_i915_gem_set_domain *args = data;
1460         struct drm_i915_gem_object *obj;
1461         uint32_t read_domains = args->read_domains;
1462         uint32_t write_domain = args->write_domain;
1463         int ret;
1464
1465         /* Only handle setting domains to types used by the CPU. */
1466         if (write_domain & I915_GEM_GPU_DOMAINS)
1467                 return -EINVAL;
1468
1469         if (read_domains & I915_GEM_GPU_DOMAINS)
1470                 return -EINVAL;
1471
1472         /* Having something in the write domain implies it's in the read
1473          * domain, and only that read domain.  Enforce that in the request.
1474          */
1475         if (write_domain != 0 && read_domains != write_domain)
1476                 return -EINVAL;
1477
1478         ret = i915_mutex_lock_interruptible(dev);
1479         if (ret)
1480                 return ret;
1481
1482         obj = i915_gem_object_lookup(file, args->handle);
1483         if (!obj) {
1484                 ret = -ENOENT;
1485                 goto unlock;
1486         }
1487
1488         /* Try to flush the object off the GPU without holding the lock.
1489          * We will repeat the flush holding the lock in the normal manner
1490          * to catch cases where we are gazumped.
1491          */
1492         ret = i915_gem_object_wait_rendering__nonblocking(obj,
1493                                                           to_rps_client(file),
1494                                                           !write_domain);
1495         if (ret)
1496                 goto unref;
1497
1498         if (read_domains & I915_GEM_DOMAIN_GTT)
1499                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1500         else
1501                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1502
1503         if (write_domain != 0)
1504                 intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
1505
1506 unref:
1507         i915_gem_object_put(obj);
1508 unlock:
1509         mutex_unlock(&dev->struct_mutex);
1510         return ret;
1511 }
1512
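/*
 * Illustrative userspace usage of the set_domain ioctl above (a minimal
 * sketch, not authoritative; it assumes a libdrm-style drmIoctl() wrapper
 * and an already-created GEM handle): move the object into the GTT domain
 * for writing before touching a GTT mmap of it.
 *
 *	#include <stdint.h>
 *	#include <xf86drm.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int set_to_gtt_domain(int fd, uint32_t handle)
 *	{
 *		struct drm_i915_gem_set_domain sd = {
 *			.handle = handle,
 *			// a write domain implies the matching read domain
 *			.read_domains = I915_GEM_DOMAIN_GTT,
 *			.write_domain = I915_GEM_DOMAIN_GTT,
 *		};
 *
 *		return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 *	}
 */
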
1513 /**
1514  * Called when user space has done writes to this buffer
1515  * @dev: drm device
1516  * @data: ioctl data blob
1517  * @file: drm file
1518  */
1519 int
1520 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1521                          struct drm_file *file)
1522 {
1523         struct drm_i915_gem_sw_finish *args = data;
1524         struct drm_i915_gem_object *obj;
1525         int ret = 0;
1526
1527         ret = i915_mutex_lock_interruptible(dev);
1528         if (ret)
1529                 return ret;
1530
1531         obj = i915_gem_object_lookup(file, args->handle);
1532         if (!obj) {
1533                 ret = -ENOENT;
1534                 goto unlock;
1535         }
1536
1537         /* Pinned buffers may be scanout, so flush the cache */
1538         if (obj->pin_display)
1539                 i915_gem_object_flush_cpu_write_domain(obj);
1540
1541         i915_gem_object_put(obj);
1542 unlock:
1543         mutex_unlock(&dev->struct_mutex);
1544         return ret;
1545 }
1546
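/*
 * Illustrative userspace usage of the sw_finish ioctl above (a sketch,
 * assuming libdrm's drmIoctl() and an existing GEM handle): tell the kernel
 * that CPU writes to a potentially scanned-out buffer are done so it can
 * flush them.
 *
 *	struct drm_i915_gem_sw_finish finish = { .handle = handle };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &finish);
 */
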
1547 /**
1548  * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1549  *                       it is mapped to.
1550  * @dev: drm device
1551  * @data: ioctl data blob
1552  * @file: drm file
1553  *
1554  * While the mapping holds a reference on the contents of the object, it doesn't
1555  * imply a ref on the object itself.
1556  *
1557  * IMPORTANT:
1558  *
1559  * DRM driver writers who look at this function as an example for how to do GEM
1560  * mmap support, please don't implement mmap support like here. The modern way
1561  * to implement DRM mmap support is with an mmap offset ioctl (like
1562  * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1563  * That way debug tooling like valgrind will understand what's going on; hiding
1564  * the mmap call in a driver-private ioctl will break that. The i915 driver only
1565  * does cpu mmaps this way because we didn't know better.
1566  */
1567 int
1568 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1569                     struct drm_file *file)
1570 {
1571         struct drm_i915_gem_mmap *args = data;
1572         struct drm_i915_gem_object *obj;
1573         unsigned long addr;
1574
1575         if (args->flags & ~(I915_MMAP_WC))
1576                 return -EINVAL;
1577
1578         if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1579                 return -ENODEV;
1580
1581         obj = i915_gem_object_lookup(file, args->handle);
1582         if (!obj)
1583                 return -ENOENT;
1584
1585         /* prime objects have no backing filp to GEM mmap
1586          * pages from.
1587          */
1588         if (!obj->base.filp) {
1589                 i915_gem_object_put_unlocked(obj);
1590                 return -EINVAL;
1591         }
1592
1593         addr = vm_mmap(obj->base.filp, 0, args->size,
1594                        PROT_READ | PROT_WRITE, MAP_SHARED,
1595                        args->offset);
1596         if (args->flags & I915_MMAP_WC) {
1597                 struct mm_struct *mm = current->mm;
1598                 struct vm_area_struct *vma;
1599
1600                 if (down_write_killable(&mm->mmap_sem)) {
1601                         i915_gem_object_put_unlocked(obj);
1602                         return -EINTR;
1603                 }
1604                 vma = find_vma(mm, addr);
1605                 if (vma)
1606                         vma->vm_page_prot =
1607                                 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1608                 else
1609                         addr = -ENOMEM;
1610                 up_write(&mm->mmap_sem);
1611
1612                 /* This may race, but that's ok, it only gets set */
1613                 WRITE_ONCE(obj->has_wc_mmap, true);
1614         }
1615         i915_gem_object_put_unlocked(obj);
1616         if (IS_ERR((void *)addr))
1617                 return addr;
1618
1619         args->addr_ptr = (uint64_t) addr;
1620
1621         return 0;
1622 }
1623
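/*
 * Illustrative userspace usage of the mmap ioctl above (a minimal sketch,
 * assuming libdrm's drmIoctl(), an existing GEM handle and its size): map
 * the whole object with a write-combining CPU mapping.
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = obj_size,
 *		.flags = I915_MMAP_WC,	// needs PAT, see the check above
 *	};
 *	void *ptr = NULL;
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *		ptr = (void *)(uintptr_t)arg.addr_ptr;
 */
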
1624 /**
1625  * i915_gem_fault - fault a page into the GTT
1626  * @vma: VMA in question
1627  * @vmf: fault info
1628  *
1629  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1630  * from userspace.  The fault handler takes care of binding the object to
1631  * the GTT (if needed), allocating and programming a fence register (again,
1632  * only if needed based on whether the old reg is still valid or the object
1633  * is tiled) and inserting a new PTE into the faulting process.
1634  *
1635  * Note that the faulting process may involve evicting existing objects
1636  * from the GTT and/or fence registers to make room.  So performance may
1637  * suffer if the GTT working set is large or there are few fence registers
1638  * left.
1639  */
1640 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1641 {
1642         struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1643         struct drm_device *dev = obj->base.dev;
1644         struct drm_i915_private *dev_priv = to_i915(dev);
1645         struct i915_ggtt *ggtt = &dev_priv->ggtt;
1646         struct i915_ggtt_view view = i915_ggtt_view_normal;
1647         pgoff_t page_offset;
1648         unsigned long pfn;
1649         int ret = 0;
1650         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1651
1652         intel_runtime_pm_get(dev_priv);
1653
1654         /* We don't use vmf->pgoff since that has the fake offset */
1655         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1656                 PAGE_SHIFT;
1657
1658         ret = i915_mutex_lock_interruptible(dev);
1659         if (ret)
1660                 goto out;
1661
1662         trace_i915_gem_object_fault(obj, page_offset, true, write);
1663
1664         /* Try to flush the object off the GPU first without holding the lock.
1665          * Upon reacquiring the lock, we will perform our sanity checks and then
1666          * repeat the flush holding the lock in the normal manner to catch cases
1667          * where we are gazumped.
1668          */
1669         ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1670         if (ret)
1671                 goto unlock;
1672
1673         /* Access to snoopable pages through the GTT is incoherent. */
1674         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1675                 ret = -EFAULT;
1676                 goto unlock;
1677         }
1678
1679         /* Use a partial view if the object is bigger than the aperture. */
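        /* For example (assuming the vma maps the whole object): a fault at
         * page offset 100000 of an untiled object larger than the mappable
         * aperture yields a partial view starting at page
         * rounddown(100000, 256) = 99840 and covering min(256, remaining)
         * = 256 pages, i.e. a 1 MiB window around the faulting address.
         */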
1680         if (obj->base.size >= ggtt->mappable_end &&
1681             obj->tiling_mode == I915_TILING_NONE) {
1682                 static const unsigned int chunk_size = 256; /* 1 MiB */
1683
1684                 memset(&view, 0, sizeof(view));
1685                 view.type = I915_GGTT_VIEW_PARTIAL;
1686                 view.params.partial.offset = rounddown(page_offset, chunk_size);
1687                 view.params.partial.size =
1688                         min_t(unsigned int,
1689                               chunk_size,
1690                               (vma->vm_end - vma->vm_start)/PAGE_SIZE -
1691                               view.params.partial.offset);
1692         }
1693
1694         /* Now pin it into the GTT if needed */
1695         ret = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
1696         if (ret)
1697                 goto unlock;
1698
1699         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1700         if (ret)
1701                 goto unpin;
1702
1703         ret = i915_gem_object_get_fence(obj);
1704         if (ret)
1705                 goto unpin;
1706
1707         /* Finally, remap it using the new GTT offset */
1708         pfn = ggtt->mappable_base +
1709                 i915_gem_obj_ggtt_offset_view(obj, &view);
1710         pfn >>= PAGE_SHIFT;
1711
1712         if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
1713                 /* Overriding existing pages in partial view does not cause
1714                  * us any trouble as TLBs are still valid because the fault
1715                  * is due to userspace losing part of the mapping or never
1716          * having accessed it before (within this partial view's range).
1717                  */
1718                 unsigned long base = vma->vm_start +
1719                                      (view.params.partial.offset << PAGE_SHIFT);
1720                 unsigned int i;
1721
1722                 for (i = 0; i < view.params.partial.size; i++) {
1723                         ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
1724                         if (ret)
1725                                 break;
1726                 }
1727
1728                 obj->fault_mappable = true;
1729         } else {
1730                 if (!obj->fault_mappable) {
1731                         unsigned long size = min_t(unsigned long,
1732                                                    vma->vm_end - vma->vm_start,
1733                                                    obj->base.size);
1734                         int i;
1735
1736                         for (i = 0; i < size >> PAGE_SHIFT; i++) {
1737                                 ret = vm_insert_pfn(vma,
1738                                                     (unsigned long)vma->vm_start + i * PAGE_SIZE,
1739                                                     pfn + i);
1740                                 if (ret)
1741                                         break;
1742                         }
1743
1744                         obj->fault_mappable = true;
1745                 } else
1746                         ret = vm_insert_pfn(vma,
1747                                             (unsigned long)vmf->virtual_address,
1748                                             pfn + page_offset);
1749         }
1750 unpin:
1751         i915_gem_object_ggtt_unpin_view(obj, &view);
1752 unlock:
1753         mutex_unlock(&dev->struct_mutex);
1754 out:
1755         switch (ret) {
1756         case -EIO:
1757                 /*
1758                  * We eat errors when the gpu is terminally wedged to avoid
1759                  * userspace unduly crashing (gl has no provisions for mmaps to
1760                  * fail). But any other -EIO isn't ours (e.g. swap in failure)
1761                  * and so needs to be reported.
1762                  */
1763                 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1764                         ret = VM_FAULT_SIGBUS;
1765                         break;
1766                 }
1767         case -EAGAIN:
1768                 /*
1769                  * EAGAIN means the gpu is hung and we'll wait for the error
1770                  * handler to reset everything when re-faulting in
1771                  * i915_mutex_lock_interruptible.
1772                  */
1773         case 0:
1774         case -ERESTARTSYS:
1775         case -EINTR:
1776         case -EBUSY:
1777                 /*
1778                  * EBUSY is ok: this just means that another thread
1779                  * already did the job.
1780                  */
1781                 ret = VM_FAULT_NOPAGE;
1782                 break;
1783         case -ENOMEM:
1784                 ret = VM_FAULT_OOM;
1785                 break;
1786         case -ENOSPC:
1787         case -EFAULT:
1788                 ret = VM_FAULT_SIGBUS;
1789                 break;
1790         default:
1791                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1792                 ret = VM_FAULT_SIGBUS;
1793                 break;
1794         }
1795
1796         intel_runtime_pm_put(dev_priv);
1797         return ret;
1798 }
1799
1800 /**
1801  * i915_gem_release_mmap - remove physical page mappings
1802  * @obj: obj in question
1803  *
1804  * Preserve the reservation of the mmapping with the DRM core code, but
1805  * relinquish ownership of the pages back to the system.
1806  *
1807  * It is vital that we remove the page mapping if we have mapped a tiled
1808  * object through the GTT and then lose the fence register due to
1809  * resource pressure. Similarly if the object has been moved out of the
1810  * aperture, then pages mapped into userspace must be revoked. Removing the
1811  * mapping will then trigger a page fault on the next user access, allowing
1812  * fixup by i915_gem_fault().
1813  */
1814 void
1815 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1816 {
1817         /* Serialisation between user GTT access and our code depends upon
1818          * revoking the CPU's PTE whilst the mutex is held. The next user
1819          * pagefault then has to wait until we release the mutex.
1820          */
1821         lockdep_assert_held(&obj->base.dev->struct_mutex);
1822
1823         if (!obj->fault_mappable)
1824                 return;
1825
1826         drm_vma_node_unmap(&obj->base.vma_node,
1827                            obj->base.dev->anon_inode->i_mapping);
1828
1829         /* Ensure that the CPU's PTEs are revoked and there are no outstanding
1830          * memory transactions from userspace before we return. The TLB
1831          * flushing implied by changing the PTEs above *should* be
1832          * sufficient; an extra barrier here just provides us with a bit
1833          * of paranoid documentation about our requirement to serialise
1834          * memory writes before touching registers / GSM.
1835          */
1836         wmb();
1837
1838         obj->fault_mappable = false;
1839 }
1840
1841 void
1842 i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1843 {
1844         struct drm_i915_gem_object *obj;
1845
1846         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1847                 i915_gem_release_mmap(obj);
1848 }
1849
1850 /**
1851  * i915_gem_get_ggtt_size - return required global GTT size for an object
1852  * @dev_priv: i915 device
1853  * @size: object size
1854  * @tiling_mode: tiling mode
1855  *
1856  * Return the required global GTT size for an object, taking into account
1857  * potential fence register mapping.
1858  */
1859 u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
1860                            u64 size, int tiling_mode)
1861 {
1862         u64 ggtt_size;
1863
1864         GEM_BUG_ON(size == 0);
1865
1866         if (INTEL_GEN(dev_priv) >= 4 ||
1867             tiling_mode == I915_TILING_NONE)
1868                 return size;
1869
1870         /* Previous chips need a power-of-two fence region when tiling */
1871         if (IS_GEN3(dev_priv))
1872                 ggtt_size = 1024*1024;
1873         else
1874                 ggtt_size = 512*1024;
1875
1876         while (ggtt_size < size)
1877                 ggtt_size <<= 1;
1878
1879         return ggtt_size;
1880 }
1881
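/*
 * A worked example of the rounding above: a 1.5 MiB tiled object on gen3
 * starts from a 1 MiB fence region and doubles until the region covers the
 * object (1 MiB -> 2 MiB), so 2 MiB of GGTT is required; on gen4+, or for
 * untiled objects, the object size is used unchanged.
 */
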
1882 /**
1883  * i915_gem_get_ggtt_alignment - return required global GTT alignment
1884  * @dev_priv: i915 device
1885  * @size: object size
1886  * @tiling_mode: tiling mode
1887  * @fenced: is fenced alignment required or not
1888  *
1889  * Return the required global GTT alignment for an object, taking into account
1890  * potential fence register mapping.
1891  */
1892 u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
1893                                 int tiling_mode, bool fenced)
1894 {
1895         GEM_BUG_ON(size == 0);
1896
1897         /*
1898          * Minimum alignment is 4k (GTT page size), but might be greater
1899          * if a fence register is needed for the object.
1900          */
1901         if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
1902             tiling_mode == I915_TILING_NONE)
1903                 return 4096;
1904
1905         /*
1906          * Previous chips need to be aligned to the size of the smallest
1907          * fence register that can contain the object.
1908          */
1909         return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
1910 }
1911
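/*
 * Continuing the worked example above: the same 1.5 MiB tiled object on
 * gen3 must also be placed at a 2 MiB-aligned GGTT offset when a fenced
 * mapping is required, whereas gen4+, unfenced G33, and untiled objects
 * only need the minimum 4 KiB GTT page alignment.
 */
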
1912 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1913 {
1914         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
1915         int ret;
1916
1917         dev_priv->mm.shrinker_no_lock_stealing = true;
1918
1919         ret = drm_gem_create_mmap_offset(&obj->base);
1920         if (ret != -ENOSPC)
1921                 goto out;
1922
1923         /* Badly fragmented mmap space? The only way we can recover
1924          * space is by destroying unwanted objects. We can't randomly release
1925          * mmap_offsets as userspace expects them to be persistent for the
1926          * lifetime of the objects. The closest we can do is to release the
1927          * offsets on purgeable objects by truncating them and marking them purged,
1928          * which prevents userspace from ever using that object again.
1929          */
1930         i915_gem_shrink(dev_priv,
1931                         obj->base.size >> PAGE_SHIFT,
1932                         I915_SHRINK_BOUND |
1933                         I915_SHRINK_UNBOUND |
1934                         I915_SHRINK_PURGEABLE);
1935         ret = drm_gem_create_mmap_offset(&obj->base);
1936         if (ret != -ENOSPC)
1937                 goto out;
1938
1939         i915_gem_shrink_all(dev_priv);
1940         ret = drm_gem_create_mmap_offset(&obj->base);
1941 out:
1942         dev_priv->mm.shrinker_no_lock_stealing = false;
1943
1944         return ret;
1945 }
1946
1947 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1948 {
1949         drm_gem_free_mmap_offset(&obj->base);
1950 }
1951
1952 int
1953 i915_gem_mmap_gtt(struct drm_file *file,
1954                   struct drm_device *dev,
1955                   uint32_t handle,
1956                   uint64_t *offset)
1957 {
1958         struct drm_i915_gem_object *obj;
1959         int ret;
1960
1961         ret = i915_mutex_lock_interruptible(dev);
1962         if (ret)
1963                 return ret;
1964
1965         obj = i915_gem_object_lookup(file, handle);
1966         if (!obj) {
1967                 ret = -ENOENT;
1968                 goto unlock;
1969         }
1970
1971         if (obj->madv != I915_MADV_WILLNEED) {
1972                 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
1973                 ret = -EFAULT;
1974                 goto out;
1975         }
1976
1977         ret = i915_gem_object_create_mmap_offset(obj);
1978         if (ret)
1979                 goto out;
1980
1981         *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
1982
1983 out:
1984         i915_gem_object_put(obj);
1985 unlock:
1986         mutex_unlock(&dev->struct_mutex);
1987         return ret;
1988 }
1989
1990 /**
1991  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1992  * @dev: DRM device
1993  * @data: GTT mapping ioctl data
1994  * @file: GEM object info
1995  *
1996  * Simply returns the fake offset to userspace so it can mmap it.
1997  * The mmap call will end up in drm_gem_mmap(), which will set things
1998  * up so we can get faults in the handler above.
1999  *
2000  * The fault handler will take care of binding the object into the GTT
2001  * (since it may have been evicted to make room for something), allocating
2002  * a fence register, and mapping the appropriate aperture address into
2003  * userspace.
2004  */
2005 int
2006 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2007                         struct drm_file *file)
2008 {
2009         struct drm_i915_gem_mmap_gtt *args = data;
2010
2011         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
2012 }
2013
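/*
 * Illustrative userspace usage of the mmap_gtt ioctl above (a sketch,
 * assuming libdrm's drmIoctl() and an existing GEM handle): the ioctl only
 * returns the fake offset; the mapping itself is created by calling mmap()
 * on the DRM fd with that offset, which routes faults into i915_gem_fault().
 *
 *	#include <sys/mman.h>
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	void *ptr = MAP_FAILED;
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
 *		ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.offset);
 */
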
2014 /* Immediately discard the backing storage */
2015 static void
2016 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2017 {
2018         i915_gem_object_free_mmap_offset(obj);
2019
2020         if (obj->base.filp == NULL)
2021                 return;
2022
2023         /* Our goal here is to return as much of the memory as
2024          * possible back to the system as we are called from OOM.
2025          * To do this we must instruct the shmfs to drop all of its
2026          * backing pages, *now*.
2027          */
2028         shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2029         obj->madv = __I915_MADV_PURGED;
2030 }
2031
2032 /* Try to discard unwanted pages */
2033 static void
2034 i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2035 {
2036         struct address_space *mapping;
2037
2038         switch (obj->madv) {
2039         case I915_MADV_DONTNEED:
2040                 i915_gem_object_truncate(obj);
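                /* fall through */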
2041         case __I915_MADV_PURGED:
2042                 return;
2043         }
2044
2045         if (obj->base.filp == NULL)
2046                 return;
2047
2048         mapping = file_inode(obj->base.filp)->i_mapping;
2049         invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2050 }
2051
2052 static void
2053 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2054 {
2055         struct sgt_iter sgt_iter;
2056         struct page *page;
2057         int ret;
2058
2059         BUG_ON(obj->madv == __I915_MADV_PURGED);
2060
2061         ret = i915_gem_object_set_to_cpu_domain(obj, true);
2062         if (WARN_ON(ret)) {
2063                 /* In the event of a disaster, abandon all caches and
2064                  * hope for the best.
2065                  */
2066                 i915_gem_clflush_object(obj, true);
2067                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2068         }
2069
2070         i915_gem_gtt_finish_object(obj);
2071
2072         if (i915_gem_object_needs_bit17_swizzle(obj))
2073                 i915_gem_object_save_bit_17_swizzle(obj);
2074
2075         if (obj->madv == I915_MADV_DONTNEED)
2076                 obj->dirty = 0;
2077
2078         for_each_sgt_page(page, sgt_iter, obj->pages) {
2079                 if (obj->dirty)
2080                         set_page_dirty(page);
2081
2082                 if (obj->madv == I915_MADV_WILLNEED)
2083                         mark_page_accessed(page);
2084
2085                 put_page(page);
2086         }
2087         obj->dirty = 0;
2088
2089         sg_free_table(obj->pages);
2090         kfree(obj->pages);
2091 }
2092
2093 int
2094 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2095 {
2096         const struct drm_i915_gem_object_ops *ops = obj->ops;
2097
2098         if (obj->pages == NULL)
2099                 return 0;
2100
2101         if (obj->pages_pin_count)
2102                 return -EBUSY;
2103
2104         GEM_BUG_ON(obj->bind_count);
2105
2106         /* ->put_pages might need to allocate memory for the bit17 swizzle
2107          * array, hence protect the pages from being reaped by removing the
2108          * object from the gtt lists early. */
2109         list_del(&obj->global_list);
2110
2111         if (obj->mapping) {
2112                 if (is_vmalloc_addr(obj->mapping))
2113                         vunmap(obj->mapping);
2114                 else
2115                         kunmap(kmap_to_page(obj->mapping));
2116                 obj->mapping = NULL;
2117         }
2118
2119         ops->put_pages(obj);
2120         obj->pages = NULL;
2121
2122         i915_gem_object_invalidate(obj);
2123
2124         return 0;
2125 }
2126
2127 static int
2128 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2129 {
2130         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2131         int page_count, i;
2132         struct address_space *mapping;
2133         struct sg_table *st;
2134         struct scatterlist *sg;
2135         struct sgt_iter sgt_iter;
2136         struct page *page;
2137         unsigned long last_pfn = 0;     /* suppress gcc warning */
2138         int ret;
2139         gfp_t gfp;
2140
2141         /* Assert that the object is not currently in any GPU domain. As it
2142          * wasn't in the GTT, there shouldn't be any way it could have been in
2143          * a GPU cache
2144          */
2145         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2146         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2147
2148         st = kmalloc(sizeof(*st), GFP_KERNEL);
2149         if (st == NULL)
2150                 return -ENOMEM;
2151
2152         page_count = obj->base.size / PAGE_SIZE;
2153         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2154                 kfree(st);
2155                 return -ENOMEM;
2156         }
2157
2158         /* Get the list of pages out of our struct file.  They'll be pinned
2159          * at this point until we release them.
2160          *
2161          * Fail silently without starting the shrinker
2162          */
2163         mapping = file_inode(obj->base.filp)->i_mapping;
2164         gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
2165         gfp |= __GFP_NORETRY | __GFP_NOWARN;
2166         sg = st->sgl;
2167         st->nents = 0;
2168         for (i = 0; i < page_count; i++) {
2169                 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2170                 if (IS_ERR(page)) {
2171                         i915_gem_shrink(dev_priv,
2172                                         page_count,
2173                                         I915_SHRINK_BOUND |
2174                                         I915_SHRINK_UNBOUND |
2175                                         I915_SHRINK_PURGEABLE);
2176                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2177                 }
2178                 if (IS_ERR(page)) {
2179                         /* We've tried hard to allocate the memory by reaping
2180                          * our own buffer, now let the real VM do its job and
2181                          * go down in flames if truly OOM.
2182                          */
2183                         i915_gem_shrink_all(dev_priv);
2184                         page = shmem_read_mapping_page(mapping, i);
2185                         if (IS_ERR(page)) {
2186                                 ret = PTR_ERR(page);
2187                                 goto err_pages;
2188                         }
2189                 }
2190 #ifdef CONFIG_SWIOTLB
2191                 if (swiotlb_nr_tbl()) {
2192                         st->nents++;
2193                         sg_set_page(sg, page, PAGE_SIZE, 0);
2194                         sg = sg_next(sg);
2195                         continue;
2196                 }
2197 #endif
2198                 if (!i || page_to_pfn(page) != last_pfn + 1) {
2199                         if (i)
2200                                 sg = sg_next(sg);
2201                         st->nents++;
2202                         sg_set_page(sg, page, PAGE_SIZE, 0);
2203                 } else {
2204                         sg->length += PAGE_SIZE;
2205                 }
2206                 last_pfn = page_to_pfn(page);
2207
2208                 /* Check that the i965g/gm workaround works. */
2209                 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2210         }
2211 #ifdef CONFIG_SWIOTLB
2212         if (!swiotlb_nr_tbl())
2213 #endif
2214                 sg_mark_end(sg);
2215         obj->pages = st;
2216
2217         ret = i915_gem_gtt_prepare_object(obj);
2218         if (ret)
2219                 goto err_pages;
2220
2221         if (i915_gem_object_needs_bit17_swizzle(obj))
2222                 i915_gem_object_do_bit_17_swizzle(obj);
2223
2224         if (obj->tiling_mode != I915_TILING_NONE &&
2225             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2226                 i915_gem_object_pin_pages(obj);
2227
2228         return 0;
2229
2230 err_pages:
2231         sg_mark_end(sg);
2232         for_each_sgt_page(page, sgt_iter, st)
2233                 put_page(page);
2234         sg_free_table(st);
2235         kfree(st);
2236
2237         /* shmemfs first checks if there is enough memory to allocate the page
2238          * and reports ENOSPC should there be insufficient, along with the usual
2239          * ENOMEM for a genuine allocation failure.
2240          *
2241          * We use ENOSPC in our driver to mean that we have run out of aperture
2242          * space and so want to translate the error from shmemfs back to our
2243          * usual understanding of ENOMEM.
2244          */
2245         if (ret == -ENOSPC)
2246                 ret = -ENOMEM;
2247
2248         return ret;
2249 }
2250
2251 /* Ensure that the associated pages are gathered from the backing storage
2252  * and pinned into our object. i915_gem_object_get_pages() may be called
2253  * multiple times before they are released by a single call to
2254  * i915_gem_object_put_pages() - once the pages are no longer referenced
2255  * either as a result of memory pressure (reaping pages under the shrinker)
2256  * or as the object is itself released.
2257  */
2258 int
2259 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2260 {
2261         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2262         const struct drm_i915_gem_object_ops *ops = obj->ops;
2263         int ret;
2264
2265         if (obj->pages)
2266                 return 0;
2267
2268         if (obj->madv != I915_MADV_WILLNEED) {
2269                 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2270                 return -EFAULT;
2271         }
2272
2273         BUG_ON(obj->pages_pin_count);
2274
2275         ret = ops->get_pages(obj);
2276         if (ret)
2277                 return ret;
2278
2279         list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2280
2281         obj->get_page.sg = obj->pages->sgl;
2282         obj->get_page.last = 0;
2283
2284         return 0;
2285 }
2286
2287 /* The 'mapping' part of i915_gem_object_pin_map() below */
2288 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
2289 {
2290         unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2291         struct sg_table *sgt = obj->pages;
2292         struct sgt_iter sgt_iter;
2293         struct page *page;
2294         struct page *stack_pages[32];
2295         struct page **pages = stack_pages;
2296         unsigned long i = 0;
2297         void *addr;
2298
2299         /* A single page can always be kmapped */
2300         if (n_pages == 1)
2301                 return kmap(sg_page(sgt->sgl));
2302
2303         if (n_pages > ARRAY_SIZE(stack_pages)) {
2304                 /* Too big for stack -- allocate temporary array instead */
2305                 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
2306                 if (!pages)
2307                         return NULL;
2308         }
2309
2310         for_each_sgt_page(page, sgt_iter, sgt)
2311                 pages[i++] = page;
2312
2313         /* Check that we have the expected number of pages */
2314         GEM_BUG_ON(i != n_pages);
2315
2316         addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
2317
2318         if (pages != stack_pages)
2319                 drm_free_large(pages);
2320
2321         return addr;
2322 }
2323
2324 /* get, pin, and map the pages of the object into kernel space */
2325 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
2326 {
2327         int ret;
2328
2329         lockdep_assert_held(&obj->base.dev->struct_mutex);
2330
2331         ret = i915_gem_object_get_pages(obj);
2332         if (ret)
2333                 return ERR_PTR(ret);
2334
2335         i915_gem_object_pin_pages(obj);
2336
2337         if (!obj->mapping) {
2338                 obj->mapping = i915_gem_object_map(obj);
2339                 if (!obj->mapping) {
2340                         i915_gem_object_unpin_pages(obj);
2341                         return ERR_PTR(-ENOMEM);
2342                 }
2343         }
2344
2345         return obj->mapping;
2346 }
2347
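/*
 * A minimal in-kernel usage sketch (an illustration, assuming the caller
 * holds struct_mutex and later balances the pin with
 * i915_gem_object_unpin_map()):
 *
 *	void *vaddr = i915_gem_object_pin_map(obj);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	i915_gem_object_unpin_map(obj);
 */
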
2348 static void
2349 i915_gem_object_retire__write(struct i915_gem_active *active,
2350                               struct drm_i915_gem_request *request)
2351 {
2352         struct drm_i915_gem_object *obj =
2353                 container_of(active, struct drm_i915_gem_object, last_write);
2354
2355         intel_fb_obj_flush(obj, true, ORIGIN_CS);
2356 }
2357
2358 static void
2359 i915_gem_object_retire__read(struct i915_gem_active *active,
2360                              struct drm_i915_gem_request *request)
2361 {
2362         int idx = request->engine->id;
2363         struct drm_i915_gem_object *obj =
2364                 container_of(active, struct drm_i915_gem_object, last_read[idx]);
2365
2366         GEM_BUG_ON((obj->active & (1 << idx)) == 0);
2367
2368         obj->active &= ~(1 << idx);
2369         if (obj->active)
2370                 return;
2371
2372         /* Bump our place on the bound list to keep it roughly in LRU order
2373          * so that we don't steal from recently used but inactive objects
2374          * (unless we are forced to, of course!)
2375          */
2376         if (obj->bind_count)
2377                 list_move_tail(&obj->global_list,
2378                                &request->i915->mm.bound_list);
2379
2380         i915_gem_object_put(obj);
2381 }
2382
2383 static bool i915_context_is_banned(const struct i915_gem_context *ctx)
2384 {
2385         unsigned long elapsed;
2386
2387         if (ctx->hang_stats.banned)
2388                 return true;
2389
2390         elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2391         if (ctx->hang_stats.ban_period_seconds &&
2392             elapsed <= ctx->hang_stats.ban_period_seconds) {
2393                 DRM_DEBUG("context hanging too fast, banning!\n");
2394                 return true;
2395         }
2396
2397         return false;
2398 }
2399
2400 static void i915_set_reset_status(struct i915_gem_context *ctx,
2401                                   const bool guilty)
2402 {
2403         struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
2404
2405         if (guilty) {
2406                 hs->banned = i915_context_is_banned(ctx);
2407                 hs->batch_active++;
2408                 hs->guilty_ts = get_seconds();
2409         } else {
2410                 hs->batch_pending++;
2411         }
2412 }
2413
2414 struct drm_i915_gem_request *
2415 i915_gem_find_active_request(struct intel_engine_cs *engine)
2416 {
2417         struct drm_i915_gem_request *request;
2418
2419         /* We are called by the error capture and reset at a random
2420          * point in time. In particular, note that neither is crucially
2421          * ordered with an interrupt. After a hang, the GPU is dead and we
2422          * assume that no more writes can happen (we waited long enough for
2423          * all writes that were in transaction to be flushed) - adding an
2424          * extra delay for a recent interrupt is pointless. Hence, we do
2425          * not need an engine->irq_seqno_barrier() before the seqno reads.
2426          */
2427         list_for_each_entry(request, &engine->request_list, link) {
2428                 if (i915_gem_request_completed(request))
2429                         continue;
2430
2431                 return request;
2432         }
2433
2434         return NULL;
2435 }
2436
2437 static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
2438 {
2439         struct drm_i915_gem_request *request;
2440         bool ring_hung;
2441
2442         request = i915_gem_find_active_request(engine);
2443         if (request == NULL)
2444                 return;
2445
2446         ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2447
2448         i915_set_reset_status(request->ctx, ring_hung);
2449         list_for_each_entry_continue(request, &engine->request_list, link)
2450                 i915_set_reset_status(request->ctx, false);
2451 }
2452
2453 static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
2454 {
2455         struct intel_ring *ring;
2456
2457         /* Mark all pending requests as complete so that any concurrent
2458          * (lockless) lookup doesn't try and wait upon the request as we
2459          * reset it.
2460          */
2461         intel_engine_init_seqno(engine, engine->last_submitted_seqno);
2462
2463         /*
2464          * Clear the execlists queue before freeing the requests, as those
2465          * are the ones that keep the context and ringbuffer backing objects
2466          * pinned in place.
2467          */
2468
2469         if (i915.enable_execlists) {
2470                 /* Ensure irq handler finishes or is cancelled. */
2471                 tasklet_kill(&engine->irq_tasklet);
2472
2473                 intel_execlists_cancel_requests(engine);
2474         }
2475
2476         /*
2477          * We must free the requests after all the corresponding objects have
2478          * been moved off active lists. Which is the same order as the normal
2479          * retire_requests function does. This is important if objects hold
2480          * implicit references on things like ppgtt address spaces through
2481          * the request.
2482          */
2483         if (!list_empty(&engine->request_list)) {
2484                 struct drm_i915_gem_request *request;
2485
2486                 request = list_last_entry(&engine->request_list,
2487                                           struct drm_i915_gem_request,
2488                                           link);
2489
2490                 i915_gem_request_retire_upto(request);
2491         }
2492
2493         /* Having flushed all requests from all queues, we know that all
2494          * ringbuffers must now be empty. However, since we do not reclaim
2495          * all space when retiring the request (to prevent HEADs colliding
2496          * with rapid ringbuffer wraparound) the amount of available space
2497          * upon reset is less than when we start. Do one more pass over
2498          * all the ringbuffers to reset last_retired_head.
2499          */
2500         list_for_each_entry(ring, &engine->buffers, link) {
2501                 ring->last_retired_head = ring->tail;
2502                 intel_ring_update_space(ring);
2503         }
2504
2505         engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
2506 }
2507
2508 void i915_gem_reset(struct drm_device *dev)
2509 {
2510         struct drm_i915_private *dev_priv = to_i915(dev);
2511         struct intel_engine_cs *engine;
2512
2513         /*
2514          * Before we free the objects from the requests, we need to inspect
2515          * them for finding the guilty party. As the requests only borrow
2516          * their reference to the objects, the inspection must be done first.
2517          */
2518         for_each_engine(engine, dev_priv)
2519                 i915_gem_reset_engine_status(engine);
2520
2521         for_each_engine(engine, dev_priv)
2522                 i915_gem_reset_engine_cleanup(engine);
2523         mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
2524
2525         i915_gem_context_reset(dev);
2526
2527         i915_gem_restore_fences(dev);
2528 }
2529
2530 static void
2531 i915_gem_retire_work_handler(struct work_struct *work)
2532 {
2533         struct drm_i915_private *dev_priv =
2534                 container_of(work, typeof(*dev_priv), gt.retire_work.work);
2535         struct drm_device *dev = &dev_priv->drm;
2536
2537         /* Come back later if the device is busy... */
2538         if (mutex_trylock(&dev->struct_mutex)) {
2539                 i915_gem_retire_requests(dev_priv);
2540                 mutex_unlock(&dev->struct_mutex);
2541         }
2542
2543         /* Keep the retire handler running until we are finally idle.
2544          * We do not need to do this test under locking as in the worst-case
2545          * we queue the retire worker once too often.
2546          */
2547         if (READ_ONCE(dev_priv->gt.awake)) {
2548                 i915_queue_hangcheck(dev_priv);
2549                 queue_delayed_work(dev_priv->wq,
2550                                    &dev_priv->gt.retire_work,
2551                                    round_jiffies_up_relative(HZ));
2552         }
2553 }
2554
2555 static void
2556 i915_gem_idle_work_handler(struct work_struct *work)
2557 {
2558         struct drm_i915_private *dev_priv =
2559                 container_of(work, typeof(*dev_priv), gt.idle_work.work);
2560         struct drm_device *dev = &dev_priv->drm;
2561         struct intel_engine_cs *engine;
2562         unsigned int stuck_engines;
2563         bool rearm_hangcheck;
2564
2565         if (!READ_ONCE(dev_priv->gt.awake))
2566                 return;
2567
2568         if (READ_ONCE(dev_priv->gt.active_engines))
2569                 return;
2570
2571         rearm_hangcheck =
2572                 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
2573
2574         if (!mutex_trylock(&dev->struct_mutex)) {
2575                 /* Currently busy, come back later */
2576                 mod_delayed_work(dev_priv->wq,
2577                                  &dev_priv->gt.idle_work,
2578                                  msecs_to_jiffies(50));
2579                 goto out_rearm;
2580         }
2581
2582         if (dev_priv->gt.active_engines)
2583                 goto out_unlock;
2584
2585         for_each_engine(engine, dev_priv)
2586                 i915_gem_batch_pool_fini(&engine->batch_pool);
2587
2588         GEM_BUG_ON(!dev_priv->gt.awake);
2589         dev_priv->gt.awake = false;
2590         rearm_hangcheck = false;
2591
2592         /* As we have disabled hangcheck, we need to unstick any waiters still
2593          * hanging around. However, as we may be racing against the interrupt
2594          * handler or the waiters themselves, we skip enabling the fake-irq.
2595          */
2596         stuck_engines = intel_kick_waiters(dev_priv);
2597         if (unlikely(stuck_engines))
2598                 DRM_DEBUG_DRIVER("kicked stuck waiters (%x)...missed irq?\n",
2599                                  stuck_engines);
2600
2601         if (INTEL_GEN(dev_priv) >= 6)
2602                 gen6_rps_idle(dev_priv);
2603         intel_runtime_pm_put(dev_priv);
2604 out_unlock:
2605         mutex_unlock(&dev->struct_mutex);
2606
2607 out_rearm:
2608         if (rearm_hangcheck) {
2609                 GEM_BUG_ON(!dev_priv->gt.awake);
2610                 i915_queue_hangcheck(dev_priv);
2611         }
2612 }
2613
2614 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
2615 {
2616         struct drm_i915_gem_object *obj = to_intel_bo(gem);
2617         struct drm_i915_file_private *fpriv = file->driver_priv;
2618         struct i915_vma *vma, *vn;
2619
2620         mutex_lock(&obj->base.dev->struct_mutex);
2621         list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
2622                 if (vma->vm->file == fpriv)
2623                         i915_vma_close(vma);
2624         mutex_unlock(&obj->base.dev->struct_mutex);
2625 }
2626
2627 /**
2628  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2629  * @dev: drm device pointer
2630  * @data: ioctl data blob
2631  * @file: drm file pointer
2632  *
2633  * Returns 0 if successful, else an error is returned with the remaining time in
2634  * the timeout parameter.
2635  *  -ETIME: object is still busy after timeout
2636  *  -ERESTARTSYS: signal interrupted the wait
2637  *  -ENOENT: object doesn't exist
2638  * Also possible, but rare:
2639  *  -EAGAIN: GPU wedged
2640  *  -ENOMEM: damn
2641  *  -ENODEV: Internal IRQ fail
2642  *  -E?: The add request failed
2643  *
2644  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2645  * non-zero timeout parameter the wait ioctl will wait for the given number of
2646  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2647  * without holding struct_mutex the object may become re-busied before this
2648  * function completes. A similar but shorter * race condition exists in the busy
2649  * ioctl
2650  */
2651 int
2652 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2653 {
2654         struct drm_i915_gem_wait *args = data;
2655         struct drm_i915_gem_object *obj;
2656         struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
2657         int i, n = 0;
2658         int ret;
2659
2660         if (args->flags != 0)
2661                 return -EINVAL;
2662
2663         ret = i915_mutex_lock_interruptible(dev);
2664         if (ret)
2665                 return ret;
2666
2667         obj = i915_gem_object_lookup(file, args->bo_handle);
2668         if (!obj) {
2669                 mutex_unlock(&dev->struct_mutex);
2670                 return -ENOENT;
2671         }
2672
2673         if (!obj->active)
2674                 goto out;
2675
2676         for (i = 0; i < I915_NUM_ENGINES; i++) {
2677                 struct drm_i915_gem_request *req;
2678
2679                 req = i915_gem_active_get(&obj->last_read[i],
2680                                           &obj->base.dev->struct_mutex);
2681                 if (req)
2682                         requests[n++] = req;
2683         }
2684
2685 out:
2686         i915_gem_object_put(obj);
2687         mutex_unlock(&dev->struct_mutex);
2688
2689         for (i = 0; i < n; i++) {
2690                 if (ret == 0)
2691                         ret = i915_wait_request(requests[i], true,
2692                                                 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
2693                                                 to_rps_client(file));
2694                 i915_gem_request_put(requests[i]);
2695         }
2696         return ret;
2697 }
2698
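/*
 * Illustrative userspace usage of the wait ioctl above (a sketch, assuming
 * libdrm's drmIoctl() and an existing GEM handle): a zero timeout just
 * queries busyness, per the busy-ioctl equivalence described above; a call
 * that fails with ETIME means the object was still busy.
 *
 *	#include <errno.h>
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 0,	// 0: do not block, just query
 *	};
 *	int busy = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) &&
 *		   errno == ETIME;
 */
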
2699 static int
2700 __i915_gem_object_sync(struct drm_i915_gem_request *to,
2701                        struct drm_i915_gem_request *from)
2702 {
2703         int ret;
2704
2705         if (to->engine == from->engine)
2706                 return 0;
2707
2708         if (!i915.semaphores) {
2709                 ret = i915_wait_request(from,
2710                                         from->i915->mm.interruptible,
2711                                         NULL,
2712                                         NO_WAITBOOST);
2713                 if (ret)
2714                         return ret;
2715         } else {
2716                 int idx = intel_engine_sync_index(from->engine, to->engine);
2717                 if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
2718                         return 0;
2719
2720                 trace_i915_gem_ring_sync_to(to, from);
2721                 ret = to->engine->semaphore.sync_to(to, from);
2722                 if (ret)
2723                         return ret;
2724
2725                 from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
2726         }
2727
2728         return 0;
2729 }
2730
2731 /**
2732  * i915_gem_object_sync - sync an object to a ring.
2733  *
2734  * @obj: object which may be in use on another ring.
2735  * @to: request we are wishing to use
2736  *
2737  * This code is meant to abstract object synchronization with the GPU.
2738  * Conceptually we serialise writes between engines inside the GPU.
2739  * We only allow one engine to write into a buffer at any time, but
2740  * multiple readers. To ensure each has a coherent view of memory, we must:
2741  *
2742  * - If there is an outstanding write request to the object, the new
2743  *   request must wait for it to complete (either CPU or in hw, requests
2744  *   on the same ring will be naturally ordered).
2745  *
2746  * - If we are a write request (pending_write_domain is set), the new
2747  *   request must wait for outstanding read requests to complete.
2748  *
2749  * Returns 0 if successful, else propagates up the lower layer error.
2750  */
2751 int
2752 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2753                      struct drm_i915_gem_request *to)
2754 {
2755         struct i915_gem_active *active;
2756         unsigned long active_mask;
2757         int idx;
2758
2759         lockdep_assert_held(&obj->base.dev->struct_mutex);
2760
2761         active_mask = obj->active;
2762         if (!active_mask)
2763                 return 0;
2764
2765         if (obj->base.pending_write_domain) {
2766                 active = obj->last_read;
2767         } else {
2768                 active_mask = 1;
2769                 active = &obj->last_write;
2770         }
2771
2772         for_each_active(active_mask, idx) {
2773                 struct drm_i915_gem_request *request;
2774                 int ret;
2775
2776                 request = i915_gem_active_peek(&active[idx],
2777                                                &obj->base.dev->struct_mutex);
2778                 if (!request)
2779                         continue;
2780
2781                 ret = __i915_gem_object_sync(to, request);
2782                 if (ret)
2783                         return ret;
2784         }
2785
2786         return 0;
2787 }
2788
2789 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2790 {
2791         u32 old_write_domain, old_read_domains;
2792
2793         /* Force a pagefault for domain tracking on next user access */
2794         i915_gem_release_mmap(obj);
2795
2796         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2797                 return;
2798
2799         old_read_domains = obj->base.read_domains;
2800         old_write_domain = obj->base.write_domain;
2801
2802         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2803         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2804
2805         trace_i915_gem_object_change_domain(obj,
2806                                             old_read_domains,
2807                                             old_write_domain);
2808 }
2809
2810 static void __i915_vma_iounmap(struct i915_vma *vma)
2811 {
2812         GEM_BUG_ON(i915_vma_is_pinned(vma));
2813
2814         if (vma->iomap == NULL)
2815                 return;
2816
2817         io_mapping_unmap(vma->iomap);
2818         vma->iomap = NULL;
2819 }
2820
2821 int i915_vma_unbind(struct i915_vma *vma)
2822 {
2823         struct drm_i915_gem_object *obj = vma->obj;
2824         unsigned long active;
2825         int ret;
2826
2827         /* First wait upon any activity as retiring the request may
2828          * have side-effects such as unpinning or even unbinding this vma.
2829          */
2830         active = i915_vma_get_active(vma);
2831         if (active) {
2832                 int idx;
2833
2834                 /* When a closed VMA is retired, it is unbound - eek.
2835                  * In order to prevent it from being recursively closed,
2836                  * take a pin on the vma so that the second unbind is
2837                  * aborted.
2838                  */
2839                 __i915_vma_pin(vma);
2840
2841                 for_each_active(active, idx) {
2842                         ret = i915_gem_active_retire(&vma->last_read[idx],
2843                                                    &vma->vm->dev->struct_mutex);
2844                         if (ret)
2845                                 break;
2846                 }
2847
2848                 __i915_vma_unpin(vma);
2849                 if (ret)
2850                         return ret;
2851
2852                 GEM_BUG_ON(i915_vma_is_active(vma));
2853         }
2854
2855         if (i915_vma_is_pinned(vma))
2856                 return -EBUSY;
2857
2858         if (!drm_mm_node_allocated(&vma->node))
2859                 goto destroy;
2860
2861         GEM_BUG_ON(obj->bind_count == 0);
2862         GEM_BUG_ON(!obj->pages);
2863
2864         if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
2865                 i915_gem_object_finish_gtt(obj);
2866
2867                 /* release the fence reg _after_ flushing */
2868                 ret = i915_gem_object_put_fence(obj);
2869                 if (ret)
2870                         return ret;
2871
2872                 __i915_vma_iounmap(vma);
2873         }
2874
2875         if (likely(!vma->vm->closed)) {
2876                 trace_i915_vma_unbind(vma);
2877                 vma->vm->unbind_vma(vma);
2878         }
2879         vma->bound = 0;
2880
2881         drm_mm_remove_node(&vma->node);
2882         list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
2883
2884         if (vma->is_ggtt) {
2885                 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
2886                         obj->map_and_fenceable = false;
2887                 } else if (vma->ggtt_view.pages) {
2888                         sg_free_table(vma->ggtt_view.pages);
2889                         kfree(vma->ggtt_view.pages);
2890                 }
2891                 vma->ggtt_view.pages = NULL;
2892         }
2893
2894         /* Since the unbound list is global, only move to that list if
2895          * no more VMAs exist. */
2896         if (--obj->bind_count == 0)
2897                 list_move_tail(&obj->global_list,
2898                                &to_i915(obj->base.dev)->mm.unbound_list);
2899
2900         /* And finally now the object is completely decoupled from this vma,
2901          * we can drop its hold on the backing storage and allow it to be
2902          * reaped by the shrinker.
2903          */
2904         i915_gem_object_unpin_pages(obj);
2905
2906 destroy:
2907         if (unlikely(vma->closed))
2908                 i915_vma_destroy(vma);
2909
2910         return 0;
2911 }
2912
2913 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
2914 {
2915         struct intel_engine_cs *engine;
2916         int ret;
2917
2918         lockdep_assert_held(&dev_priv->drm.struct_mutex);
2919
2920         for_each_engine(engine, dev_priv) {
2921                 if (engine->last_context == NULL)
2922                         continue;
2923
2924                 ret = intel_engine_idle(engine);
2925                 if (ret)
2926                         return ret;
2927         }
2928
2929         return 0;
2930 }
2931
2932 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
2933                                      unsigned long cache_level)
2934 {
2935         struct drm_mm_node *gtt_space = &vma->node;
2936         struct drm_mm_node *other;
2937
2938         /*
2939          * On some machines we have to be careful when putting differing types
2940          * of snoopable memory together to avoid the prefetcher crossing memory
2941          * domains and dying. During vm initialisation, we decide whether or not
2942          * these constraints apply and set the drm_mm.color_adjust
2943          * appropriately.
2944          */
2945         if (vma->vm->mm.color_adjust == NULL)
2946                 return true;
2947
2948         if (!drm_mm_node_allocated(gtt_space))
2949                 return true;
2950
2951         if (list_empty(&gtt_space->node_list))
2952                 return true;
2953
2954         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2955         if (other->allocated && !other->hole_follows && other->color != cache_level)
2956                 return false;
2957
2958         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2959         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2960                 return false;
2961
2962         return true;
2963 }
2964
2965 /**
2966  * Finds free space in the GTT aperture and binds the object or a view of it
2967  * there.
2968  * @obj: object to bind
2969  * @vm: address space to bind into
2970  * @ggtt_view: global gtt view if applicable
2971  * @size: requested size in bytes (can be larger than the VMA)
2972  * @alignment: requested alignment
2973  * @flags: mask of PIN_* flags to use
2974  */
2975 static struct i915_vma *
2976 i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
2977                                struct i915_address_space *vm,
2978                                const struct i915_ggtt_view *ggtt_view,
2979                                u64 size,
2980                                u64 alignment,
2981                                u64 flags)
2982 {
2983         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2984         struct i915_vma *vma;
2985         u64 start, end;
2986         u64 min_alignment;
2987         int ret;
2988
2989         vma = ggtt_view ?
2990                 i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
2991                 i915_gem_obj_lookup_or_create_vma(obj, vm);
2992         if (IS_ERR(vma))
2993                 return vma;
2994
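        /* Note (descriptive, not in the original): the node must cover at
         * least the view being bound; for a mappable pin it is also rounded
         * up to the size a fence register would need for this tiling mode.
         */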
2995         size = max(size, vma->size);
2996         if (flags & PIN_MAPPABLE)
2997                 size = i915_gem_get_ggtt_size(dev_priv, size, obj->tiling_mode);
2998
2999         min_alignment =
3000                 i915_gem_get_ggtt_alignment(dev_priv, size, obj->tiling_mode,
3001                                             flags & PIN_MAPPABLE);
3002         if (alignment == 0)
3003                 alignment = min_alignment;
3004         if (alignment & (min_alignment - 1)) {
3005                 DRM_DEBUG("Invalid object alignment requested %llu, minimum %llu\n",
3006                           alignment, min_alignment);
3007                 return ERR_PTR(-EINVAL);
3008         }
3009
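        /* PIN_OFFSET_BIAS carries a minimum start offset in the page-aligned
         * bits of flags (PIN_OFFSET_MASK); nothing below that offset is
         * considered for the allocation.
         */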
3010         start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3011
3012         end = vma->vm->total;
3013         if (flags & PIN_MAPPABLE)
3014                 end = min_t(u64, end, dev_priv->ggtt.mappable_end);
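        /* PIN_ZONE_4G keeps the binding below 4GiB (leaving the topmost page
         * unused), for objects that must be addressable with 32-bit offsets.
         */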
3015         if (flags & PIN_ZONE_4G)
3016                 end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
3017
3018         /* If binding the object/GGTT view requires more space than the entire
3019          * aperture has, reject it early before evicting everything in a vain
3020          * attempt to find space.
3021          */
3022         if (size > end) {
3023                 DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
3024                           size, obj->base.size,
3025                           flags & PIN_MAPPABLE ? "mappable" : "total",
3026                           end);
3027                 return ERR_PTR(-E2BIG);
3028         }
3029
3030         ret = i915_gem_object_get_pages(obj);
3031         if (ret)
3032                 return ERR_PTR(ret);
3033
3034         i915_gem_object_pin_pages(obj);
3035
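        /* PIN_OFFSET_FIXED demands an exact GTT offset: try to reserve that
         * node directly and, if the range is occupied, evict whatever
         * overlaps it before retrying once.
         */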
3036         if (flags & PIN_OFFSET_FIXED) {
3037                 uint64_t offset = flags & PIN_OFFSET_MASK;
3038                 if (offset & (alignment - 1) || offset > end - size) {
3039                         ret = -EINVAL;
3040                         goto err_unpin;
3041                 }
3042
3043                 vma->node.start = offset;
3044                 vma->node.size = size;
3045                 vma->node.color = obj->cache_level;
3046                 ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
3047                 if (ret) {
3048                         ret = i915_gem_evict_for_vma(vma);
3049                         if (ret == 0)
3050                                 ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
3051                         if (ret)
3052                                 goto err_unpin;
3053                 }
3054         } else {
3055                 u32 search_flag, alloc_flag;
3056
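                /* PIN_HIGH asks the range manager to allocate from the top of
                 * the address space downwards, presumably to keep the low
                 * (mappable) range free for other users.
                 */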
3057                 if (flags & PIN_HIGH) {
3058                         search_flag = DRM_MM_SEARCH_BELOW;
3059                         alloc_flag = DRM_MM_CREATE_TOP;
3060                 } else {
3061                         search_flag = DRM_MM_SEARCH_DEFAULT;
3062                         alloc_flag = DRM_MM_CREATE_DEFAULT;
3063                 }
3064
3065                 /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3066                  * so we know that we always have a minimum alignment of 4096.
3067                  * The drm_mm range manager is optimised to return results
3068                  * with zero alignment, so where possible use the optimal
3069                  * path.
3070                  */
3071                 if (alignment <= 4096)
3072                         alignment = 0;
3073
3074 search_free:
3075                 ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
3076                                                           &vma->node,
3077                                                           size, alignment,
3078                                                           obj->cache_level,
3079                                                           start, end,
3080                                                           search_flag,
3081                                                           alloc_flag);
3082                 if (ret) {
3083                         ret = i915_gem_evict_something(vma->vm, size, alignment,
3084                                                        obj->cache_level,
3085                                                        start, end,
3086                                                        flags);
3087                         if (ret == 0)
3088                                 goto search_free;
3089
3090                         goto err_unpin;
3091                 }
3092         }
3093         GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
3094
3095         list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3096         list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3097         obj->bind_count++;
3098
3099         return vma;
3100
3101 err_unpin:
3102         i915_gem_object_unpin_pages(obj);
3103         return ERR_PTR(ret);
3104 }
3105
3106 bool
3107 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3108                         bool force)
3109 {
3110         /* If we don't have a page list set up, then we're not pinned
3111          * to GPU, and we can ignore the cache flush because it'll happen
3112          * again at bind time.
3113          */
3114         if (obj->pages == NULL)
3115                 return false;
3116
3117         /*
3118          * Stolen memory is always coherent with the GPU as it is explicitly
3119          * marked as wc by the system, or the system is cache-coherent.
3120          */
3121         if (obj->stolen || obj->phys_handle)
3122                 return false;
3123
3124         /* If the GPU is snooping the contents of the CPU cache,
3125          * we do not need to manually clear the CPU cache lines.  However,
3126          * the caches are only snooped when the render cache is
3127          * flushed/invalidated.  As we always have to emit invalidations
3128          * and flushes when moving into and out of the RENDER domain, correct
3129          * snooping behaviour occurs naturally as the result of our domain
3130          * tracking.
3131          */
3132         if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3133                 obj->cache_dirty = true;
3134                 return false;
3135         }
3136
3137         trace_i915_gem_object_clflush(obj);
3138         drm_clflush_sg(obj->pages);
3139         obj->cache_dirty = false;
3140
3141         return true;
3142 }
3143
3144 /** Flushes the GTT write domain for the object if it's dirty. */
3145 static void
3146 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3147 {
3148         uint32_t old_write_domain;
3149
3150         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3151                 return;
3152
3153         /* No actual flushing is required for the GTT write domain.  Writes
3154          * to it immediately go to main memory as far as we know, so there's
3155          * no chipset flush.  It also doesn't land in render cache.
3156          *
3157          * However, we do have to enforce the order so that all writes through
3158          * the GTT land before any writes to the device, such as updates to
3159          * the GATT itself.
3160          */
3161         wmb();
3162
3163         old_write_domain = obj->base.write_domain;
3164         obj->base.write_domain = 0;
3165
3166         intel_fb_obj_flush(obj, false, ORIGIN_GTT);
3167
3168         trace_i915_gem_object_change_domain(obj,
3169                                             obj->base.read_domains,
3170                                             old_write_domain);
3171 }
3172
3173 /** Flushes the CPU write domain for the object if it's dirty. */
3174 static void
3175 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3176 {
3177         uint32_t old_write_domain;
3178
3179         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3180                 return;
3181
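        /* When the object is pinned for display, the flush is forced even on
         * LLC platforms, as scanout is not guaranteed to snoop the CPU cache.
         */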
3182         if (i915_gem_clflush_object(obj, obj->pin_display))
3183                 i915_gem_chipset_flush(to_i915(obj->base.dev));
3184
3185         old_write_domain = obj->base.write_domain;
3186         obj->base.write_domain = 0;
3187
3188         intel_fb_obj_flush(obj, false, ORIGIN_CPU);
3189
3190         trace_i915_gem_object_change_domain(obj,
3191                                             obj->base.read_domains,
3192                                             old_write_domain);
3193 }
3194
3195 /**
3196  * Moves a single object to the GTT read, and possibly write domain.
3197  * @obj: object to act on
3198  * @write: ask for write access or read only
3199  *
3200  * This function returns when the move is complete, including waiting on
3201  * flushes to occur.
3202  */
3203 int
3204 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3205 {
3206         uint32_t old_write_domain, old_read_domains;
3207         struct i915_vma *vma;
3208         int ret;
3209
3210         ret = i915_gem_object_wait_rendering(obj, !write);
3211         if (ret)
3212                 return ret;
3213
3214         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3215                 return 0;
3216
3217         /* Flush and acquire obj->pages so that we are coherent through
3218          * direct access in memory with previous cached writes through
3219          * shmemfs and that our cache domain tracking remains valid.
3220          * For example, if the obj->filp was moved to swap without us
3221          * being notified and releasing the pages, we would mistakenly
3222          * continue to assume that the obj remained out of the CPU cached
3223          * domain.
3224          */
3225         ret = i915_gem_object_get_pages(obj);
3226         if (ret)
3227                 return ret;
3228
3229         i915_gem_object_flush_cpu_write_domain(obj);
3230
3231         /* Serialise direct access to this object with the barriers for
3232          * coherent writes from the GPU, by effectively invalidating the
3233          * GTT domain upon first access.
3234          */
3235         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3236                 mb();
3237
3238         old_write_domain = obj->base.write_domain;
3239         old_read_domains = obj->base.read_domains;
3240
3241         /* It should now be out of any other write domains, and we can update
3242          * the domain values for our changes.
3243          */
3244         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3245         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3246         if (write) {
3247                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3248                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3249                 obj->dirty = 1;
3250         }
3251
3252         trace_i915_gem_object_change_domain(obj,
3253                                             old_read_domains,
3254                                             old_write_domain);
3255
3256         /* And bump the LRU for this access */
3257         vma = i915_gem_obj_to_ggtt(obj);
3258         if (vma &&
3259             drm_mm_node_allocated(&vma->node) &&
3260             !i915_vma_is_active(vma))
3261                 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3262
3263         return 0;
3264 }
3265
3266 /**
3267  * Changes the cache-level of an object across all VMA.
3268  * @obj: object to act on
3269  * @cache_level: new cache level to set for the object
3270  *
3271  * After this function returns, the object will be in the new cache-level
3272  * across all GTT and the contents of the backing storage will be coherent,
3273  * with respect to the new cache-level. In order to keep the backing storage
3274  * coherent for all users, we only allow a single cache level to be set
3275  * globally on the object and prevent it from being changed whilst the
3276  * hardware is reading from the object. That is if the object is currently
3277  * on the scanout it will be set to uncached (or equivalent display
3278  * cache coherency) and all non-MOCS GPU access will also be uncached so
3279  * that all direct access to the scanout remains coherent.
3280  */
3281 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3282                                     enum i915_cache_level cache_level)
3283 {
3284         struct i915_vma *vma;
3285         int ret = 0;
3286
3287         if (obj->cache_level == cache_level)
3288                 goto out;
3289
3290         /* Inspect the list of currently bound VMA and unbind any that would
3291          * be invalid given the new cache-level. This is principally to
3292          * catch the issue of the CS prefetch crossing page boundaries and
3293          * reading an invalid PTE on older architectures.
3294          */
3295 restart:
3296         list_for_each_entry(vma, &obj->vma_list, obj_link) {
3297                 if (!drm_mm_node_allocated(&vma->node))
3298                         continue;
3299
3300                 if (i915_vma_is_pinned(vma)) {
3301                         DRM_DEBUG("can not change the cache level of pinned objects\n");
3302                         return -EBUSY;
3303                 }
3304
3305                 if (i915_gem_valid_gtt_space(vma, cache_level))
3306                         continue;
3307
3308                 ret = i915_vma_unbind(vma);
3309                 if (ret)
3310                         return ret;
3311
3312                 /* As unbinding may affect other elements in the
3313                  * obj->vma_list (due to side-effects from retiring
3314                  * an active vma), play safe and restart the iterator.
3315                  */
3316                 goto restart;
3317         }
3318
3319         /* We can reuse the existing drm_mm nodes but need to change the
3320          * cache-level on the PTE. We could simply unbind them all and
3321          * rebind with the correct cache-level on next use. However since
3322          * we already have a valid slot, dma mapping, pages etc., we may as well
3323          * rewrite the PTE in the belief that doing so tramples upon less
3324          * state and so involves less work.
3325          */
3326         if (obj->bind_count) {
3327                 /* Before we change the PTE, the GPU must not be accessing it.
3328                  * If we wait upon the object, we know that all the bound
3329                  * VMA are no longer active.
3330                  */
3331                 ret = i915_gem_object_wait_rendering(obj, false);
3332                 if (ret)
3333                         return ret;
3334
3335                 if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
3336                         /* Access to snoopable pages through the GTT is
3337                          * incoherent and on some machines causes a hard
3338          * lockup. Relinquish the CPU mmapping to force
3339                          * userspace to refault in the pages and we can
3340                          * then double check if the GTT mapping is still
3341                          * valid for that pointer access.
3342                          */
3343                         i915_gem_release_mmap(obj);
3344
3345                         /* As we no longer need a fence for GTT access,
3346                          * we can relinquish it now (and so prevent having
3347                          * to steal a fence from someone else on the next
3348                          * fence request). Note GPU activity would have
3349                          * dropped the fence as all snoopable access is
3350                          * supposed to be linear.
3351                          */
3352                         ret = i915_gem_object_put_fence(obj);
3353                         if (ret)
3354                                 return ret;
3355                 } else {
3356                         /* We either have incoherent backing store and
3357                          * so no GTT access or the architecture is fully
3358                          * coherent. In such cases, existing GTT mmaps
3359                          * ignore the cache bit in the PTE and we can
3360                          * rewrite it without confusing the GPU or having
3361                          * to force userspace to fault back in its mmaps.
3362                          */
3363                 }
3364
3365                 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3366                         if (!drm_mm_node_allocated(&vma->node))
3367                                 continue;
3368
3369                         ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3370                         if (ret)
3371                                 return ret;
3372                 }
3373         }
3374
3375         list_for_each_entry(vma, &obj->vma_list, obj_link)
3376                 vma->node.color = cache_level;
3377         obj->cache_level = cache_level;
3378
3379 out:
3380         /* Flush the dirty CPU caches to the backing storage so that the
3381          * object is now coherent at its new cache level (with respect
3382          * to the access domain).
3383          */
3384         if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
3385                 if (i915_gem_clflush_object(obj, true))
3386                         i915_gem_chipset_flush(to_i915(obj->base.dev));
3387         }
3388
3389         return 0;
3390 }
3391
3392 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3393                                struct drm_file *file)
3394 {
3395         struct drm_i915_gem_caching *args = data;
3396         struct drm_i915_gem_object *obj;
3397
3398         obj = i915_gem_object_lookup(file, args->handle);
3399         if (!obj)
3400                 return -ENOENT;
3401
3402         switch (obj->cache_level) {
3403         case I915_CACHE_LLC:
3404         case I915_CACHE_L3_LLC:
3405                 args->caching = I915_CACHING_CACHED;
3406                 break;
3407
3408         case I915_CACHE_WT:
3409                 args->caching = I915_CACHING_DISPLAY;
3410                 break;
3411
3412         default:
3413                 args->caching = I915_CACHING_NONE;
3414                 break;
3415         }
3416
3417         i915_gem_object_put_unlocked(obj);
3418         return 0;
3419 }
3420
3421 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3422                                struct drm_file *file)
3423 {
3424         struct drm_i915_private *dev_priv = to_i915(dev);
3425         struct drm_i915_gem_caching *args = data;
3426         struct drm_i915_gem_object *obj;
3427         enum i915_cache_level level;
3428         int ret;
3429
3430         switch (args->caching) {
3431         case I915_CACHING_NONE:
3432                 level = I915_CACHE_NONE;
3433                 break;
3434         case I915_CACHING_CACHED:
3435                 /*
3436                  * Due to a HW issue on BXT A stepping, GPU stores via a
3437                  * snooped mapping may leave stale data in a corresponding CPU
3438                  * cacheline, whereas normally such cachelines would get
3439                  * invalidated.
3440                  */
3441                 if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
3442                         return -ENODEV;
3443
3444                 level = I915_CACHE_LLC;
3445                 break;
3446         case I915_CACHING_DISPLAY:
3447                 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3448                 break;
3449         default:
3450                 return -EINVAL;
3451         }
3452
3453         intel_runtime_pm_get(dev_priv);
3454
3455         ret = i915_mutex_lock_interruptible(dev);
3456         if (ret)
3457                 goto rpm_put;
3458
3459         obj = i915_gem_object_lookup(file, args->handle);
3460         if (!obj) {
3461                 ret = -ENOENT;
3462                 goto unlock;
3463         }
3464
3465         ret = i915_gem_object_set_cache_level(obj, level);
3466
3467         i915_gem_object_put(obj);
3468 unlock:
3469         mutex_unlock(&dev->struct_mutex);
3470 rpm_put:
3471         intel_runtime_pm_put(dev_priv);
3472
3473         return ret;
3474 }
3475
3476 /*
3477  * Prepare buffer for display plane (scanout, cursors, etc).
3478  * Can be called from an uninterruptible phase (modesetting) and allows
3479  * any flushes to be pipelined (for pageflips).
3480  */
3481 int
3482 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3483                                      u32 alignment,
3484                                      const struct i915_ggtt_view *view)
3485 {
3486         u32 old_read_domains, old_write_domain;
3487         int ret;
3488
3489         /* Mark the pin_display early so that we account for the
3490          * display coherency whilst setting up the cache domains.
3491          */
3492         obj->pin_display++;
3493
3494         /* The display engine is not coherent with the LLC cache on gen6.  As
3495          * a result, we make sure that the pinning that is about to occur is
3496          * done with uncached PTEs. This is the lowest common denominator for all
3497          * chipsets.
3498          *
3499          * However for gen6+, we could do better by using the GFDT bit instead
3500          * of uncaching, which would allow us to flush all the LLC-cached data
3501          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3502          */
3503         ret = i915_gem_object_set_cache_level(obj,
3504                                               HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3505         if (ret)
3506                 goto err_unpin_display;
3507
3508         /* As the user may map the buffer once pinned in the display plane
3509          * (e.g. libkms for the bootup splash), we have to ensure that we
3510          * always use map_and_fenceable for all scanout buffers.
3511          */
3512         ret = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3513                                        view->type == I915_GGTT_VIEW_NORMAL ?
3514                                        PIN_MAPPABLE : 0);
3515         if (ret)
3516                 goto err_unpin_display;
3517
3518         i915_gem_object_flush_cpu_write_domain(obj);
3519
3520         old_write_domain = obj->base.write_domain;
3521         old_read_domains = obj->base.read_domains;
3522
3523         /* It should now be out of any other write domains, and we can update
3524          * the domain values for our changes.
3525          */
3526         obj->base.write_domain = 0;
3527         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3528
3529         trace_i915_gem_object_change_domain(obj,
3530                                             old_read_domains,
3531                                             old_write_domain);
3532
3533         return 0;
3534
3535 err_unpin_display:
3536         obj->pin_display--;
3537         return ret;
3538 }
3539
3540 void
3541 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
3542                                          const struct i915_ggtt_view *view)
3543 {
3544         if (WARN_ON(obj->pin_display == 0))
3545                 return;
3546
3547         i915_gem_object_ggtt_unpin_view(obj, view);
3548
3549         obj->pin_display--;
3550 }
3551
3552 /**
3553  * Moves a single object to the CPU read, and possibly write domain.
3554  * @obj: object to act on
3555  * @write: requesting write or read-only access
3556  *
3557  * This function returns when the move is complete, including waiting on
3558  * flushes to occur.
3559  */
3560 int
3561 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3562 {
3563         uint32_t old_write_domain, old_read_domains;
3564         int ret;
3565
3566         ret = i915_gem_object_wait_rendering(obj, !write);
3567         if (ret)
3568                 return ret;
3569
3570         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3571                 return 0;
3572
3573         i915_gem_object_flush_gtt_write_domain(obj);
3574
3575         old_write_domain = obj->base.write_domain;
3576         old_read_domains = obj->base.read_domains;
3577
3578         /* Flush the CPU cache if it's still invalid. */
3579         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3580                 i915_gem_clflush_object(obj, false);
3581
3582                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3583         }
3584
3585         /* It should now be out of any other write domains, and we can update
3586          * the domain values for our changes.
3587          */
3588         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3589
3590         /* If we're writing through the CPU, then the GPU read domains will
3591          * need to be invalidated at next use.
3592          */
3593         if (write) {
3594                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3595                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3596         }
3597
3598         trace_i915_gem_object_change_domain(obj,
3599                                             old_read_domains,
3600                                             old_write_domain);
3601
3602         return 0;
3603 }
3604
3605 /* Throttle our rendering by waiting until the ring has completed our requests
3606  * emitted over 20 msec ago.
3607  *
3608  * Note that if we were to use the current jiffies each time around the loop,
3609  * we wouldn't escape the function with any frames outstanding if the time to
3610  * render a frame was over 20ms.
3611  *
3612  * This should get us reasonable parallelism between CPU and GPU but also
3613  * relatively low latency when blocking on a particular request to finish.
3614  */
3615 static int
3616 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3617 {
3618         struct drm_i915_private *dev_priv = to_i915(dev);
3619         struct drm_i915_file_private *file_priv = file->driver_priv;
3620         unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
3621         struct drm_i915_gem_request *request, *target = NULL;
3622         int ret;
3623
3624         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3625         if (ret)
3626                 return ret;
3627
3628         /* ABI: return -EIO if already wedged */
3629         if (i915_terminally_wedged(&dev_priv->gpu_error))
3630                 return -EIO;
3631
3632         spin_lock(&file_priv->mm.lock);
3633         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3634                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3635                         break;
3636
3637                 /*
3638                  * Note that the request might not have been submitted yet,
3639                  * in which case emitted_jiffies will be zero.
3640                  */
3641                 if (!request->emitted_jiffies)
3642                         continue;
3643
3644                 target = request;
3645         }
3646         if (target)
3647                 i915_gem_request_get(target);
3648         spin_unlock(&file_priv->mm.lock);
3649
3650         if (target == NULL)
3651                 return 0;
3652
3653         ret = i915_wait_request(target, true, NULL, NULL);
3654         i915_gem_request_put(target);
3655
3656         return ret;
3657 }
3658
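/* Check whether an existing VMA satisfies a new pin request (size, alignment,
 * mappability and placement constraints). If not, the caller unbinds the VMA
 * and rebinds the object elsewhere.
 */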
3659 static bool
3660 i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
3661 {
3662         struct drm_i915_gem_object *obj = vma->obj;
3663
3664         if (vma->node.size < size)
3665                 return true;
3666
3667         if (alignment && vma->node.start & (alignment - 1))
3668                 return true;
3669
3670         if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
3671                 return true;
3672
3673         if (flags & PIN_OFFSET_BIAS &&
3674             vma->node.start < (flags & PIN_OFFSET_MASK))
3675                 return true;
3676
3677         if (flags & PIN_OFFSET_FIXED &&
3678             vma->node.start != (flags & PIN_OFFSET_MASK))
3679                 return true;
3680
3681         return false;
3682 }
3683
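/* Recompute obj->map_and_fenceable: the GGTT node must be sized and aligned
 * for a fence register and must lie entirely within the mappable aperture.
 */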
3684 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
3685 {
3686         struct drm_i915_gem_object *obj = vma->obj;
3687         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3688         bool mappable, fenceable;
3689         u32 fence_size, fence_alignment;
3690
3691         fence_size = i915_gem_get_ggtt_size(dev_priv,
3692                                             obj->base.size,
3693                                             obj->tiling_mode);
3694         fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
3695                                                       obj->base.size,
3696                                                       obj->tiling_mode,
3697                                                       true);
3698
3699         fenceable = (vma->node.size == fence_size &&
3700                      (vma->node.start & (fence_alignment - 1)) == 0);
3701
3702         mappable = (vma->node.start + fence_size <=
3703                     dev_priv->ggtt.mappable_end);
3704
3705         obj->map_and_fenceable = mappable && fenceable;
3706 }
3707
3708 static int
3709 i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
3710                        struct i915_address_space *vm,
3711                        const struct i915_ggtt_view *ggtt_view,
3712                        u64 size,
3713                        u64 alignment,
3714                        u64 flags)
3715 {
3716         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3717         struct i915_vma *vma;
3718         unsigned bound;
3719         int ret;
3720
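        /* Sanity check the request: we never pin directly into the aliasing
         * ppgtt, PIN_GLOBAL/PIN_MAPPABLE only make sense for the global GTT
         * (and PIN_MAPPABLE implies PIN_GLOBAL), and a GGTT view must be
         * supplied exactly when binding into the global GTT.
         */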
3721         if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
3722                 return -ENODEV;
3723
3724         if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
3725                 return -EINVAL;
3726
3727         if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
3728                 return -EINVAL;
3729
3730         if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
3731                 return -EINVAL;
3732
3733         vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
3734                           i915_gem_obj_to_vma(obj, vm);
3735
3736         if (vma) {
3737                 if (WARN_ON(i915_vma_pin_count(vma) == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3738                         return -EBUSY;
3739
3740                 if (i915_vma_misplaced(vma, size, alignment, flags)) {
3741                         WARN(i915_vma_is_pinned(vma),
3742                              "bo is already pinned in %s with incorrect alignment:"
3743                              " offset=%08x %08x, req.alignment=%llx, req.map_and_fenceable=%d,"
3744                              " obj->map_and_fenceable=%d\n",
3745                              ggtt_view ? "ggtt" : "ppgtt",
3746                              upper_32_bits(vma->node.start),
3747                              lower_32_bits(vma->node.start),
3748                              alignment,
3749                              !!(flags & PIN_MAPPABLE),
3750                              obj->map_and_fenceable);
3751                         ret = i915_vma_unbind(vma);
3752                         if (ret)
3753                                 return ret;
3754
3755                         vma = NULL;
3756                 }
3757         }
3758
3759         if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
3760                 vma = i915_gem_object_insert_into_vm(obj, vm, ggtt_view,
3761                                                      size, alignment, flags);
3762                 if (IS_ERR(vma))
3763                         return PTR_ERR(vma);
3764         }
3765
3766         bound = vma->bound;
3767         ret = i915_vma_bind(vma, obj->cache_level, flags);
3768         if (ret)
3769                 return ret;
3770
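        /* If the normal GGTT view has just changed its global binding, the
         * object's fenceability may have changed, so recompute
         * map_and_fenceable.
         */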
3771         if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
3772             (bound ^ vma->bound) & GLOBAL_BIND) {
3773                 __i915_vma_set_map_and_fenceable(vma);
3774                 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
3775         }
3776
3777         GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
3778
3779         __i915_vma_pin(vma);
3780         return 0;
3781 }
3782
3783 int
3784 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3785                     struct i915_address_space *vm,
3786                     u64 size,
3787                     u64 alignment,
3788                     u64 flags)
3789 {
3790         return i915_gem_object_do_pin(obj, vm,
3791                                       i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
3792                                       size, alignment, flags);
3793 }
3794
3795 int
3796 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3797                          const struct i915_ggtt_view *view,
3798                          u64 size,
3799                          u64 alignment,
3800                          u64 flags)
3801 {
3802         struct drm_device *dev = obj->base.dev;
3803         struct drm_i915_private *dev_priv = to_i915(dev);
3804         struct i915_ggtt *ggtt = &dev_priv->ggtt;
3805
3806         BUG_ON(!view);
3807
3808         return i915_gem_object_do_pin(obj, &ggtt->base, view,
3809                                       size, alignment, flags | PIN_GLOBAL);
3810 }
3811
3812 void
3813 i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
3814                                 const struct i915_ggtt_view *view)
3815 {
3816         struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
3817
3818         WARN_ON(!i915_vma_is_pinned(vma));
3819         WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
3820
3821         __i915_vma_unpin(vma);
3822 }
3823
3824 int
3825 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3826                     struct drm_file *file)
3827 {
3828         struct drm_i915_gem_busy *args = data;
3829         struct drm_i915_gem_object *obj;
3830         int ret;
3831
3832         ret = i915_mutex_lock_interruptible(dev);
3833         if (ret)
3834                 return ret;
3835
3836         obj = i915_gem_object_lookup(file, args->handle);
3837         if (!obj) {
3838                 ret = -ENOENT;
3839                 goto unlock;
3840         }
3841
3842         /* Count all active objects as busy, even if they are currently not used
3843          * by the gpu. Users of this interface expect objects to eventually
3844          * become non-busy without any further actions.
3845          */
3846         args->busy = 0;
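        /* The busy word reports the id of the last engine to write to the
         * object in the low 16 bits and a per-engine read mask in the
         * upper 16 bits.
         */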
3847         if (obj->active) {
3848                 struct drm_i915_gem_request *req;
3849                 int i;
3850
3851                 for (i = 0; i < I915_NUM_ENGINES; i++) {
3852                         req = i915_gem_active_peek(&obj->last_read[i],
3853                                                    &obj->base.dev->struct_mutex);
3854                         if (req)
3855                                 args->busy |= 1 << (16 + req->engine->exec_id);
3856                 }
3857                 req = i915_gem_active_peek(&obj->last_write,
3858                                            &obj->base.dev->struct_mutex);
3859                 if (req)
3860                         args->busy |= req->engine->exec_id;
3861         }
3862
3863         i915_gem_object_put(obj);
3864 unlock:
3865         mutex_unlock(&dev->struct_mutex);
3866         return ret;
3867 }
3868
3869 int
3870 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3871                         struct drm_file *file_priv)
3872 {
3873         return i915_gem_ring_throttle(dev, file_priv);
3874 }
3875
3876 int
3877 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3878                        struct drm_file *file_priv)
3879 {
3880         struct drm_i915_private *dev_priv = to_i915(dev);
3881         struct drm_i915_gem_madvise *args = data;
3882         struct drm_i915_gem_object *obj;
3883         int ret;
3884
3885         switch (args->madv) {
3886         case I915_MADV_DONTNEED:
3887         case I915_MADV_WILLNEED:
3888             break;
3889         default:
3890             return -EINVAL;
3891         }
3892
3893         ret = i915_mutex_lock_interruptible(dev);
3894         if (ret)
3895                 return ret;
3896
3897         obj = i915_gem_object_lookup(file_priv, args->handle);
3898         if (!obj) {
3899                 ret = -ENOENT;
3900                 goto unlock;
3901         }
3902
3903         if (i915_gem_obj_is_pinned(obj)) {
3904                 ret = -EINVAL;
3905                 goto out;
3906         }
3907
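        /* On platforms with QUIRK_PIN_SWIZZLED_PAGES, tiled objects keep
         * their pages pinned while marked WILLNEED; adjust that extra pin
         * to match the new madvise state.
         */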
3908         if (obj->pages &&
3909             obj->tiling_mode != I915_TILING_NONE &&
3910             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
3911                 if (obj->madv == I915_MADV_WILLNEED)
3912                         i915_gem_object_unpin_pages(obj);
3913                 if (args->madv == I915_MADV_WILLNEED)
3914                         i915_gem_object_pin_pages(obj);
3915         }
3916
3917         if (obj->madv != __I915_MADV_PURGED)
3918                 obj->madv = args->madv;
3919
3920         /* if the object is no longer attached, discard its backing storage */
3921         if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
3922                 i915_gem_object_truncate(obj);
3923
3924         args->retained = obj->madv != __I915_MADV_PURGED;
3925
3926 out:
3927         i915_gem_object_put(obj);
3928 unlock:
3929         mutex_unlock(&dev->struct_mutex);
3930         return ret;
3931 }
3932
3933 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3934                           const struct drm_i915_gem_object_ops *ops)
3935 {
3936         int i;
3937
3938         INIT_LIST_HEAD(&obj->global_list);
3939         for (i = 0; i < I915_NUM_ENGINES; i++)
3940                 init_request_active(&obj->last_read[i],
3941                                     i915_gem_object_retire__read);
3942         init_request_active(&obj->last_write,
3943                             i915_gem_object_retire__write);
3944         init_request_active(&obj->last_fence, NULL);
3945         INIT_LIST_HEAD(&obj->obj_exec_link);
3946         INIT_LIST_HEAD(&obj->vma_list);
3947         INIT_LIST_HEAD(&obj->batch_pool_link);
3948
3949         obj->ops = ops;
3950
3951         obj->fence_reg = I915_FENCE_REG_NONE;
3952         obj->madv = I915_MADV_WILLNEED;
3953
3954         i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
3955 }
3956
3957 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3958         .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
3959         .get_pages = i915_gem_object_get_pages_gtt,
3960         .put_pages = i915_gem_object_put_pages_gtt,
3961 };
3962
3963 struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
3964                                                   size_t size)
3965 {
3966         struct drm_i915_gem_object *obj;
3967         struct address_space *mapping;
3968         gfp_t mask;
3969         int ret;
3970
3971         obj = i915_gem_object_alloc(dev);
3972         if (obj == NULL)
3973                 return ERR_PTR(-ENOMEM);
3974
3975         ret = drm_gem_object_init(dev, &obj->base, size);
3976         if (ret)
3977                 goto fail;
3978
3979         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
3980         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
3981                 /* 965gm cannot relocate objects above 4GiB. */
3982                 mask &= ~__GFP_HIGHMEM;
3983                 mask |= __GFP_DMA32;
3984         }
3985
3986         mapping = file_inode(obj->base.filp)->i_mapping;
3987         mapping_set_gfp_mask(mapping, mask);
3988
3989         i915_gem_object_init(obj, &i915_gem_object_ops);
3990
3991         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3992         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3993
3994         if (HAS_LLC(dev)) {
3995                 /* On some devices, we can have the GPU use the LLC (the CPU
3996                  * cache) for about a 10% performance improvement
3997                  * compared to uncached.  Graphics requests other than
3998                  * display scanout are coherent with the CPU in
3999                  * accessing this cache.  This means in this mode we
4000                  * don't need to clflush on the CPU side, and on the
4001                  * GPU side we only need to flush internal caches to
4002                  * get data visible to the CPU.
4003                  *
4004                  * However, we maintain the display planes as UC, and so
4005                  * need to rebind when first used as such.
4006                  */
4007                 obj->cache_level = I915_CACHE_LLC;
4008         } else
4009                 obj->cache_level = I915_CACHE_NONE;
4010
4011         trace_i915_gem_object_create(obj);
4012
4013         return obj;
4014
4015 fail:
4016         i915_gem_object_free(obj);
4017
4018         return ERR_PTR(ret);
4019 }
4020
4021 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4022 {
4023         /* If we are the last user of the backing storage (be it shmemfs
4024          * pages or stolen etc), we know that the pages are going to be
4025          * immediately released. In this case, we can then skip copying
4026          * back the contents from the GPU.
4027          */
4028
4029         if (obj->madv != I915_MADV_WILLNEED)
4030                 return false;
4031
4032         if (obj->base.filp == NULL)
4033                 return true;
4034
4035         /* At first glance, this looks racy, but then again so would be
4036          * userspace racing mmap against close. However, the first external
4037          * reference to the filp can only be obtained through the
4038          * i915_gem_mmap_ioctl() which safeguards us against the user
4039          * acquiring such a reference whilst we are in the middle of
4040          * freeing the object.
4041          */
4042         return atomic_long_read(&obj->base.filp->f_count) == 1;
4043 }
4044
4045 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4046 {
4047         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4048         struct drm_device *dev = obj->base.dev;
4049         struct drm_i915_private *dev_priv = to_i915(dev);
4050         struct i915_vma *vma, *next;
4051
4052         intel_runtime_pm_get(dev_priv);
4053
4054         trace_i915_gem_object_destroy(obj);
4055
4056         /* All file-owned VMA should have been released by this point through
4057          * i915_gem_close_object(), or earlier by i915_gem_context_close().
4058          * However, the object may also be bound into the global GTT (e.g.
4059          * older GPUs without per-process support, or for direct access through
4060          * the GTT either for the user or for scanout). Those VMA still need to
4061          * be unbound now.
4062          */
4063         list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
4064                 GEM_BUG_ON(!vma->is_ggtt);
4065                 GEM_BUG_ON(i915_vma_is_active(vma));
4066                 vma->pin_count = 0;
4067                 i915_vma_close(vma);
4068         }
4069         GEM_BUG_ON(obj->bind_count);
4070
4071         /* Stolen objects don't hold a ref, but do hold a pin count. Fix that up
4072          * before progressing. */
4073         if (obj->stolen)
4074                 i915_gem_object_unpin_pages(obj);
4075
4076         WARN_ON(obj->frontbuffer_bits);
4077
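        /* Drop the extra page pin held for tiled objects on platforms with
         * the QUIRK_PIN_SWIZZLED_PAGES quirk.
         */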
4078         if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
4079             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
4080             obj->tiling_mode != I915_TILING_NONE)
4081                 i915_gem_object_unpin_pages(obj);
4082
4083         if (WARN_ON(obj->pages_pin_count))
4084                 obj->pages_pin_count = 0;
4085         if (discard_backing_storage(obj))
4086                 obj->madv = I915_MADV_DONTNEED;
4087         i915_gem_object_put_pages(obj);
4088
4089         BUG_ON(obj->pages);
4090
4091         if (obj->base.import_attach)
4092                 drm_prime_gem_destroy(&obj->base, NULL);
4093
4094         if (obj->ops->release)
4095                 obj->ops->release(obj);
4096
4097         drm_gem_object_release(&obj->base);
4098         i915_gem_info_remove_obj(dev_priv, obj->base.size);
4099
4100         kfree(obj->bit_17);
4101         i915_gem_object_free(obj);
4102
4103         intel_runtime_pm_put(dev_priv);
4104 }
4105
4106 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4107                                      struct i915_address_space *vm)
4108 {
4109         struct i915_vma *vma;
4110         list_for_each_entry(vma, &obj->vma_list, obj_link) {
4111                 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
4112                     vma->vm == vm)
4113                         return vma;
4114         }
4115         return NULL;
4116 }
4117
4118 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
4119                                            const struct i915_ggtt_view *view)
4120 {
4121         struct i915_vma *vma;
4122
4123         GEM_BUG_ON(!view);
4124
4125         list_for_each_entry(vma, &obj->vma_list, obj_link)
4126                 if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
4127                         return vma;
4128         return NULL;
4129 }
4130
4131 static void
4132 i915_gem_stop_engines(struct drm_device *dev)
4133 {
4134         struct drm_i915_private *dev_priv = to_i915(dev);
4135         struct intel_engine_cs *engine;
4136
4137         for_each_engine(engine, dev_priv)
4138                 dev_priv->gt.stop_engine(engine);
4139 }
4140
4141 int
4142 i915_gem_suspend(struct drm_device *dev)
4143 {
4144         struct drm_i915_private *dev_priv = to_i915(dev);
4145         int ret = 0;
4146
4147         intel_suspend_gt_powersave(dev_priv);
4148
4149         mutex_lock(&dev->struct_mutex);
4150
4151         /* We have to flush all the executing contexts to main memory so
4152          * that they can be saved in the hibernation image. To ensure the last
4153          * context image is coherent, we have to switch away from it. That
4154          * leaves the dev_priv->kernel_context still active when
4155          * we actually suspend, and its image in memory may not match the GPU
4156          * state. Fortunately, the kernel_context is disposable and we do
4157          * not rely on its state.
4158          */
4159         ret = i915_gem_switch_to_kernel_context(dev_priv);
4160         if (ret)
4161                 goto err;
4162
4163         ret = i915_gem_wait_for_idle(dev_priv);
4164         if (ret)
4165                 goto err;
4166
4167         i915_gem_retire_requests(dev_priv);
4168
4169         /* Note that rather than stopping the engines, all we have to do
4170          * is assert that every RING_HEAD == RING_TAIL (all execution complete)
4171          * and similar for all logical context images (to ensure they are
4172          * all ready for hibernation).
4173          */
4174         i915_gem_stop_engines(dev);
4175         i915_gem_context_lost(dev_priv);
4176         mutex_unlock(&dev->struct_mutex);
4177
4178         cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4179         cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4180         flush_delayed_work(&dev_priv->gt.idle_work);
4181
4182         /* Assert that we successfully flushed all the work and
4183          * reset the GPU back to its idle, low power state.
4184          */
4185         WARN_ON(dev_priv->gt.awake);
4186
4187         return 0;
4188
4189 err:
4190         mutex_unlock(&dev->struct_mutex);
4191         return ret;
4192 }
4193
4194 void i915_gem_resume(struct drm_device *dev)
4195 {
4196         struct drm_i915_private *dev_priv = to_i915(dev);
4197
4198         mutex_lock(&dev->struct_mutex);
4199         i915_gem_restore_gtt_mappings(dev);
4200
4201         /* As we didn't flush the kernel context before suspend, we cannot
4202          * guarantee that the context image is complete. So let's just reset
4203          * it and start again.
4204          */
4205         if (i915.enable_execlists)
4206                 intel_lr_context_reset(dev_priv, dev_priv->kernel_context);
4207
4208         mutex_unlock(&dev->struct_mutex);
4209 }
4210
4211 void i915_gem_init_swizzling(struct drm_device *dev)
4212 {
4213         struct drm_i915_private *dev_priv = to_i915(dev);
4214
4215         if (INTEL_INFO(dev)->gen < 5 ||
4216             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4217                 return;
4218
4219         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4220                                  DISP_TILE_SURFACE_SWIZZLING);
4221
4222         if (IS_GEN5(dev))
4223                 return;
4224
4225         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4226         if (IS_GEN6(dev))
4227                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4228         else if (IS_GEN7(dev))
4229                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4230         else if (IS_GEN8(dev))
4231                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4232         else
4233                 BUG();
4234 }
4235
4236 static void init_unused_ring(struct drm_device *dev, u32 base)
4237 {
4238         struct drm_i915_private *dev_priv = to_i915(dev);
4239
4240         I915_WRITE(RING_CTL(base), 0);
4241         I915_WRITE(RING_HEAD(base), 0);
4242         I915_WRITE(RING_TAIL(base), 0);
4243         I915_WRITE(RING_START(base), 0);
4244 }
4245
4246 static void init_unused_rings(struct drm_device *dev)
4247 {
4248         if (IS_I830(dev)) {
4249                 init_unused_ring(dev, PRB1_BASE);
4250                 init_unused_ring(dev, SRB0_BASE);
4251                 init_unused_ring(dev, SRB1_BASE);
4252                 init_unused_ring(dev, SRB2_BASE);
4253                 init_unused_ring(dev, SRB3_BASE);
4254         } else if (IS_GEN2(dev)) {
4255                 init_unused_ring(dev, SRB0_BASE);
4256                 init_unused_ring(dev, SRB1_BASE);
4257         } else if (IS_GEN3(dev)) {
4258                 init_unused_ring(dev, PRB1_BASE);
4259                 init_unused_ring(dev, PRB2_BASE);
4260         }
4261 }
4262
4263 int
4264 i915_gem_init_hw(struct drm_device *dev)
4265 {
4266         struct drm_i915_private *dev_priv = to_i915(dev);
4267         struct intel_engine_cs *engine;
4268         int ret;
4269
4270         /* Double layer security blanket, see i915_gem_init() */
4271         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4272
4273         if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
4274                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4275
4276         if (IS_HASWELL(dev))
4277                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4278                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4279
4280         if (HAS_PCH_NOP(dev)) {
4281                 if (IS_IVYBRIDGE(dev)) {
4282                         u32 temp = I915_READ(GEN7_MSG_CTL);
4283                         temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4284                         I915_WRITE(GEN7_MSG_CTL, temp);
4285                 } else if (INTEL_INFO(dev)->gen >= 7) {
4286                         u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4287                         temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4288                         I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4289                 }
4290         }
4291
4292         i915_gem_init_swizzling(dev);
4293
4294         /*
4295          * At least 830 can leave some of the unused rings
4296          * "active" (ie. head != tail) after resume which
4297          * will prevent c3 entry. Makes sure all unused rings
4298          * are totally idle.
4299          */
4300         init_unused_rings(dev);
4301
4302         BUG_ON(!dev_priv->kernel_context);
4303
4304         ret = i915_ppgtt_init_hw(dev);
4305         if (ret) {
4306                 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
4307                 goto out;
4308         }
4309
4310         /* Need to do basic initialisation of all rings first: */
4311         for_each_engine(engine, dev_priv) {
4312                 ret = engine->init_hw(engine);
4313                 if (ret)
4314                         goto out;
4315         }
4316
4317         intel_mocs_init_l3cc_table(dev);
4318
4319         /* We can't enable contexts until all firmware is loaded */
4320         ret = intel_guc_setup(dev);
4321         if (ret)
4322                 goto out;
4323
4324 out:
4325         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4326         return ret;
4327 }
4328
4329 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
4330 {
4331         if (INTEL_INFO(dev_priv)->gen < 6)
4332                 return false;
4333
4334         /* TODO: make semaphores and Execlists play nicely together */
4335         if (i915.enable_execlists)
4336                 return false;
4337
4338         if (value >= 0)
4339                 return value;
4340
4341 #ifdef CONFIG_INTEL_IOMMU
4342         /* Enable semaphores on SNB when IO remapping is off */
4343         if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
4344                 return false;
4345 #endif
4346
4347         return true;
4348 }
4349
4350 int i915_gem_init(struct drm_device *dev)
4351 {
4352         struct drm_i915_private *dev_priv = to_i915(dev);
4353         int ret;
4354
4355         mutex_lock(&dev->struct_mutex);
4356
4357         if (!i915.enable_execlists) {
4358                 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
4359                 dev_priv->gt.stop_engine = intel_engine_stop;
4360         } else {
4361                 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
4362                 dev_priv->gt.stop_engine = intel_logical_ring_stop;
4363         }
4364
4365         /* This is just a security blanket to placate dragons.
4366          * On some systems, we very sporadically observe that the first TLBs
4367          * used by the CS may be stale, despite us poking the TLB reset. If
4368          * we hold the forcewake during initialisation these problems
4369          * just magically go away.
4370          */
4371         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4372
4373         i915_gem_init_userptr(dev_priv);
4374
4375         ret = i915_gem_init_ggtt(dev_priv);
4376         if (ret)
4377                 goto out_unlock;
4378
4379         ret = i915_gem_context_init(dev);
4380         if (ret)
4381                 goto out_unlock;
4382
4383         ret = intel_engines_init(dev);
4384         if (ret)
4385                 goto out_unlock;
4386
4387         ret = i915_gem_init_hw(dev);
4388         if (ret == -EIO) {
4389                 /* Allow engine initialisation to fail by marking the GPU as
4390                  * wedged. But we only want to do this where the GPU is angry,
4391          * for all other failures, such as an allocation failure, bail.
4392                  */
4393                 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4394                 atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
4395                 ret = 0;
4396         }
4397
4398 out_unlock:
4399         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4400         mutex_unlock(&dev->struct_mutex);
4401
4402         return ret;
4403 }
4404
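     /* Tear down every engine using the backend-specific cleanup hook
      * selected in i915_gem_init().
      */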
4405 void
4406 i915_gem_cleanup_engines(struct drm_device *dev)
4407 {
4408         struct drm_i915_private *dev_priv = to_i915(dev);
4409         struct intel_engine_cs *engine;
4410
4411         for_each_engine(engine, dev_priv)
4412                 dev_priv->gt.cleanup_engine(engine);
4413 }
4414
4415 static void
4416 init_engine_lists(struct intel_engine_cs *engine)
4417 {
4418         INIT_LIST_HEAD(&engine->request_list);
4419 }
4420
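     /* Work out how many fence registers the platform provides (8, 16 or
      * 32, or whatever the host exposes when running as a vGPU), then
      * reset them and detect the bit-6 swizzling mode.
      */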
4421 void
4422 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
4423 {
4424         struct drm_device *dev = &dev_priv->drm;
4425
4426         if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
4427             !IS_CHERRYVIEW(dev_priv))
4428                 dev_priv->num_fence_regs = 32;
4429         else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
4430                  IS_I945GM(dev_priv) || IS_G33(dev_priv))
4431                 dev_priv->num_fence_regs = 16;
4432         else
4433                 dev_priv->num_fence_regs = 8;
4434
4435         if (intel_vgpu_active(dev_priv))
4436                 dev_priv->num_fence_regs =
4437                                 I915_READ(vgtif_reg(avail_rs.fence_num));
4438
4439         /* Initialize fence registers to zero */
4440         i915_gem_restore_fences(dev);
4441
4442         i915_gem_detect_bit_6_swizzle(dev);
4443 }
4444
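     /* Allocate the slab caches for GEM objects, VMAs and requests, and
      * initialise the driver-wide lists, delayed work handlers and wait
      * queues used by the GEM core.
      */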
4445 void
4446 i915_gem_load_init(struct drm_device *dev)
4447 {
4448         struct drm_i915_private *dev_priv = to_i915(dev);
4449         int i;
4450
4451         dev_priv->objects =
4452                 kmem_cache_create("i915_gem_object",
4453                                   sizeof(struct drm_i915_gem_object), 0,
4454                                   SLAB_HWCACHE_ALIGN,
4455                                   NULL);
4456         dev_priv->vmas =
4457                 kmem_cache_create("i915_gem_vma",
4458                                   sizeof(struct i915_vma), 0,
4459                                   SLAB_HWCACHE_ALIGN,
4460                                   NULL);
4461         dev_priv->requests =
4462                 kmem_cache_create("i915_gem_request",
4463                                   sizeof(struct drm_i915_gem_request), 0,
4464                                   SLAB_HWCACHE_ALIGN,
4465                                   NULL);
4466
4467         INIT_LIST_HEAD(&dev_priv->context_list);
4468         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4469         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4470         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4471         for (i = 0; i < I915_NUM_ENGINES; i++)
4472                 init_engine_lists(&dev_priv->engine[i]);
4473         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4474                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4475         INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
4476                           i915_gem_retire_work_handler);
4477         INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
4478                           i915_gem_idle_work_handler);
4479         init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
4480         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4481
4482         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4483
4486         init_waitqueue_head(&dev_priv->pending_flip_queue);
4487
4488         dev_priv->mm.interruptible = true;
4489
4490         mutex_init(&dev_priv->fb_tracking.lock);
4491 }
4492
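     /* Free the slab caches allocated in i915_gem_load_init(). */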
4493 void i915_gem_load_cleanup(struct drm_device *dev)
4494 {
4495         struct drm_i915_private *dev_priv = to_i915(dev);
4496
4497         kmem_cache_destroy(dev_priv->requests);
4498         kmem_cache_destroy(dev_priv->vmas);
4499         kmem_cache_destroy(dev_priv->objects);
4500 }
4501
4502 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
4503 {
4504         struct drm_i915_gem_object *obj;
4505
4506         /* Called just before we write the hibernation image.
4507          *
4508          * We need to update the domain tracking to reflect that the CPU
4509          * will be accessing all the pages to create and restore from the
4510          * hibernation, and so upon restoration those pages will be in the
4511          * CPU domain.
4512          *
4513          * To make sure the hibernation image contains the latest state,
4514          * we update that state just before writing out the image.
4515          */
4516
4517         list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
4518                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4519                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4520         }
4521
4522         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4523                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4524                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4525         }
4526
4527         return 0;
4528 }
4529
4530 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4531 {
4532         struct drm_i915_file_private *file_priv = file->driver_priv;
4533         struct drm_i915_gem_request *request;
4534
4535         /* Clean up our request list when the client is going away, so that
4536          * later retire_requests won't dereference our soon-to-be-gone
4537          * file_priv.
4538          */
4539         spin_lock(&file_priv->mm.lock);
4540         list_for_each_entry(request, &file_priv->mm.request_list, client_list)
4541                 request->file_priv = NULL;
4542         spin_unlock(&file_priv->mm.lock);
4543
4544         if (!list_empty(&file_priv->rps.link)) {
4545                 spin_lock(&to_i915(dev)->rps.client_lock);
4546                 list_del(&file_priv->rps.link);
4547                 spin_unlock(&to_i915(dev)->rps.client_lock);
4548         }
4549 }
4550
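     /* Allocate and initialise the per-client GEM state (file_priv) and
      * open a context for the new client; the allocation is freed again
      * if context creation fails.
      */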
4551 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4552 {
4553         struct drm_i915_file_private *file_priv;
4554         int ret;
4555
4556         DRM_DEBUG_DRIVER("\n");
4557
4558         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4559         if (!file_priv)
4560                 return -ENOMEM;
4561
4562         file->driver_priv = file_priv;
4563         file_priv->dev_priv = to_i915(dev);
4564         file_priv->file = file;
4565         INIT_LIST_HEAD(&file_priv->rps.link);
4566
4567         spin_lock_init(&file_priv->mm.lock);
4568         INIT_LIST_HEAD(&file_priv->mm.request_list);
4569
4570         file_priv->bsd_engine = -1;
4571
4572         ret = i915_gem_context_open(dev, file);
4573         if (ret)
4574                 kfree(file_priv);
4575
4576         return ret;
4577 }
4578
4579 /**
4580  * i915_gem_track_fb - update frontbuffer tracking
4581  * @old: current GEM buffer for the frontbuffer slots
4582  * @new: new GEM buffer for the frontbuffer slots
4583  * @frontbuffer_bits: bitmask of frontbuffer slots
4584  *
4585  * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
4586  * from @old and setting them in @new. Both @old and @new can be NULL.
4587  */
4588 void i915_gem_track_fb(struct drm_i915_gem_object *old,
4589                        struct drm_i915_gem_object *new,
4590                        unsigned frontbuffer_bits)
4591 {
4592         if (old) {
4593                 WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
4594                 WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
4595                 old->frontbuffer_bits &= ~frontbuffer_bits;
4596         }
4597
4598         if (new) {
4599                 WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
4600                 WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
4601                 new->frontbuffer_bits |= frontbuffer_bits;
4602         }
4603 }
4604
4605 /* Helpers for looking up an object's bindings (VMAs) in a given address space */
4606 u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
4607                         struct i915_address_space *vm)
4608 {
4609         struct drm_i915_private *dev_priv = to_i915(o->base.dev);
4610         struct i915_vma *vma;
4611
4612         WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
4613
4614         list_for_each_entry(vma, &o->vma_list, obj_link) {
4615                 if (vma->is_ggtt &&
4616                     vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
4617                         continue;
4618                 if (vma->vm == vm)
4619                         return vma->node.start;
4620         }
4621
4622         WARN(1, "%s vma for this object not found.\n",
4623              i915_is_ggtt(vm) ? "global" : "ppgtt");
4624         return -1;
4625 }
4626
4627 u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
4628                                   const struct i915_ggtt_view *view)
4629 {
4630         struct i915_vma *vma;
4631
4632         list_for_each_entry(vma, &o->vma_list, obj_link)
4633                 if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
4634                         return vma->node.start;
4635
4636         WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
4637         return -1;
4638 }
4639
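     /* Report whether the object has an allocated binding in the given
      * address space (only the normal view is considered for the GGTT).
      */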
4640 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
4641                         struct i915_address_space *vm)
4642 {
4643         struct i915_vma *vma;
4644
4645         list_for_each_entry(vma, &o->vma_list, obj_link) {
4646                 if (vma->is_ggtt &&
4647                     vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
4648                         continue;
4649                 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
4650                         return true;
4651         }
4652
4653         return false;
4654 }
4655
4656 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
4657                                   const struct i915_ggtt_view *view)
4658 {
4659         struct i915_vma *vma;
4660
4661         list_for_each_entry(vma, &o->vma_list, obj_link)
4662                 if (vma->is_ggtt &&
4663                     i915_ggtt_view_equal(&vma->ggtt_view, view) &&
4664                     drm_mm_node_allocated(&vma->node))
4665                         return true;
4666
4667         return false;
4668 }
4669
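     /* Return the size of the object's normal GGTT binding, or 0 if there
      * is no such binding.
      */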
4670 unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
4671 {
4672         struct i915_vma *vma;
4673
4674         GEM_BUG_ON(list_empty(&o->vma_list));
4675
4676         list_for_each_entry(vma, &o->vma_list, obj_link) {
4677                 if (vma->is_ggtt &&
4678                     vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
4679                         return vma->node.size;
4680         }
4681
4682         return 0;
4683 }
4684
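     /* Report whether any of the object's VMAs is currently pinned. */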
4685 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
4686 {
4687         struct i915_vma *vma;
4688         list_for_each_entry(vma, &obj->vma_list, obj_link)
4689                 if (i915_vma_is_pinned(vma))
4690                         return true;
4691
4692         return false;
4693 }
4694
4695 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
4696 struct page *
4697 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
4698 {
4699         struct page *page;
4700
4701         /* Only objects backed by struct pages have per-page dirty tracking */
4702         if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
4703                 return NULL;
4704
4705         page = i915_gem_object_get_page(obj, n);
4706         set_page_dirty(page);
4707         return page;
4708 }
4709
4710 /* Allocate a new GEM object and fill it with the supplied data */
4711 struct drm_i915_gem_object *
4712 i915_gem_object_create_from_data(struct drm_device *dev,
4713                                  const void *data, size_t size)
4714 {
4715         struct drm_i915_gem_object *obj;
4716         struct sg_table *sg;
4717         size_t bytes;
4718         int ret;
4719
4720         obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
4721         if (IS_ERR(obj))
4722                 return obj;
4723
4724         ret = i915_gem_object_set_to_cpu_domain(obj, true);
4725         if (ret)
4726                 goto fail;
4727
4728         ret = i915_gem_object_get_pages(obj);
4729         if (ret)
4730                 goto fail;
4731
4732         i915_gem_object_pin_pages(obj);
4733         sg = obj->pages;
4734         bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
4735         obj->dirty = 1;         /* Backing store is now out of date */
4736         i915_gem_object_unpin_pages(obj);
4737
4738         if (WARN_ON(bytes != size)) {
4739                 DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
4740                 ret = -EFAULT;
4741                 goto fail;
4742         }
4743
4744         return obj;
4745
4746 fail:
4747         i915_gem_object_put(obj);
4748         return ERR_PTR(ret);
4749 }