drivers/gpu/drm/i915/i915_gem_execbuffer.c [linux-2.6-block.git]
1 /*
2  * Copyright © 2008,2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Chris Wilson <chris@chris-wilson.co.uk>
26  *
27  */
28
29 #include <drm/drmP.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/dma_remapping.h>
35
36 struct eb_objects {
37         int and;
38         struct hlist_head buckets[0];
39 };
40
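/*
 * Handle->object lookup table used during execbuffer processing. The
 * bucket array starts at half a page's worth of hlist heads and is
 * halved until it no longer exceeds the number of objects submitted;
 * eb->and holds the resulting bucket mask (count - 1).
 */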
41 static struct eb_objects *
42 eb_create(int size)
43 {
44         struct eb_objects *eb;
45         int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
46         BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
47         while (count > size)
48                 count >>= 1;
49         eb = kzalloc(count*sizeof(struct hlist_head) +
50                      sizeof(struct eb_objects),
51                      GFP_KERNEL);
52         if (eb == NULL)
53                 return eb;
54
55         eb->and = count - 1;
56         return eb;
57 }
58
59 static void
60 eb_reset(struct eb_objects *eb)
61 {
62         memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
63 }
64
65 static void
66 eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
67 {
68         hlist_add_head(&obj->exec_node,
69                        &eb->buckets[obj->exec_handle & eb->and]);
70 }
71
72 static struct drm_i915_gem_object *
73 eb_get_object(struct eb_objects *eb, unsigned long handle)
74 {
75         struct hlist_head *head;
76         struct hlist_node *node;
77         struct drm_i915_gem_object *obj;
78
79         head = &eb->buckets[handle & eb->and];
80         hlist_for_each(node, head) {
81                 obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
82                 if (obj->exec_handle == handle)
83                         return obj;
84         }
85
86         return NULL;
87 }
88
89 static void
90 eb_destroy(struct eb_objects *eb)
91 {
92         kfree(eb);
93 }
94
95 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
96 {
97         return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
98                 !obj->map_and_fenceable ||
99                 obj->cache_level != I915_CACHE_NONE);
100 }
101
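/*
 * Apply a single relocation entry: look up the target object by handle,
 * validate the requested GPU read/write domains, and, if the presumed
 * offset is stale, write the target's GTT offset plus delta into the
 * batch object, either through a CPU kmap or through the GTT aperture.
 */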
102 static int
103 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
104                                    struct eb_objects *eb,
105                                    struct drm_i915_gem_relocation_entry *reloc)
106 {
107         struct drm_device *dev = obj->base.dev;
108         struct drm_gem_object *target_obj;
109         struct drm_i915_gem_object *target_i915_obj;
110         uint32_t target_offset;
111         int ret = -EINVAL;
112
113         /* we already hold a reference to all valid objects */
114         target_obj = &eb_get_object(eb, reloc->target_handle)->base;
115         if (unlikely(target_obj == NULL))
116                 return -ENOENT;
117
118         target_i915_obj = to_intel_bo(target_obj);
119         target_offset = target_i915_obj->gtt_offset;
120
121         /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
122          * pipe_control writes because the gpu doesn't properly redirect them
123          * through the ppgtt for non-secure batchbuffers. */
124         if (unlikely(IS_GEN6(dev) &&
125             reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
126             !target_i915_obj->has_global_gtt_mapping)) {
127                 i915_gem_gtt_bind_object(target_i915_obj,
128                                          target_i915_obj->cache_level);
129         }
130
131         /* Validate that the target is in a valid r/w GPU domain */
132         if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
133                 DRM_DEBUG("reloc with multiple write domains: "
134                           "obj %p target %d offset %d "
135                           "read %08x write %08x",
136                           obj, reloc->target_handle,
137                           (int) reloc->offset,
138                           reloc->read_domains,
139                           reloc->write_domain);
140                 return ret;
141         }
142         if (unlikely((reloc->write_domain | reloc->read_domains)
143                      & ~I915_GEM_GPU_DOMAINS)) {
144                 DRM_DEBUG("reloc with read/write non-GPU domains: "
145                           "obj %p target %d offset %d "
146                           "read %08x write %08x",
147                           obj, reloc->target_handle,
148                           (int) reloc->offset,
149                           reloc->read_domains,
150                           reloc->write_domain);
151                 return ret;
152         }
153         if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
154                      reloc->write_domain != target_obj->pending_write_domain)) {
155                 DRM_DEBUG("Write domain conflict: "
156                           "obj %p target %d offset %d "
157                           "new %08x old %08x\n",
158                           obj, reloc->target_handle,
159                           (int) reloc->offset,
160                           reloc->write_domain,
161                           target_obj->pending_write_domain);
162                 return ret;
163         }
164
165         target_obj->pending_read_domains |= reloc->read_domains;
166         target_obj->pending_write_domain |= reloc->write_domain;
167
168         /* If the relocation already has the right value in it, no
169          * more work needs to be done.
170          */
171         if (target_offset == reloc->presumed_offset)
172                 return 0;
173
174         /* Check that the relocation address is valid... */
175         if (unlikely(reloc->offset > obj->base.size - 4)) {
176                 DRM_DEBUG("Relocation beyond object bounds: "
177                           "obj %p target %d offset %d size %d.\n",
178                           obj, reloc->target_handle,
179                           (int) reloc->offset,
180                           (int) obj->base.size);
181                 return ret;
182         }
183         if (unlikely(reloc->offset & 3)) {
184                 DRM_DEBUG("Relocation not 4-byte aligned: "
185                           "obj %p target %d offset %d.\n",
186                           obj, reloc->target_handle,
187                           (int) reloc->offset);
188                 return ret;
189         }
190
191         /* We can't wait for rendering with pagefaults disabled */
192         if (obj->active && in_atomic())
193                 return -EFAULT;
194
195         reloc->delta += target_offset;
196         if (use_cpu_reloc(obj)) {
197                 uint32_t page_offset = reloc->offset & ~PAGE_MASK;
198                 char *vaddr;
199
200                 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
201                 if (ret)
202                         return ret;
203
204                 vaddr = kmap_atomic(i915_gem_object_get_page(obj,
205                                                              reloc->offset >> PAGE_SHIFT));
206                 *(uint32_t *)(vaddr + page_offset) = reloc->delta;
207                 kunmap_atomic(vaddr);
208         } else {
209                 struct drm_i915_private *dev_priv = dev->dev_private;
210                 uint32_t __iomem *reloc_entry;
211                 void __iomem *reloc_page;
212
213                 ret = i915_gem_object_set_to_gtt_domain(obj, true);
214                 if (ret)
215                         return ret;
216
217                 ret = i915_gem_object_put_fence(obj);
218                 if (ret)
219                         return ret;
220
221                 /* Map the page containing the relocation we're going to perform.  */
222                 reloc->offset += obj->gtt_offset;
223                 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
224                                                       reloc->offset & PAGE_MASK);
225                 reloc_entry = (uint32_t __iomem *)
226                         (reloc_page + (reloc->offset & ~PAGE_MASK));
227                 iowrite32(reloc->delta, reloc_entry);
228                 io_mapping_unmap_atomic(reloc_page);
229         }
230
231         /* and update the user's relocation entry */
232         reloc->presumed_offset = target_offset;
233
234         return 0;
235 }
236
237 static int
238 i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
239                                     struct eb_objects *eb)
240 {
241 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
242         struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
243         struct drm_i915_gem_relocation_entry __user *user_relocs;
244         struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
245         int remain, ret;
246
247         user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
248
249         remain = entry->relocation_count;
250         while (remain) {
251                 struct drm_i915_gem_relocation_entry *r = stack_reloc;
252                 int count = remain;
253                 if (count > ARRAY_SIZE(stack_reloc))
254                         count = ARRAY_SIZE(stack_reloc);
255                 remain -= count;
256
257                 if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
258                         return -EFAULT;
259
260                 do {
261                         u64 offset = r->presumed_offset;
262
263                         ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
264                         if (ret)
265                                 return ret;
266
267                         if (r->presumed_offset != offset &&
268                             __copy_to_user_inatomic(&user_relocs->presumed_offset,
269                                                     &r->presumed_offset,
270                                                     sizeof(r->presumed_offset))) {
271                                 return -EFAULT;
272                         }
273
274                         user_relocs++;
275                         r++;
276                 } while (--count);
277         }
278
279         return 0;
280 #undef N_RELOC
281 }
282
283 static int
284 i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
285                                          struct eb_objects *eb,
286                                          struct drm_i915_gem_relocation_entry *relocs)
287 {
288         const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
289         int i, ret;
290
291         for (i = 0; i < entry->relocation_count; i++) {
292                 ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
293                 if (ret)
294                         return ret;
295         }
296
297         return 0;
298 }
299
300 static int
301 i915_gem_execbuffer_relocate(struct drm_device *dev,
302                              struct eb_objects *eb,
303                              struct list_head *objects)
304 {
305         struct drm_i915_gem_object *obj;
306         int ret = 0;
307
308         /* This is the fast path and we cannot handle a pagefault whilst
309          * holding the struct mutex lest the user pass in the relocations
310          * contained within a mmapped bo. In such a case, the page
311          * fault handler would call i915_gem_fault() and we would try to
312          * acquire the struct mutex again. Obviously this is bad and so
313          * lockdep complains vehemently.
314          */
315         pagefault_disable();
316         list_for_each_entry(obj, objects, exec_list) {
317                 ret = i915_gem_execbuffer_relocate_object(obj, eb);
318                 if (ret)
319                         break;
320         }
321         pagefault_enable();
322
323         return ret;
324 }
325
326 #define  __EXEC_OBJECT_HAS_PIN (1<<31)
327 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
328
329 static int
330 need_reloc_mappable(struct drm_i915_gem_object *obj)
331 {
332         struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
333         return entry->relocation_count && !use_cpu_reloc(obj);
334 }
335
336 static int
337 i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
338                                    struct intel_ring_buffer *ring)
339 {
340         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
341         struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
342         bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
343         bool need_fence, need_mappable;
344         int ret;
345
346         need_fence =
347                 has_fenced_gpu_access &&
348                 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
349                 obj->tiling_mode != I915_TILING_NONE;
350         need_mappable = need_fence || need_reloc_mappable(obj);
351
352         ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
353         if (ret)
354                 return ret;
355
356         entry->flags |= __EXEC_OBJECT_HAS_PIN;
357
358         if (has_fenced_gpu_access) {
359                 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
360                         ret = i915_gem_object_get_fence(obj);
361                         if (ret)
362                                 return ret;
363
364                         if (i915_gem_object_pin_fence(obj))
365                                 entry->flags |= __EXEC_OBJECT_HAS_FENCE;
366
367                         obj->pending_fenced_gpu_access = true;
368                 }
369         }
370
371         /* Ensure ppgtt mapping exists if needed */
372         if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
373                 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
374                                        obj, obj->cache_level);
375
376                 obj->has_aliasing_ppgtt_mapping = 1;
377         }
378
379         entry->offset = obj->gtt_offset;
380         return 0;
381 }
382
383 static void
384 i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
385 {
386         struct drm_i915_gem_exec_object2 *entry;
387
388         if (!obj->gtt_space)
389                 return;
390
391         entry = obj->exec_entry;
392
393         if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
394                 i915_gem_object_unpin_fence(obj);
395
396         if (entry->flags & __EXEC_OBJECT_HAS_PIN)
397                 i915_gem_object_unpin(obj);
398
399         entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
400 }
401
402 static int
403 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
404                             struct drm_file *file,
405                             struct list_head *objects)
406 {
407         struct drm_i915_gem_object *obj;
408         struct list_head ordered_objects;
409         bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
410         int retry;
411
412         INIT_LIST_HEAD(&ordered_objects);
413         while (!list_empty(objects)) {
414                 struct drm_i915_gem_exec_object2 *entry;
415                 bool need_fence, need_mappable;
416
417                 obj = list_first_entry(objects,
418                                        struct drm_i915_gem_object,
419                                        exec_list);
420                 entry = obj->exec_entry;
421
422                 need_fence =
423                         has_fenced_gpu_access &&
424                         entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
425                         obj->tiling_mode != I915_TILING_NONE;
426                 need_mappable = need_fence || need_reloc_mappable(obj);
427
428                 if (need_mappable)
429                         list_move(&obj->exec_list, &ordered_objects);
430                 else
431                         list_move_tail(&obj->exec_list, &ordered_objects);
432
433                 obj->base.pending_read_domains = 0;
434                 obj->base.pending_write_domain = 0;
435                 obj->pending_fenced_gpu_access = false;
436         }
437         list_splice(&ordered_objects, objects);
438
439         /* Attempt to pin all of the buffers into the GTT.
440          * This is done in 3 phases:
441          *
442          * 1a. Unbind all objects that do not match the GTT constraints for
443          *     the execbuffer (fenceable, mappable, alignment etc).
444          * 1b. Increment pin count for already bound objects.
445          * 2.  Bind new objects.
446          * 3.  Decrement pin count.
447          *
448          * This avoids unnecessary unbinding of later objects in order to make
449          * room for the earlier objects *unless* we need to defragment.
450          */
451         retry = 0;
452         do {
453                 int ret = 0;
454
455                 /* Unbind any ill-fitting objects or pin. */
456                 list_for_each_entry(obj, objects, exec_list) {
457                         struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
458                         bool need_fence, need_mappable;
459
460                         if (!obj->gtt_space)
461                                 continue;
462
463                         need_fence =
464                                 has_fenced_gpu_access &&
465                                 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
466                                 obj->tiling_mode != I915_TILING_NONE;
467                         need_mappable = need_fence || need_reloc_mappable(obj);
468
469                         if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
470                             (need_mappable && !obj->map_and_fenceable))
471                                 ret = i915_gem_object_unbind(obj);
472                         else
473                                 ret = i915_gem_execbuffer_reserve_object(obj, ring);
474                         if (ret)
475                                 goto err;
476                 }
477
478                 /* Bind fresh objects */
479                 list_for_each_entry(obj, objects, exec_list) {
480                         if (obj->gtt_space)
481                                 continue;
482
483                         ret = i915_gem_execbuffer_reserve_object(obj, ring);
484                         if (ret)
485                                 goto err;
486                 }
487
488 err:            /* Decrement pin count for bound objects */
489                 list_for_each_entry(obj, objects, exec_list)
490                         i915_gem_execbuffer_unreserve_object(obj);
491
492                 if (ret != -ENOSPC || retry++)
493                         return ret;
494
495                 ret = i915_gem_evict_everything(ring->dev);
496                 if (ret)
497                         return ret;
498         } while (1);
499 }
500
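/*
 * Slow-path relocation: drop struct_mutex so the relocation entries can
 * be copied from userspace with pagefaults enabled, then retake the
 * lock, re-lookup and re-reserve the objects, and apply the relocations
 * from the kernel-side copy.
 */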
501 static int
502 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
503                                   struct drm_file *file,
504                                   struct intel_ring_buffer *ring,
505                                   struct list_head *objects,
506                                   struct eb_objects *eb,
507                                   struct drm_i915_gem_exec_object2 *exec,
508                                   int count)
509 {
510         struct drm_i915_gem_relocation_entry *reloc;
511         struct drm_i915_gem_object *obj;
512         int *reloc_offset;
513         int i, total, ret;
514
515         /* We may process another execbuffer during the unlock... */
516         while (!list_empty(objects)) {
517                 obj = list_first_entry(objects,
518                                        struct drm_i915_gem_object,
519                                        exec_list);
520                 list_del_init(&obj->exec_list);
521                 drm_gem_object_unreference(&obj->base);
522         }
523
524         mutex_unlock(&dev->struct_mutex);
525
526         total = 0;
527         for (i = 0; i < count; i++)
528                 total += exec[i].relocation_count;
529
530         reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
531         reloc = drm_malloc_ab(total, sizeof(*reloc));
532         if (reloc == NULL || reloc_offset == NULL) {
533                 drm_free_large(reloc);
534                 drm_free_large(reloc_offset);
535                 mutex_lock(&dev->struct_mutex);
536                 return -ENOMEM;
537         }
538
539         total = 0;
540         for (i = 0; i < count; i++) {
541                 struct drm_i915_gem_relocation_entry __user *user_relocs;
542
543                 user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
544
545                 if (copy_from_user(reloc+total, user_relocs,
546                                    exec[i].relocation_count * sizeof(*reloc))) {
547                         ret = -EFAULT;
548                         mutex_lock(&dev->struct_mutex);
549                         goto err;
550                 }
551
552                 reloc_offset[i] = total;
553                 total += exec[i].relocation_count;
554         }
555
556         ret = i915_mutex_lock_interruptible(dev);
557         if (ret) {
558                 mutex_lock(&dev->struct_mutex);
559                 goto err;
560         }
561
562         /* reacquire the objects */
563         eb_reset(eb);
564         for (i = 0; i < count; i++) {
565                 obj = to_intel_bo(drm_gem_object_lookup(dev, file,
566                                                         exec[i].handle));
567                 if (&obj->base == NULL) {
568                         DRM_DEBUG("Invalid object handle %d at index %d\n",
569                                    exec[i].handle, i);
570                         ret = -ENOENT;
571                         goto err;
572                 }
573
574                 list_add_tail(&obj->exec_list, objects);
575                 obj->exec_handle = exec[i].handle;
576                 obj->exec_entry = &exec[i];
577                 eb_add_object(eb, obj);
578         }
579
580         ret = i915_gem_execbuffer_reserve(ring, file, objects);
581         if (ret)
582                 goto err;
583
584         list_for_each_entry(obj, objects, exec_list) {
585                 int offset = obj->exec_entry - exec;
586                 ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
587                                                                reloc + reloc_offset[offset]);
588                 if (ret)
589                         goto err;
590         }
591
592         /* Leave the user relocations as they are; this is the painfully slow path,
593          * and we want to avoid the complication of dropping the lock whilst
594          * having buffers reserved in the aperture and so causing spurious
595          * ENOSPC for random operations.
596          */
597
598 err:
599         drm_free_large(reloc);
600         drm_free_large(reloc_offset);
601         return ret;
602 }
603
604 static int
605 i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
606 {
607         u32 plane, flip_mask;
608         int ret;
609
610         /* Check for any pending flips. As we only maintain a flip queue depth
611          * of 1, we can simply insert a WAIT for the next display flip prior
612          * to executing the batch and avoid stalling the CPU.
613          */
614
615         for (plane = 0; flips >> plane; plane++) {
616                 if (((flips >> plane) & 1) == 0)
617                         continue;
618
619                 if (plane)
620                         flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
621                 else
622                         flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
623
624                 ret = intel_ring_begin(ring, 2);
625                 if (ret)
626                         return ret;
627
628                 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
629                 intel_ring_emit(ring, MI_NOOP);
630                 intel_ring_advance(ring);
631         }
632
633         return 0;
634 }
635
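/*
 * Prepare every object for execution: synchronise against rendering on
 * other rings, clflush anything still in the CPU write domain, wait for
 * pending pageflips, and finally invalidate the GPU caches.
 */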
636 static int
637 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
638                                 struct list_head *objects)
639 {
640         struct drm_i915_gem_object *obj;
641         uint32_t flush_domains = 0;
642         uint32_t flips = 0;
643         int ret;
644
645         list_for_each_entry(obj, objects, exec_list) {
646                 ret = i915_gem_object_sync(obj, ring);
647                 if (ret)
648                         return ret;
649
650                 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
651                         i915_gem_clflush_object(obj);
652
653                 if (obj->base.pending_write_domain)
654                         flips |= atomic_read(&obj->pending_flip);
655
656                 flush_domains |= obj->base.write_domain;
657         }
658
659         if (flips) {
660                 ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
661                 if (ret)
662                         return ret;
663         }
664
665         if (flush_domains & I915_GEM_DOMAIN_CPU)
666                 i915_gem_chipset_flush(ring->dev);
667
668         if (flush_domains & I915_GEM_DOMAIN_GTT)
669                 wmb();
670
671         /* Unconditionally invalidate gpu caches and ensure that we do flush
672          * any residual writes from the previous batch.
673          */
674         return intel_ring_invalidate_all_caches(ring);
675 }
676
677 static bool
678 i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
679 {
680         return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
681 }
682
683 static int
684 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
685                    int count)
686 {
687         int i;
688
689         for (i = 0; i < count; i++) {
690                 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
691                 int length; /* limited by fault_in_pages_readable() */
692
693                 /* First check for malicious input causing overflow */
694                 if (exec[i].relocation_count >
695                     INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
696                         return -EINVAL;
697
698                 length = exec[i].relocation_count *
699                         sizeof(struct drm_i915_gem_relocation_entry);
700                 if (!access_ok(VERIFY_READ, ptr, length))
701                         return -EFAULT;
702
703                 /* we may also need to update the presumed offsets */
704                 if (!access_ok(VERIFY_WRITE, ptr, length))
705                         return -EFAULT;
706
707                 if (fault_in_multipages_readable(ptr, length))
708                         return -EFAULT;
709         }
710
711         return 0;
712 }
713
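/*
 * Mark each object as active on the ring, latching the pending
 * read/write domains computed during relocation and recording the write
 * seqno for objects the GPU will modify.
 */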
714 static void
715 i915_gem_execbuffer_move_to_active(struct list_head *objects,
716                                    struct intel_ring_buffer *ring)
717 {
718         struct drm_i915_gem_object *obj;
719
720         list_for_each_entry(obj, objects, exec_list) {
721                 u32 old_read = obj->base.read_domains;
722                 u32 old_write = obj->base.write_domain;
723
724                 obj->base.read_domains = obj->base.pending_read_domains;
725                 obj->base.write_domain = obj->base.pending_write_domain;
726                 obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
727
728                 i915_gem_object_move_to_active(obj, ring);
729                 if (obj->base.write_domain) {
730                         obj->dirty = 1;
731                         obj->last_write_seqno = intel_ring_get_seqno(ring);
732                         if (obj->pin_count) /* check for potential scanout */
733                                 intel_mark_fb_busy(obj);
734                 }
735
736                 trace_i915_gem_object_change_domain(obj, old_read, old_write);
737         }
738 }
739
740 static void
741 i915_gem_execbuffer_retire_commands(struct drm_device *dev,
742                                     struct drm_file *file,
743                                     struct intel_ring_buffer *ring)
744 {
745         /* Unconditionally force add_request to emit a full flush. */
746         ring->gpu_caches_dirty = true;
747
748         /* Add a breadcrumb for the completion of the batch buffer */
749         (void)i915_add_request(ring, file, NULL);
750 }
751
752 static int
753 i915_reset_gen7_sol_offsets(struct drm_device *dev,
754                             struct intel_ring_buffer *ring)
755 {
756         drm_i915_private_t *dev_priv = dev->dev_private;
757         int ret, i;
758
759         if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
760                 return 0;
761
762         ret = intel_ring_begin(ring, 4 * 3);
763         if (ret)
764                 return ret;
765
766         for (i = 0; i < 4; i++) {
767                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
768                 intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
769                 intel_ring_emit(ring, 0);
770         }
771
772         intel_ring_advance(ring);
773
774         return 0;
775 }
776
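/*
 * Core execbuffer path shared by both ioctls: validate the arguments,
 * look up the objects, reserve (pin) them into the GTT, apply the
 * relocations (falling back to the slow path on -EFAULT), flush the
 * objects to the GPU, dispatch the batch and retire the request.
 */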
777 static int
778 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
779                        struct drm_file *file,
780                        struct drm_i915_gem_execbuffer2 *args,
781                        struct drm_i915_gem_exec_object2 *exec)
782 {
783         drm_i915_private_t *dev_priv = dev->dev_private;
784         struct list_head objects;
785         struct eb_objects *eb;
786         struct drm_i915_gem_object *batch_obj;
787         struct drm_clip_rect *cliprects = NULL;
788         struct intel_ring_buffer *ring;
789         u32 ctx_id = i915_execbuffer2_get_context_id(*args);
790         u32 exec_start, exec_len;
791         u32 mask;
792         u32 flags;
793         int ret, mode, i;
794
795         if (!i915_gem_check_execbuffer(args)) {
796                 DRM_DEBUG("execbuf with invalid offset/length\n");
797                 return -EINVAL;
798         }
799
800         ret = validate_exec_list(exec, args->buffer_count);
801         if (ret)
802                 return ret;
803
804         flags = 0;
805         if (args->flags & I915_EXEC_SECURE) {
806                 if (!file->is_master || !capable(CAP_SYS_ADMIN))
807                     return -EPERM;
808
809                 flags |= I915_DISPATCH_SECURE;
810         }
811
812         switch (args->flags & I915_EXEC_RING_MASK) {
813         case I915_EXEC_DEFAULT:
814         case I915_EXEC_RENDER:
815                 ring = &dev_priv->ring[RCS];
816                 break;
817         case I915_EXEC_BSD:
818                 ring = &dev_priv->ring[VCS];
819                 if (ctx_id != 0) {
820                         DRM_DEBUG("Ring %s doesn't support contexts\n",
821                                   ring->name);
822                         return -EPERM;
823                 }
824                 break;
825         case I915_EXEC_BLT:
826                 ring = &dev_priv->ring[BCS];
827                 if (ctx_id != 0) {
828                         DRM_DEBUG("Ring %s doesn't support contexts\n",
829                                   ring->name);
830                         return -EPERM;
831                 }
832                 break;
833         default:
834                 DRM_DEBUG("execbuf with unknown ring: %d\n",
835                           (int)(args->flags & I915_EXEC_RING_MASK));
836                 return -EINVAL;
837         }
838         if (!intel_ring_initialized(ring)) {
839                 DRM_DEBUG("execbuf with invalid ring: %d\n",
840                           (int)(args->flags & I915_EXEC_RING_MASK));
841                 return -EINVAL;
842         }
843
844         mode = args->flags & I915_EXEC_CONSTANTS_MASK;
845         mask = I915_EXEC_CONSTANTS_MASK;
846         switch (mode) {
847         case I915_EXEC_CONSTANTS_REL_GENERAL:
848         case I915_EXEC_CONSTANTS_ABSOLUTE:
849         case I915_EXEC_CONSTANTS_REL_SURFACE:
850                 if (ring == &dev_priv->ring[RCS] &&
851                     mode != dev_priv->relative_constants_mode) {
852                         if (INTEL_INFO(dev)->gen < 4)
853                                 return -EINVAL;
854
855                         if (INTEL_INFO(dev)->gen > 5 &&
856                             mode == I915_EXEC_CONSTANTS_REL_SURFACE)
857                                 return -EINVAL;
858
859                         /* The HW changed the meaning of this bit on gen6 */
860                         if (INTEL_INFO(dev)->gen >= 6)
861                                 mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
862                 }
863                 break;
864         default:
865                 DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
866                 return -EINVAL;
867         }
868
869         if (args->buffer_count < 1) {
870                 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
871                 return -EINVAL;
872         }
873
874         if (args->num_cliprects != 0) {
875                 if (ring != &dev_priv->ring[RCS]) {
876                         DRM_DEBUG("clip rectangles are only valid with the render ring\n");
877                         return -EINVAL;
878                 }
879
880                 if (INTEL_INFO(dev)->gen >= 5) {
881                         DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
882                         return -EINVAL;
883                 }
884
885                 if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
886                         DRM_DEBUG("execbuf with %u cliprects\n",
887                                   args->num_cliprects);
888                         return -EINVAL;
889                 }
890
891                 cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
892                                     GFP_KERNEL);
893                 if (cliprects == NULL) {
894                         ret = -ENOMEM;
895                         goto pre_mutex_err;
896                 }
897
898                 if (copy_from_user(cliprects,
899                                      (struct drm_clip_rect __user *)(uintptr_t)
900                                      args->cliprects_ptr,
901                                      sizeof(*cliprects)*args->num_cliprects)) {
902                         ret = -EFAULT;
903                         goto pre_mutex_err;
904                 }
905         }
906
907         ret = i915_mutex_lock_interruptible(dev);
908         if (ret)
909                 goto pre_mutex_err;
910
911         if (dev_priv->mm.suspended) {
912                 mutex_unlock(&dev->struct_mutex);
913                 ret = -EBUSY;
914                 goto pre_mutex_err;
915         }
916
917         eb = eb_create(args->buffer_count);
918         if (eb == NULL) {
919                 mutex_unlock(&dev->struct_mutex);
920                 ret = -ENOMEM;
921                 goto pre_mutex_err;
922         }
923
924         /* Look up object handles */
925         INIT_LIST_HEAD(&objects);
926         for (i = 0; i < args->buffer_count; i++) {
927                 struct drm_i915_gem_object *obj;
928
929                 obj = to_intel_bo(drm_gem_object_lookup(dev, file,
930                                                         exec[i].handle));
931                 if (&obj->base == NULL) {
932                         DRM_DEBUG("Invalid object handle %d at index %d\n",
933                                    exec[i].handle, i);
934                         /* prevent error path from reading uninitialized data */
935                         ret = -ENOENT;
936                         goto err;
937                 }
938
939                 if (!list_empty(&obj->exec_list)) {
940                         DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
941                                    obj, exec[i].handle, i);
942                         ret = -EINVAL;
943                         goto err;
944                 }
945
946                 list_add_tail(&obj->exec_list, &objects);
947                 obj->exec_handle = exec[i].handle;
948                 obj->exec_entry = &exec[i];
949                 eb_add_object(eb, obj);
950         }
951
952         /* take note of the batch buffer before we might reorder the lists */
953         batch_obj = list_entry(objects.prev,
954                                struct drm_i915_gem_object,
955                                exec_list);
956
957         /* Move the objects en-masse into the GTT, evicting if necessary. */
958         ret = i915_gem_execbuffer_reserve(ring, file, &objects);
959         if (ret)
960                 goto err;
961
962         /* The objects are in their final locations, apply the relocations. */
963         ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
964         if (ret) {
965                 if (ret == -EFAULT) {
966                         ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
967                                                                 &objects, eb,
968                                                                 exec,
969                                                                 args->buffer_count);
970                         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
971                 }
972                 if (ret)
973                         goto err;
974         }
975
976         /* Set the pending read domains for the batch buffer to COMMAND */
977         if (batch_obj->base.pending_write_domain) {
978                 DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
979                 ret = -EINVAL;
980                 goto err;
981         }
982         batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
983
984         /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
985          * batch" bit. Hence we need to pin secure batches into the global gtt.
986          * hsw should have this fixed, but let's be paranoid and do it
987          * unconditionally for now. */
988         if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
989                 i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
990
991         ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
992         if (ret)
993                 goto err;
994
995         ret = i915_switch_context(ring, file, ctx_id);
996         if (ret)
997                 goto err;
998
999         if (ring == &dev_priv->ring[RCS] &&
1000             mode != dev_priv->relative_constants_mode) {
1001                 ret = intel_ring_begin(ring, 4);
1002                 if (ret)
1003                         goto err;
1004
1005                 intel_ring_emit(ring, MI_NOOP);
1006                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1007                 intel_ring_emit(ring, INSTPM);
1008                 intel_ring_emit(ring, mask << 16 | mode);
1009                 intel_ring_advance(ring);
1010
1011                 dev_priv->relative_constants_mode = mode;
1012         }
1013
1014         if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1015                 ret = i915_reset_gen7_sol_offsets(dev, ring);
1016                 if (ret)
1017                         goto err;
1018         }
1019
1020         exec_start = batch_obj->gtt_offset + args->batch_start_offset;
1021         exec_len = args->batch_len;
1022         if (cliprects) {
1023                 for (i = 0; i < args->num_cliprects; i++) {
1024                         ret = i915_emit_box(dev, &cliprects[i],
1025                                             args->DR1, args->DR4);
1026                         if (ret)
1027                                 goto err;
1028
1029                         ret = ring->dispatch_execbuffer(ring,
1030                                                         exec_start, exec_len,
1031                                                         flags);
1032                         if (ret)
1033                                 goto err;
1034                 }
1035         } else {
1036                 ret = ring->dispatch_execbuffer(ring,
1037                                                 exec_start, exec_len,
1038                                                 flags);
1039                 if (ret)
1040                         goto err;
1041         }
1042
1043         trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
1044
1045         i915_gem_execbuffer_move_to_active(&objects, ring);
1046         i915_gem_execbuffer_retire_commands(dev, file, ring);
1047
1048 err:
1049         eb_destroy(eb);
1050         while (!list_empty(&objects)) {
1051                 struct drm_i915_gem_object *obj;
1052
1053                 obj = list_first_entry(&objects,
1054                                        struct drm_i915_gem_object,
1055                                        exec_list);
1056                 list_del_init(&obj->exec_list);
1057                 drm_gem_object_unreference(&obj->base);
1058         }
1059
1060         mutex_unlock(&dev->struct_mutex);
1061
1062 pre_mutex_err:
1063         kfree(cliprects);
1064         return ret;
1065 }
1066
1067 /*
1068  * Legacy execbuffer just creates an exec2 list from the original exec object
1069  * list array and passes it to the real function.
1070  */
1071 int
1072 i915_gem_execbuffer(struct drm_device *dev, void *data,
1073                     struct drm_file *file)
1074 {
1075         struct drm_i915_gem_execbuffer *args = data;
1076         struct drm_i915_gem_execbuffer2 exec2;
1077         struct drm_i915_gem_exec_object *exec_list = NULL;
1078         struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1079         int ret, i;
1080
1081         if (args->buffer_count < 1) {
1082                 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1083                 return -EINVAL;
1084         }
1085
1086         /* Copy in the exec list from userland */
1087         exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
1088         exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1089         if (exec_list == NULL || exec2_list == NULL) {
1090                 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1091                           args->buffer_count);
1092                 drm_free_large(exec_list);
1093                 drm_free_large(exec2_list);
1094                 return -ENOMEM;
1095         }
1096         ret = copy_from_user(exec_list,
1097                              (void __user *)(uintptr_t)args->buffers_ptr,
1098                              sizeof(*exec_list) * args->buffer_count);
1099         if (ret != 0) {
1100                 DRM_DEBUG("copy %d exec entries failed %d\n",
1101                           args->buffer_count, ret);
1102                 drm_free_large(exec_list);
1103                 drm_free_large(exec2_list);
1104                 return -EFAULT;
1105         }
1106
1107         for (i = 0; i < args->buffer_count; i++) {
1108                 exec2_list[i].handle = exec_list[i].handle;
1109                 exec2_list[i].relocation_count = exec_list[i].relocation_count;
1110                 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
1111                 exec2_list[i].alignment = exec_list[i].alignment;
1112                 exec2_list[i].offset = exec_list[i].offset;
1113                 if (INTEL_INFO(dev)->gen < 4)
1114                         exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
1115                 else
1116                         exec2_list[i].flags = 0;
1117         }
1118
1119         exec2.buffers_ptr = args->buffers_ptr;
1120         exec2.buffer_count = args->buffer_count;
1121         exec2.batch_start_offset = args->batch_start_offset;
1122         exec2.batch_len = args->batch_len;
1123         exec2.DR1 = args->DR1;
1124         exec2.DR4 = args->DR4;
1125         exec2.num_cliprects = args->num_cliprects;
1126         exec2.cliprects_ptr = args->cliprects_ptr;
1127         exec2.flags = I915_EXEC_RENDER;
1128         i915_execbuffer2_set_context_id(exec2, 0);
1129
1130         ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1131         if (!ret) {
1132                 /* Copy the new buffer offsets back to the user's exec list. */
1133                 for (i = 0; i < args->buffer_count; i++)
1134                         exec_list[i].offset = exec2_list[i].offset;
1135                 /* ... and back out to userspace */
1136                 ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
1137                                    exec_list,
1138                                    sizeof(*exec_list) * args->buffer_count);
1139                 if (ret) {
1140                         ret = -EFAULT;
1141                         DRM_DEBUG("failed to copy %d exec entries "
1142                                   "back to user (%d)\n",
1143                                   args->buffer_count, ret);
1144                 }
1145         }
1146
1147         drm_free_large(exec_list);
1148         drm_free_large(exec2_list);
1149         return ret;
1150 }
1151
1152 int
1153 i915_gem_execbuffer2(struct drm_device *dev, void *data,
1154                      struct drm_file *file)
1155 {
1156         struct drm_i915_gem_execbuffer2 *args = data;
1157         struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1158         int ret;
1159
1160         if (args->buffer_count < 1 ||
1161             args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
1162                 DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1163                 return -EINVAL;
1164         }
1165
1166         exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
1167                              GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
1168         if (exec2_list == NULL)
1169                 exec2_list = drm_malloc_ab(sizeof(*exec2_list),
1170                                            args->buffer_count);
1171         if (exec2_list == NULL) {
1172                 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1173                           args->buffer_count);
1174                 return -ENOMEM;
1175         }
1176         ret = copy_from_user(exec2_list,
1177                              (struct drm_i915_relocation_entry __user *)
1178                              (uintptr_t) args->buffers_ptr,
1179                              sizeof(*exec2_list) * args->buffer_count);
1180         if (ret != 0) {
1181                 DRM_DEBUG("copy %d exec entries failed %d\n",
1182                           args->buffer_count, ret);
1183                 drm_free_large(exec2_list);
1184                 return -EFAULT;
1185         }
1186
1187         ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1188         if (!ret) {
1189                 /* Copy the new buffer offsets back to the user's exec list. */
1190                 ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
1191                                    exec2_list,
1192                                    sizeof(*exec2_list) * args->buffer_count);
1193                 if (ret) {
1194                         ret = -EFAULT;
1195                         DRM_DEBUG("failed to copy %d exec entries "
1196                                   "back to user (%d)\n",
1197                                   args->buffer_count, ret);
1198                 }
1199         }
1200
1201         drm_free_large(exec2_list);
1202         return ret;
1203 }