drm/i915: Pin relocations for the duration of constructing the execbuffer
drivers/gpu/drm/i915/i915_gem_execbuffer.c

/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

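/*
 * Internal bookkeeping flags, stored in the high bits of
 * drm_i915_gem_exec_object2.flags while an execbuffer is being
 * constructed; they record that the object currently holds a pin
 * (and possibly a fence) that must be dropped again on the error
 * and completion paths.
 */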
#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)

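/*
 * Tracks the vmas referenced by one execbuffer call. Handle lookup uses
 * either a direct index table (lut, when userspace passes
 * I915_EXEC_HANDLE_LUT and handles are densely numbered 0..N-1,
 * signalled by a negative 'and') or a hash table of 'and'+1 buckets.
 */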
struct eb_vmas {
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};

static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

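/*
 * Translate the user's array of handles into a list of vmas in two
 * passes: first grab object references under the file's table_lock,
 * then, with the spinlock dropped, look up or create a vma for each
 * object in the target address space and index it for eb_get_vma().
 */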
static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret = 0;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			ret = -ENOENT;
			goto out;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				  obj, exec[i].handle, i);
			ret = -EINVAL;
			goto out;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	list_for_each_entry(obj, &objects, obj_exec_link) {
		struct i915_vma *vma;

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
		if (IS_ERR(vma)) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto out;
		}

		list_add_tail(&vma->exec_list, &eb->vmas);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

out:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		if (ret)
			drm_gem_object_unreference(&obj->base);
	}
	return ret;
}

static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct hlist_node *node;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each(node, head) {
			struct i915_vma *vma;

			vma = hlist_entry(node, struct i915_vma, exec_node);
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;
	struct drm_i915_gem_object *obj = vma->obj;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		i915_gem_object_unpin(obj);

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}
	kfree(eb);
}

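/*
 * Heuristic for choosing the relocation write path: prefer writing
 * through the CPU (kmap) whenever snooping keeps it coherent or a GTT
 * mapping through the aperture would be unusable; otherwise fall back
 * to writing through the GTT.
 */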
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (HAS_LLC(obj->base.dev) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}

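/*
 * Perform a relocation by writing the new address through a CPU
 * kmapping of the target page. On gen8+ the address is 64 bits wide,
 * so a second dword (possibly on the following page) is written as
 * the zero upper half.
 */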
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	char *vaddr;
	int ret = -EINVAL;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
				reloc->offset >> PAGE_SHIFT));
	*(uint32_t *)(vaddr + page_offset) = reloc->delta;

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		*(uint32_t *)(vaddr + page_offset) = 0;
	}

	kunmap_atomic(vaddr);

	return 0;
}

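/*
 * As above, but write through an atomic write-combining mapping of the
 * GTT aperture instead; this requires the object to be in the GTT
 * domain and to give up its fence before the iowrite.
 */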
static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t __iomem *reloc_entry;
	void __iomem *reloc_page;
	int ret = -EINVAL;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	/* Map the page containing the relocation we're going to perform. */
	reloc->offset += i915_gem_obj_ggtt_offset(obj);
	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
					      reloc->offset & PAGE_MASK);
	reloc_entry = (uint32_t __iomem *)
		(reloc_page + offset_in_page(reloc->offset));
	iowrite32(reloc->delta, reloc_entry);

	if (INTEL_INFO(dev)->gen >= 8) {
		reloc_entry += 1;

		if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
			io_mapping_unmap_atomic(reloc_page);
			reloc_page = io_mapping_map_atomic_wc(
					dev_priv->gtt.mappable,
					reloc->offset + sizeof(uint32_t));
			reloc_entry = reloc_page;
		}

		iowrite32(0, reloc_entry);
	}

	io_mapping_unmap_atomic(reloc_page);

	return 0;
}

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc,
				   struct i915_address_space *vm)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint32_t target_offset;
	int ret = -EINVAL;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non-secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !target_i915_obj->has_global_gtt_mapping)) {
		i915_gem_gtt_bind_object(target_i915_obj,
					 target_i915_obj->cache_level);
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		     obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return ret;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return ret;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && in_atomic())
		return -EFAULT;

	reloc->delta += target_offset;
	if (use_cpu_reloc(obj))
		ret = relocate_entry_cpu(obj, reloc);
	else
		ret = relocate_entry_gtt(obj, reloc);

	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int remain, ret;

	user_relocs = to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
								 vma->vm);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
							 vma->vm);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb,
			     struct i915_address_space *vm)
{
	struct i915_vma *vma;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. In such a case, the page fault
	 * handler would call i915_gem_fault() and we would try to acquire
	 * the struct mutex again. Obviously this is bad and so lockdep
	 * complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

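/*
 * GTT-based relocations are written through the mappable aperture, so
 * such a vma must be bound within it (and only ggtt vmas qualify).
 */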
static int
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
		i915_is_ggtt(vma->vm);
}

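/*
 * Pin one vma into its address space, grabbing a fence if the entry
 * requires one, and record the __EXEC_OBJECT_HAS_* state so that the
 * pin and fence are released again by unreserve_vma. Flags a
 * relocation pass if the object did not end up at its presumed offset.
 */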
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_ring_buffer *ring,
				bool *need_reloc)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence, need_mappable;
	struct drm_i915_gem_object *obj = vma->obj;
	int ret;

	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
	need_mappable = need_fence || need_reloc_mappable(vma);

	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
				  false);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (has_fenced_gpu_access) {
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			ret = i915_gem_object_get_fence(obj);
			if (ret)
				return ret;

			if (i915_gem_object_pin_fence(obj))
				entry->flags |= __EXEC_OBJECT_HAS_FENCE;

			obj->pending_fenced_gpu_access = true;
		}
	}

	/* Ensure ppgtt mapping exists if needed */
	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
				       obj, obj->cache_level);

		obj->has_aliasing_ppgtt_mapping = 1;
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
	    !obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	return 0;
}

static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct list_head *vmas,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	if (list_empty(vmas))
		return 0;

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (need_mappable)
			list_move(&vma->exec_list, &ordered_vmas);
		else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
		obj->pending_fenced_gpu_access = false;
	}
	list_splice(&ordered_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
			bool need_fence, need_mappable;

			obj = vma->obj;

			if (!drm_mm_node_allocated(&vma->node))
				continue;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable = need_fence || need_reloc_mappable(vma);

			WARN_ON((need_mappable || need_fence) &&
				!i915_is_ggtt(vma->vm));

			if ((entry->alignment &&
			     vma->node.start & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}

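/*
 * Slow path: called when the atomic fast path faulted on a user
 * pointer. Drops struct_mutex, copies all relocations into kernel
 * memory (invalidating the user's presumed offsets on the way), then
 * retakes the lock, re-reserves the objects and relocates from the
 * stable copy.
 */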
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	if (WARN_ON(list_empty(&eb->vmas)))
		return 0;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (copy_to_user(&user_relocs[j].presumed_offset,
					 &invalid_offset,
					 sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

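/*
 * Make every buffer coherent for GPU use: synchronise each object
 * against outstanding work on other rings, clflush any CPU-dirty
 * objects, and invalidate the ring's caches before the batch runs.
 */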
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *vmas)
{
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_chipset)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);

	for (i = 0; i < count; i++) {
		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915_prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

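/*
 * Commit the domain changes computed during relocation and mark each
 * vma as active on the ring, so that retirement and eviction treat the
 * buffers as busy until this batch's seqno completes.
 */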
static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct intel_ring_buffer *ring)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		i915_vma_move_to_active(vma, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = intel_ring_get_seqno(ring);
			if (obj->pin_count) /* check for potential scanout */
				intel_mark_fb_busy(obj, ring);
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring,
				    struct drm_i915_gem_object *obj)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)__i915_add_request(ring, file, obj, NULL);
}

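/*
 * With I915_EXEC_GEN7_SOL_RESET, zero the four GEN7_SO_WRITE_OFFSET
 * registers via MI_LOAD_REGISTER_IMM before the batch starts (render
 * ring on gen7 only).
 */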
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
		return 0;

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

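/*
 * The core of both execbuffer ioctls: validate the arguments, select
 * the target ring, look up and reserve every buffer, apply relocations
 * (falling back to the slow path on -EFAULT), flush the objects to the
 * GPU and finally dispatch the batch, retiring it with a request.
 */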
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct i915_address_space *vm)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct eb_vmas *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	struct i915_ctx_hang_stats *hs;
	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 exec_start, exec_len;
	u32 mask, flags;
	int ret, mode, i;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;

	switch (args->flags & I915_EXEC_RING_MASK) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring = &dev_priv->ring[RCS];
		break;
	case I915_EXEC_BSD:
		ring = &dev_priv->ring[VCS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_BLT:
		ring = &dev_priv->ring[BCS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_VEBOX:
		ring = &dev_priv->ring[VECS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;

	default:
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}
	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	mask = I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->ring[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4)
				return -EINVAL;

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
				return -EINVAL;

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kcalloc(args->num_cliprects,
				    sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		if (copy_from_user(cliprects,
				   to_user_ptr(args->cliprects_ptr),
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->ums.mm_suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	eb = eb_create(args, vm);
	if (eb == NULL) {
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb, vm);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);

	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
	if (ret)
		goto err;

	hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
	if (IS_ERR(hs)) {
		ret = PTR_ERR(hs);
		goto err;
	}

	if (hs->banned) {
		ret = -EIO;
		goto err;
	}

	ret = i915_switch_context(ring, file, ctx_id);
	if (ret)
		goto err;

	if (ring == &dev_priv->ring[RCS] &&
	    mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto err;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, mask << 16 | mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto err;
	}

	exec_start = i915_gem_obj_offset(batch_obj, vm) +
		args->batch_start_offset;
	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			goto err;
	}

	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

	i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

err:
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
				     &dev_priv->gtt.base);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
				     &dev_priv->gtt.base);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}