/* drivers/gpu/drm/i915/i915_gem_execbuffer.c */
/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drmP.h>
#include <drm/drm_syncobj.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

enum {
	FORCE_CPU_RELOC = 1,
	FORCE_GTT_RELOC,
	FORCE_GPU_RELOC,
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};

#define __EXEC_OBJECT_HAS_REF		BIT(31)
#define __EXEC_OBJECT_HAS_PIN		BIT(30)
#define __EXEC_OBJECT_HAS_FENCE		BIT(29)
#define __EXEC_OBJECT_NEEDS_MAP		BIT(28)
#define __EXEC_OBJECT_NEEDS_BIAS	BIT(27)
#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 27) /* all of the above */
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)

#define __EXEC_HAS_RELOC	BIT(31)
#define __EXEC_VALIDATED	BIT(30)
#define __EXEC_INTERNAL_FLAGS	(~0u << 30)
#define UPDATE			PIN_OFFSET_FIXED

#define BATCH_OFFSET_BIAS (256*1024)

#define __I915_EXEC_ILLEGAL_FLAGS \
	(__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK)

/**
 * DOC: User command execution
 *
 * Userspace submits commands to be executed on the GPU as an instruction
 * stream within a GEM object we call a batchbuffer. These instructions may
 * refer to other GEM objects containing auxiliary state such as kernels,
 * samplers, render targets and even secondary batchbuffers. Userspace does
 * not know where in the GPU memory these objects reside and so before the
 * batchbuffer is passed to the GPU for execution, those addresses in the
 * batchbuffer and auxiliary objects are updated. This is known as relocation,
 * or patching. To try and avoid having to relocate each object on the next
 * execution, userspace is told the location of those objects in this pass,
 * but this remains just a hint as the kernel may choose a new location for
 * any object in the future.
 *
 * Processing an execbuf ioctl is conceptually split up into a few phases.
 *
 * 1. Validation - Ensure all the pointers, handles and flags are valid.
 * 2. Reservation - Assign GPU address space for every object
 * 3. Relocation - Update any addresses to point to the final locations
 * 4. Serialisation - Order the request with respect to its dependencies
 * 5. Construction - Construct a request to execute the batchbuffer
 * 6. Submission (at some point in the future execution)
 *
 * Reserving resources for the execbuf is the most complicated phase. We
 * neither want to have to migrate the object in the address space, nor do
 * we want to have to update any relocations pointing to this object. Ideally,
 * we want to leave the object where it is and for all the existing relocations
 * to match. If the object is given a new address, or if userspace thinks the
 * object is elsewhere, we have to parse all the relocation entries and update
 * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
 * all the target addresses in all of its objects match the value in the
 * relocation entries and that they all match the presumed offsets given by the
 * list of execbuffer objects. Using this knowledge, we know that if we haven't
 * moved any buffers, all the relocation entries are valid and we can skip
 * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
 * hang.) The requirements for using I915_EXEC_NO_RELOC are:
 *
 *	The addresses written in the objects must match the corresponding
 *	reloc.presumed_offset which in turn must match the corresponding
 *	execobject.offset.
 *
 *	Any render targets written to in the batch must be flagged with
 *	EXEC_OBJECT_WRITE.
 *
 *	To avoid stalling, execobject.offset should match the current
 *	address of that object within the active context.
 *
 * The reservation is done in multiple phases. First we try and keep any
 * object already bound in its current location - so long as it meets the
 * constraints imposed by the new execbuffer. Any object left unbound after the
 * first pass is then fitted into any available idle space. If an object does
 * not fit, all objects are removed from the reservation and the process rerun
 * after sorting the objects into a priority order (more difficult to fit
 * objects are tried first). Failing that, the entire VM is cleared and we try
 * to fit the execbuf one last time before concluding that it simply will not
 * fit.
 *
 * A small complication to all of this is that we allow userspace not only to
 * specify an alignment and a size for the object in the address space, but
 * we also allow userspace to specify the exact offset. Such objects are
 * simpler to place (the location is known a priori) - all we have to do is
 * make sure the space is available.
 *
 * Once all the objects are in place, patching up the buried pointers to point
 * to the final locations is a fairly simple job of walking over the relocation
 * entry arrays, looking up the right address and rewriting the value into
 * the object. Simple! ... The relocation entries are stored in user memory
 * and so to access them we have to copy them into a local buffer. That copy
 * has to avoid taking any pagefaults as they may lead back to a GEM object
 * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
 * the relocation into multiple passes. First we try to do everything within an
 * atomic context (avoid the pagefaults) which requires that we never wait. If
 * we detect that we may wait, or if we need to fault, then we have to fallback
 * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
 * bells yet?) Dropping the mutex means that we lose all the state we have
 * built up so far for the execbuf and we must reset any global data. However,
 * we do leave the objects pinned in their final locations - which is a
 * potential issue for concurrent execbufs. Once we have left the mutex, we can
 * allocate and copy all the relocation entries into a large array at our
 * leisure, reacquire the mutex, reclaim all the objects and other state and
 * then proceed to update any incorrect addresses with the objects.
 *
 * As we process the relocation entries, we maintain a record of whether the
 * object is being written to. Using NO_RELOC, we expect userspace to provide
 * this information instead. We also check whether we can skip the relocation
 * by comparing the expected value inside the relocation entry with the target's
 * final address. If they differ, we have to map the current object and rewrite
 * the 4 or 8 byte pointer within.
 *
 * Serialising an execbuf is quite simple according to the rules of the GEM
 * ABI. Execution within each context is ordered by the order of submission.
 * Writes to any GEM object are in order of submission and are exclusive. Reads
 * from a GEM object are unordered with respect to other reads, but ordered by
 * writes. A write submitted after a read cannot occur before the read, and
 * similarly any read submitted after a write cannot occur before the write.
 * Writes are ordered between engines such that only one write occurs at any
 * time (completing any reads beforehand) - using semaphores where available
 * and CPU serialisation otherwise. Other GEM accesses obey the same rules, any
 * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
 * reads before starting, and any read (either using set-domain or pread) must
 * flush all GPU writes before starting. (Note we only employ a barrier before,
 * we currently rely on userspace not concurrently starting a new execution
 * whilst reading or writing to an object. This may be an advantage or not
 * depending on how much you trust userspace not to shoot themselves in the
 * foot.) Serialisation may just result in the request being inserted into
 * a DAG awaiting its turn, but the simplest is to wait on the CPU until
 * all dependencies are resolved.
 *
 * After all of that, it is just a matter of closing the request and handing
 * it to the hardware (well, leaving it in a queue to be executed). However,
 * we also offer the ability for batchbuffers to be run with elevated
 * privileges so that they access otherwise hidden registers. (Used to adjust
 * L3 cache etc.) Before any batch is given extra privileges we first must
 * check that it contains no nefarious instructions, we check that each
 * instruction is from our whitelist and all registers are also from an
 * allowed list. We first copy the user's batchbuffer to a shadow (so that the
 * user doesn't have access to it, either by the CPU or GPU as we scan it)
 * and then parse each instruction. If everything is ok, we set a flag telling
 * the hardware to run the batchbuffer in trusted mode, otherwise the ioctl is
 * rejected.
 */
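
/*
 * For illustration only (not part of this driver, simplified, and with made-up
 * handles/offsets; drmIoctl() comes from libdrm): a userspace client drives
 * the phases above through the EXECBUFFER2 ioctl roughly as follows, with one
 * auxiliary buffer, one relocation, and the batch last in the object list:
 *
 *	struct drm_i915_gem_relocation_entry reloc = {
 *		.target_handle = target_bo,	// e.g. a render target
 *		.offset = 128,			// pointer location in the batch
 *		.presumed_offset = last_known_addr, // hint from last execbuf
 *		.read_domains = I915_GEM_DOMAIN_RENDER,
 *		.write_domain = I915_GEM_DOMAIN_RENDER,
 *	};
 *	struct drm_i915_gem_exec_object2 exec[2] = {
 *		{ .handle = target_bo, .offset = last_known_addr,
 *		  .flags = EXEC_OBJECT_WRITE },
 *		{ .handle = batch_bo,
 *		  .relocation_count = 1,
 *		  .relocs_ptr = (uintptr_t)&reloc },
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)exec,
 *		.buffer_count = 2,
 *		.batch_len = 4096,
 *		.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *
 * With I915_EXEC_NO_RELOC set, the kernel only rewrites the pointer at batch
 * offset 128 if target_bo no longer sits at last_known_addr, and the updated
 * address is reported back through the execobject array for the next cycle.
 */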

struct i915_execbuffer {
	struct drm_i915_private *i915; /** i915 backpointer */
	struct drm_file *file; /** per-file lookup tables and limits */
	struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
	struct i915_vma **vma;
	unsigned int *flags;

	struct intel_engine_cs *engine; /** engine to queue the request to */
	struct i915_gem_context *ctx; /** context for building the request */
	struct i915_address_space *vm; /** GTT and vma for the request */

	struct drm_i915_gem_request *request; /** our request to build */
	struct i915_vma *batch; /** identity of the batch obj/vma */

	/** actual size of execobj[] as we may extend it for the cmdparser */
	unsigned int buffer_count;

	/** list of vma not yet bound during reservation phase */
	struct list_head unbound;

	/** list of vma that have execobj.relocation_count */
	struct list_head relocs;

	/**
	 * Track the most recently used object for relocations, as we
	 * frequently have to perform multiple relocations within the same
	 * obj/page
	 */
	struct reloc_cache {
		struct drm_mm_node node; /** temporary GTT binding */
		unsigned long vaddr; /** Current kmap address */
		unsigned long page; /** Currently mapped page index */
		unsigned int gen; /** Cached value of INTEL_GEN */
		bool use_64bit_reloc : 1;
		bool has_llc : 1;
		bool has_fence : 1;
		bool needs_unfenced : 1;

		struct drm_i915_gem_request *rq;
		u32 *rq_cmd;
		unsigned int rq_size;
	} reloc_cache;

	u64 invalid_flags; /** Set of execobj.flags that are invalid */
	u32 context_flags; /** Set of execobj.flags to insert from the ctx */

	u32 batch_start_offset; /** Location within object of batch */
	u32 batch_len; /** Length of batch within object */
	u32 batch_flags; /** Flags composed for emit_bb_start() */

	/**
	 * Indicate either the size of the hashtable used to resolve
	 * relocation handles, or if negative that we are using a direct
	 * index into the execobj[].
	 */
	int lut_size;
	struct hlist_head *buckets; /** ht for relocation handles */
};

#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])

/*
 * Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline u64 gen8_canonical_addr(u64 address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline u64 gen8_noncanonical_addr(u64 address)
{
	return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
}

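/*
 * Worked example (illustrative value): an address with bit 47 set, say
 * 0x0000_8000_0000_1000, becomes 0xffff_8000_0000_1000 after
 * gen8_canonical_addr() sign-extends bit 47 into bits [63:48]; addresses
 * with bit 47 clear pass through unchanged. gen8_noncanonical_addr()
 * undoes this by masking back to bits [47:0] before the address is handed
 * to the drm_mm allocator.
 */
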
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
	return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
}

static int eb_create(struct i915_execbuffer *eb)
{
	if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
		unsigned int size = 1 + ilog2(eb->buffer_count);

		/*
		 * Without a 1:1 association between relocation handles and
		 * the execobject[] index, we instead create a hashtable.
		 * We size it dynamically based on available memory, starting
		 * first with a 1:1 associative hash and scaling back until
		 * the allocation succeeds.
		 *
		 * Later on we use a positive lut_size to indicate we are
		 * using this hashtable, and a negative value to indicate a
		 * direct lookup.
		 */
		do {
			gfp_t flags;

			/* While we can still reduce the allocation size, don't
			 * raise a warning and allow the allocation to fail.
			 * On the last pass though, we want to try as hard
			 * as possible to perform the allocation and warn
			 * if it fails.
			 */
			flags = GFP_KERNEL;
			if (size > 1)
				flags |= __GFP_NORETRY | __GFP_NOWARN;

			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
					      flags);
			if (eb->buckets)
				break;
		} while (--size);

		if (unlikely(!size))
			return -ENOMEM;

		eb->lut_size = size;
	} else {
		eb->lut_size = -eb->buffer_count;
	}

	return 0;
}
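
/*
 * Sizing example (illustrative numbers): an execbuf carrying 24 buffers
 * without I915_EXEC_HANDLE_LUT starts with size = 1 + ilog2(24) = 5, i.e.
 * a 32-bucket hashtable, halving the table on each allocation failure
 * until the final size == 1 (two buckets) attempt, after which we give up
 * with -ENOMEM. With I915_EXEC_HANDLE_LUT, lut_size = -24 and relocation
 * handles index execobj[] directly.
 */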

static bool
eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
		 const struct i915_vma *vma,
		 unsigned int flags)
{
	if (vma->node.size < entry->pad_to_size)
		return true;

	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
		return true;

	if (flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_MAP &&
	    !i915_vma_is_map_and_fenceable(vma))
		return true;

	return false;
}

static inline bool
eb_pin_vma(struct i915_execbuffer *eb,
	   const struct drm_i915_gem_exec_object2 *entry,
	   struct i915_vma *vma)
{
	unsigned int exec_flags = *vma->exec_flags;
	u64 pin_flags;

	if (vma->node.size)
		pin_flags = vma->node.start;
	else
		pin_flags = entry->offset & PIN_OFFSET_MASK;

	pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT))
		pin_flags |= PIN_GLOBAL;

	if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
		return false;

	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
		if (unlikely(i915_vma_pin_fence(vma))) {
			i915_vma_unpin(vma);
			return false;
		}

		if (vma->fence)
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
	return !eb_vma_misplaced(entry, vma, exec_flags);
}

static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
{
	GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));

	if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
		__i915_vma_unpin_fence(vma);

	__i915_vma_unpin(vma);
}

static inline void
eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags)
{
	if (!(*flags & __EXEC_OBJECT_HAS_PIN))
		return;

	__eb_unreserve_vma(vma, *flags);
	*flags &= ~__EXEC_OBJECT_RESERVED;
}

static int
eb_validate_vma(struct i915_execbuffer *eb,
		struct drm_i915_gem_exec_object2 *entry,
		struct i915_vma *vma)
{
	if (unlikely(entry->flags & eb->invalid_flags))
		return -EINVAL;

	if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
		return -EINVAL;

	/*
	 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
	 * any non-page-aligned or non-canonical addresses.
	 */
	if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
		     entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK)))
		return -EINVAL;

	/* pad_to_size was once a reserved field, so sanitize it */
	if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
		if (unlikely(offset_in_page(entry->pad_to_size)))
			return -EINVAL;
	} else {
		entry->pad_to_size = 0;
	}

	if (unlikely(vma->exec_flags)) {
		DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
			  entry->handle, (int)(entry - eb->exec));
		return -EINVAL;
	}

	/*
	 * From drm_mm perspective address space is continuous,
	 * so from this point we're always using non-canonical
	 * form internally.
	 */
	entry->offset = gen8_noncanonical_addr(entry->offset);

	if (!eb->reloc_cache.has_fence) {
		entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
	} else {
		if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
		     eb->reloc_cache.needs_unfenced) &&
		    i915_gem_object_is_tiled(vma->obj))
			entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
	}

	if (!(entry->flags & EXEC_OBJECT_PINNED))
		entry->flags |= eb->context_flags;

	return 0;
}

static int
eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
	int err;

	GEM_BUG_ON(i915_vma_is_closed(vma));

	if (!(eb->args->flags & __EXEC_VALIDATED)) {
		err = eb_validate_vma(eb, entry, vma);
		if (unlikely(err))
			return err;
	}

	if (eb->lut_size > 0) {
		vma->exec_handle = entry->handle;
		hlist_add_head(&vma->exec_node,
			       &eb->buckets[hash_32(entry->handle,
						    eb->lut_size)]);
	}

	if (entry->relocation_count)
		list_add_tail(&vma->reloc_link, &eb->relocs);

	/*
	 * Stash a pointer from the vma to execobj, so we can query its flags,
	 * size, alignment etc as provided by the user. Also we stash a pointer
	 * to the vma inside the execobj so that we can use a direct lookup
	 * to find the right target VMA when doing relocations.
	 */
	eb->vma[i] = vma;
	eb->flags[i] = entry->flags;
	vma->exec_flags = &eb->flags[i];

	err = 0;
	if (eb_pin_vma(eb, entry, vma)) {
		if (entry->offset != vma->node.start) {
			entry->offset = vma->node.start | UPDATE;
			eb->args->flags |= __EXEC_HAS_RELOC;
		}
	} else {
		eb_unreserve_vma(vma, vma->exec_flags);

		list_add_tail(&vma->exec_link, &eb->unbound);
		if (drm_mm_node_allocated(&vma->node))
			err = i915_vma_unbind(vma);
		if (unlikely(err))
			vma->exec_flags = NULL;
	}
	return err;
}

static inline int use_cpu_reloc(const struct reloc_cache *cache,
				const struct drm_i915_gem_object *obj)
{
	if (!i915_gem_object_has_struct_page(obj))
		return false;

	if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
		return true;

	if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
		return false;

	return (cache->has_llc ||
		obj->cache_dirty ||
		obj->cache_level != I915_CACHE_NONE);
}

static int eb_reserve_vma(const struct i915_execbuffer *eb,
			  struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	unsigned int exec_flags = *vma->exec_flags;
	u64 pin_flags;
	int err;

	pin_flags = PIN_USER | PIN_NONBLOCK;
	if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
		pin_flags |= PIN_GLOBAL;

	/*
	 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
	 * limit address to the first 4GBs for unflagged objects.
	 */
	if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
		pin_flags |= PIN_ZONE_4G;

	if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
		pin_flags |= PIN_MAPPABLE;

	if (exec_flags & EXEC_OBJECT_PINNED) {
		pin_flags |= entry->offset | PIN_OFFSET_FIXED;
		pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */
	} else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) {
		pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
	}

	err = i915_vma_pin(vma,
			   entry->pad_to_size, entry->alignment,
			   pin_flags);
	if (err)
		return err;

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start | UPDATE;
		eb->args->flags |= __EXEC_HAS_RELOC;
	}

	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
		err = i915_vma_pin_fence(vma);
		if (unlikely(err)) {
			i915_vma_unpin(vma);
			return err;
		}

		if (vma->fence)
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
	GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));

	return 0;
}

static int eb_reserve(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	struct list_head last;
	struct i915_vma *vma;
	unsigned int i, pass;
	int err;

	/*
	 * Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */

	pass = 0;
	err = 0;
	do {
		list_for_each_entry(vma, &eb->unbound, exec_link) {
			err = eb_reserve_vma(eb, vma);
			if (err)
				break;
		}
		if (err != -ENOSPC)
			return err;

		/* Resort *all* the objects into priority order */
		INIT_LIST_HEAD(&eb->unbound);
		INIT_LIST_HEAD(&last);
		for (i = 0; i < count; i++) {
			unsigned int flags = eb->flags[i];
			struct i915_vma *vma = eb->vma[i];

			if (flags & EXEC_OBJECT_PINNED &&
			    flags & __EXEC_OBJECT_HAS_PIN)
				continue;

			eb_unreserve_vma(vma, &eb->flags[i]);

			if (flags & EXEC_OBJECT_PINNED)
				list_add(&vma->exec_link, &eb->unbound);
			else if (flags & __EXEC_OBJECT_NEEDS_MAP)
				list_add_tail(&vma->exec_link, &eb->unbound);
			else
				list_add_tail(&vma->exec_link, &last);
		}
		list_splice_tail(&last, &eb->unbound);

		switch (pass++) {
		case 0:
			break;

		case 1:
			/* Too fragmented, unbind everything and retry */
			err = i915_gem_evict_vm(eb->vm);
			if (err)
				return err;
			break;

		default:
			return -ENOSPC;
		}
	} while (1);
}
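
/*
 * An illustrative walk through the retry loop above: pass 0 binds only
 * whatever is already on the unbound list. On -ENOSPC, every object that
 * is not both softpinned and still pinned is unreserved and requeued -
 * EXEC_OBJECT_PINNED objects first, then those needing a mappable slot,
 * then the rest - before retrying. Pass 1 additionally evicts the whole
 * VM before the retry; a third -ENOSPC is reported to the caller.
 */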

static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
{
	if (eb->args->flags & I915_EXEC_BATCH_FIRST)
		return 0;
	else
		return eb->buffer_count - 1;
}

static int eb_select_context(struct i915_execbuffer *eb)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
	if (unlikely(!ctx))
		return -ENOENT;

	eb->ctx = ctx;
	eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;

	eb->context_flags = 0;
	if (ctx->flags & CONTEXT_NO_ZEROMAP)
		eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return 0;
}

static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
	struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
	struct drm_i915_gem_object *obj;
	unsigned int i;
	int err;

	if (unlikely(i915_gem_context_is_closed(eb->ctx)))
		return -ENOENT;

	if (unlikely(i915_gem_context_is_banned(eb->ctx)))
		return -EIO;

	INIT_LIST_HEAD(&eb->relocs);
	INIT_LIST_HEAD(&eb->unbound);

	for (i = 0; i < eb->buffer_count; i++) {
		u32 handle = eb->exec[i].handle;
		struct i915_lut_handle *lut;
		struct i915_vma *vma;

		vma = radix_tree_lookup(handles_vma, handle);
		if (likely(vma))
			goto add_vma;

		obj = i915_gem_object_lookup(eb->file, handle);
		if (unlikely(!obj)) {
			err = -ENOENT;
			goto err_vma;
		}

		vma = i915_vma_instance(obj, eb->vm, NULL);
		if (unlikely(IS_ERR(vma))) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		lut = kmem_cache_alloc(eb->i915->luts, GFP_KERNEL);
		if (unlikely(!lut)) {
			err = -ENOMEM;
			goto err_obj;
		}

		err = radix_tree_insert(handles_vma, handle, vma);
		if (unlikely(err)) {
			kfree(lut);
			goto err_obj;
		}

		/* transfer ref to ctx */
		vma->open_count++;
		list_add(&lut->obj_link, &obj->lut_list);
		list_add(&lut->ctx_link, &eb->ctx->handles_list);
		lut->ctx = eb->ctx;
		lut->handle = handle;

add_vma:
		err = eb_add_vma(eb, i, vma);
		if (unlikely(err))
			goto err_vma;

		GEM_BUG_ON(vma != eb->vma[i]);
		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
	}

	/* take note of the batch buffer before we might reorder the lists */
	i = eb_batch_index(eb);
	eb->batch = eb->vma[i];
	GEM_BUG_ON(eb->batch->exec_flags != &eb->flags[i]);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
		eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
	if (eb->reloc_cache.has_fence)
		eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;

	eb->args->flags |= __EXEC_VALIDATED;
	return eb_reserve(eb);

err_obj:
	i915_gem_object_put(obj);
err_vma:
	eb->vma[i] = NULL;
	return err;
}

static struct i915_vma *
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
{
	if (eb->lut_size < 0) {
		if (handle >= -eb->lut_size)
			return NULL;
		return eb->vma[handle];
	} else {
		struct hlist_head *head;
		struct i915_vma *vma;

		head = &eb->buckets[hash_32(handle, eb->lut_size)];
		hlist_for_each_entry(vma, head, exec_node) {
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void eb_release_vmas(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	for (i = 0; i < count; i++) {
		struct i915_vma *vma = eb->vma[i];
		unsigned int flags = eb->flags[i];

		if (!vma)
			break;

		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
		vma->exec_flags = NULL;
		eb->vma[i] = NULL;

		if (flags & __EXEC_OBJECT_HAS_PIN)
			__eb_unreserve_vma(vma, flags);

		if (flags & __EXEC_OBJECT_HAS_REF)
			i915_vma_put(vma);
	}
}

static void eb_reset_vmas(const struct i915_execbuffer *eb)
{
	eb_release_vmas(eb);
	if (eb->lut_size > 0)
		memset(eb->buckets, 0,
		       sizeof(struct hlist_head) << eb->lut_size);
}

static void eb_destroy(const struct i915_execbuffer *eb)
{
	GEM_BUG_ON(eb->reloc_cache.rq);

	if (eb->lut_size > 0)
		kfree(eb->buckets);
}

static inline u64
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
		  const struct i915_vma *target)
{
	return gen8_canonical_addr((int)reloc->delta + target->node.start);
}

static void reloc_cache_init(struct reloc_cache *cache,
			     struct drm_i915_private *i915)
{
	cache->page = -1;
	cache->vaddr = 0;
	/* Must be a variable in the struct to allow GCC to unroll. */
	cache->gen = INTEL_GEN(i915);
	cache->has_llc = HAS_LLC(i915);
	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
	cache->has_fence = cache->gen < 4;
	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
	cache->node.allocated = false;
	cache->rq = NULL;
	cache->rq_size = 0;
}

static inline void *unmask_page(unsigned long p)
{
	return (void *)(uintptr_t)(p & PAGE_MASK);
}

static inline unsigned int unmask_flags(unsigned long p)
{
	return p & ~PAGE_MASK;
}

#define KMAP 0x4 /* after CLFLUSH_FLAGS */

static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
{
	struct drm_i915_private *i915 =
		container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
	return &i915->ggtt;
}

static void reloc_gpu_flush(struct reloc_cache *cache)
{
	GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
	cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(cache->rq->batch->obj);
	i915_gem_chipset_flush(cache->rq->i915);

	__i915_add_request(cache->rq, true);
	cache->rq = NULL;
}

static void reloc_cache_reset(struct reloc_cache *cache)
{
	void *vaddr;

	if (cache->rq)
		reloc_gpu_flush(cache);

	if (!cache->vaddr)
		return;

	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP) {
		if (cache->vaddr & CLFLUSH_AFTER)
			mb();

		kunmap_atomic(vaddr);
		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
	} else {
		wmb();
		io_mapping_unmap_atomic((void __iomem *)vaddr);
		if (cache->node.allocated) {
			struct i915_ggtt *ggtt = cache_to_ggtt(cache);

			ggtt->base.clear_range(&ggtt->base,
					       cache->node.start,
					       cache->node.size);
			drm_mm_remove_node(&cache->node);
		} else {
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
		}
	}

	cache->vaddr = 0;
	cache->page = -1;
}

static void *reloc_kmap(struct drm_i915_gem_object *obj,
			struct reloc_cache *cache,
			unsigned long page)
{
	void *vaddr;

	if (cache->vaddr) {
		kunmap_atomic(unmask_page(cache->vaddr));
	} else {
		unsigned int flushes;
		int err;

		err = i915_gem_obj_prepare_shmem_write(obj, &flushes);
		if (err)
			return ERR_PTR(err);

		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);

		cache->vaddr = flushes | KMAP;
		cache->node.mm = (void *)obj;
		if (flushes)
			mb();
	}

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
	cache->page = page;

	return vaddr;
}

static void *reloc_iomap(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 unsigned long page)
{
	struct i915_ggtt *ggtt = cache_to_ggtt(cache);
	unsigned long offset;
	void *vaddr;

	if (cache->vaddr) {
		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
	} else {
		struct i915_vma *vma;
		int err;

		if (use_cpu_reloc(cache, obj))
			return NULL;

		err = i915_gem_object_set_to_gtt_domain(obj, true);
		if (err)
			return ERR_PTR(err);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE |
					       PIN_NONBLOCK |
					       PIN_NONFAULT);
		if (IS_ERR(vma)) {
			memset(&cache->node, 0, sizeof(cache->node));
			err = drm_mm_insert_node_in_range
				(&ggtt->base.mm, &cache->node,
				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
				 0, ggtt->mappable_end,
				 DRM_MM_INSERT_LOW);
			if (err) /* no inactive aperture space, use cpu reloc */
				return NULL;
		} else {
			err = i915_vma_put_fence(vma);
			if (err) {
				i915_vma_unpin(vma);
				return ERR_PTR(err);
			}

			cache->node.start = vma->node.start;
			cache->node.mm = (void *)vma;
		}
	}

	offset = cache->node.start;
	if (cache->node.allocated) {
		wmb();
		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, page),
				       offset, I915_CACHE_NONE, 0);
	} else {
		offset += page << PAGE_SHIFT;
	}

	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
							 offset);
	cache->page = page;
	cache->vaddr = (unsigned long)vaddr;

	return vaddr;
}

static void *reloc_vaddr(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 unsigned long page)
{
	void *vaddr;

	if (cache->page == page) {
		vaddr = unmask_page(cache->vaddr);
	} else {
		vaddr = NULL;
		if ((cache->vaddr & KMAP) == 0)
			vaddr = reloc_iomap(obj, cache, page);
		if (!vaddr)
			vaddr = reloc_kmap(obj, cache, page);
	}

	return vaddr;
}

static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
		if (flushes & CLFLUSH_BEFORE) {
			clflushopt(addr);
			mb();
		}

		*addr = value;

		/*
		 * Writes to the same cacheline are serialised by the CPU
		 * (including clflush). On the write path, we only require
		 * that it hits memory in an orderly fashion and place
		 * mb barriers at the start and end of the relocation phase
		 * to ensure ordering of clflush wrt to the system.
		 */
		if (flushes & CLFLUSH_AFTER)
			clflushopt(addr);
	} else
		*addr = value;
}

static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
			     struct i915_vma *vma,
			     unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	struct drm_i915_gem_object *obj;
	struct drm_i915_gem_request *rq;
	struct i915_vma *batch;
	u32 *cmd;
	int err;

	GEM_BUG_ON(vma->obj->base.write_domain & I915_GEM_DOMAIN_CPU);

	obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj,
				      cache->has_llc ?
				      I915_MAP_FORCE_WB :
				      I915_MAP_FORCE_WC);
	i915_gem_object_unpin_pages(obj);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	err = i915_gem_object_set_to_wc_domain(obj, false);
	if (err)
		goto err_unmap;

	batch = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_unmap;
	}

	err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
	if (err)
		goto err_unmap;

	rq = i915_gem_request_alloc(eb->engine, eb->ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = i915_gem_request_await_object(rq, vma->obj, true);
	if (err)
		goto err_request;

	err = eb->engine->emit_bb_start(rq,
					batch->node.start, PAGE_SIZE,
					cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
	if (err)
		goto err_request;

	GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
	i915_vma_move_to_active(batch, rq, 0);
	reservation_object_lock(batch->resv, NULL);
	reservation_object_add_excl_fence(batch->resv, &rq->fence);
	reservation_object_unlock(batch->resv);
	i915_vma_unpin(batch);

	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	reservation_object_lock(vma->resv, NULL);
	reservation_object_add_excl_fence(vma->resv, &rq->fence);
	reservation_object_unlock(vma->resv);

	rq->batch = batch;

	cache->rq = rq;
	cache->rq_cmd = cmd;
	cache->rq_size = 0;

	/* Return with batch mapping (cmd) still pinned */
	return 0;

err_request:
	i915_add_request(rq);
err_unpin:
	i915_vma_unpin(batch);
err_unmap:
	i915_gem_object_unpin_map(obj);
	return err;
}

static u32 *reloc_gpu(struct i915_execbuffer *eb,
		      struct i915_vma *vma,
		      unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	u32 *cmd;

	if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
		reloc_gpu_flush(cache);

	if (unlikely(!cache->rq)) {
		int err;

		/* If we need to copy for the cmdparser, we will stall anyway */
		if (eb_use_cmdparser(eb))
			return ERR_PTR(-EWOULDBLOCK);

		if (!intel_engine_can_store_dword(eb->engine))
			return ERR_PTR(-ENODEV);

		err = __reloc_gpu_alloc(eb, vma, len);
		if (unlikely(err))
			return ERR_PTR(err);
	}

	cmd = cache->rq_cmd + cache->rq_size;
	cache->rq_size += len;

	return cmd;
}

static u64
relocate_entry(struct i915_vma *vma,
	       const struct drm_i915_gem_relocation_entry *reloc,
	       struct i915_execbuffer *eb,
	       const struct i915_vma *target)
{
	u64 offset = reloc->offset;
	u64 target_offset = relocation_target(reloc, target);
	bool wide = eb->reloc_cache.use_64bit_reloc;
	void *vaddr;

	if (!eb->reloc_cache.vaddr &&
	    (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
	     !reservation_object_test_signaled_rcu(vma->resv, true))) {
		const unsigned int gen = eb->reloc_cache.gen;
		unsigned int len;
		u32 *batch;
		u64 addr;

		if (wide)
			len = offset & 7 ? 8 : 5;
		else if (gen >= 4)
			len = 4;
		else
			len = 3;

		batch = reloc_gpu(eb, vma, len);
		if (IS_ERR(batch))
			goto repeat;

		addr = gen8_canonical_addr(vma->node.start + offset);
		if (wide) {
			if (offset & 7) {
				*batch++ = MI_STORE_DWORD_IMM_GEN4;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = lower_32_bits(target_offset);

				addr = gen8_canonical_addr(addr + 4);

				*batch++ = MI_STORE_DWORD_IMM_GEN4;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = upper_32_bits(target_offset);
			} else {
				*batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = lower_32_bits(target_offset);
				*batch++ = upper_32_bits(target_offset);
			}
		} else if (gen >= 6) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4;
			*batch++ = 0;
			*batch++ = addr;
			*batch++ = target_offset;
		} else if (gen >= 4) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
			*batch++ = 0;
			*batch++ = addr;
			*batch++ = target_offset;
		} else {
			*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*batch++ = addr;
			*batch++ = target_offset;
		}

		goto out;
	}

repeat:
	vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	clflush_write32(vaddr + offset_in_page(offset),
			lower_32_bits(target_offset),
			eb->reloc_cache.vaddr);

	if (wide) {
		offset += sizeof(u32);
		target_offset >>= 32;
		wide = false;
		goto repeat;
	}

out:
	return target->node.start | UPDATE;
}

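/*
 * Example of the split performed above (illustrative values): a 64-bit CPU
 * relocation at offset 0x1008 whose target resolves to 0x1_2345_6000 takes
 * the repeat path twice - writing 0x2345_6000 at 0x1008, then shifting
 * target_offset down by 32 bits and writing 0x0000_0001 at 0x100c. The GPU
 * path instead emits a single qword MI_STORE_DWORD_IMM when the offset is
 * 8-byte aligned, or two dword stores otherwise.
 */
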
static u64
eb_relocate_entry(struct i915_execbuffer *eb,
		  struct i915_vma *vma,
		  const struct drm_i915_gem_relocation_entry *reloc)
{
	struct i915_vma *target;
	int err;

	/* we already hold a reference to all valid objects */
	target = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(!target))
		return -ENOENT;

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	if (reloc->write_domain) {
		*target->exec_flags |= EXEC_OBJECT_WRITE;

		/*
		 * Sandybridge PPGTT errata: We need a global gtt mapping
		 * for MI and pipe_control writes because the gpu doesn't
		 * properly redirect them through the ppgtt for non_secure
		 * batchbuffers.
		 */
		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
		    IS_GEN6(eb->i915)) {
			err = i915_vma_bind(target, target->obj->cache_level,
					    PIN_GLOBAL);
			if (WARN_ONCE(err,
				      "Unexpected failure to bind target VMA!"))
				return err;
		}
	}

	/*
	 * If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (!DBG_FORCE_RELOC &&
	    gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		     vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "target %d offset %d size %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset,
			  (int)vma->size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "target %d offset %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset);
		return -EINVAL;
	}

	/*
	 * If we write into the object, we need to force the synchronisation
	 * barrier, either with an asynchronous clflush or if we executed the
	 * patching using the GPU (though that should be serialised by the
	 * timeline). To be completely sure, and since we are required to
	 * do relocations we are already stalling, disable the user's opt
	 * out of our synchronisation.
	 */
	*vma->exec_flags &= ~EXEC_OBJECT_ASYNC;

	/* and update the user's relocation entry */
	return relocate_entry(vma, reloc, eb, target);
}

static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *urelocs;
	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	unsigned int remain;

	urelocs = u64_to_user_ptr(entry->relocs_ptr);
	remain = entry->relocation_count;
	if (unlikely(remain > N_RELOC(ULONG_MAX)))
		return -EINVAL;

	/*
	 * We must check that the entire relocation array is safe
	 * to read. However, if the array is not writable the user loses
	 * the updated relocation values.
	 */
	if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs))))
		return -EFAULT;

	do {
		struct drm_i915_gem_relocation_entry *r = stack;
		unsigned int count =
			min_t(unsigned int, remain, ARRAY_SIZE(stack));
		unsigned int copied;

		/*
		 * This is the fast path and we cannot handle a pagefault
		 * whilst holding the struct mutex lest the user pass in the
		 * relocations contained within a mmaped bo. In such a case,
		 * the page fault handler would call i915_gem_fault() and
		 * we would try to acquire the struct mutex again. Obviously
		 * this is bad and so lockdep complains vehemently.
		 */
		pagefault_disable();
		copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
		pagefault_enable();
		if (unlikely(copied)) {
			remain = -EFAULT;
			goto out;
		}

		remain -= count;
		do {
			u64 offset = eb_relocate_entry(eb, vma, r);

			if (likely(offset == 0)) {
			} else if ((s64)offset < 0) {
				remain = (int)offset;
				goto out;
			} else {
				/*
				 * Note that reporting an error now
				 * leaves everything in an inconsistent
				 * state as we have *already* changed
				 * the relocation value inside the
				 * object. As we have not changed the
				 * reloc.presumed_offset or will not
				 * change the execobject.offset, on the
				 * call we may not rewrite the value
				 * inside the object, leaving it
				 * dangling and causing a GPU hang. Unless
				 * userspace dynamically rebuilds the
				 * relocations on each execbuf rather than
				 * presume a static tree.
				 *
				 * We did previously check if the relocations
				 * were writable (access_ok), an error now
				 * would be a strange race with mprotect,
				 * having already demonstrated that we
				 * can read from this userspace address.
				 */
				offset = gen8_canonical_addr(offset & ~UPDATE);
				__put_user(offset,
					   &urelocs[r-stack].presumed_offset);
			}
		} while (r++, --count);
		urelocs += ARRAY_SIZE(stack);
	} while (remain);
out:
	reloc_cache_reset(&eb->reloc_cache);
	return remain;
}

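/*
 * To put numbers on the chunking above: each drm_i915_gem_relocation_entry
 * is 32 bytes, so the on-stack staging buffer holds N_RELOC(512) = 16
 * entries. An object with, say, 40 relocations is processed in atomic
 * chunks of 16, 16 and 8 entries, dropping to the slow path only if one of
 * the __copy_from_user_inatomic() calls faults.
 */
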
static int
eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
{
	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	struct drm_i915_gem_relocation_entry *relocs =
		u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
	unsigned int i;
	int err;

	for (i = 0; i < entry->relocation_count; i++) {
		u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);

		if ((s64)offset < 0) {
			err = (int)offset;
			goto err;
		}
	}
	err = 0;
err:
	reloc_cache_reset(&eb->reloc_cache);
	return err;
}

static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
{
	const char __user *addr, *end;
	unsigned long size;
	char __maybe_unused c;

	size = entry->relocation_count;
	if (size == 0)
		return 0;

	if (size > N_RELOC(ULONG_MAX))
		return -EINVAL;

	addr = u64_to_user_ptr(entry->relocs_ptr);
	size *= sizeof(struct drm_i915_gem_relocation_entry);
	if (!access_ok(VERIFY_READ, addr, size))
		return -EFAULT;

	end = addr + size;
	for (; addr < end; addr += PAGE_SIZE) {
		int err = __get_user(c, addr);
		if (err)
			return err;
	}
	return __get_user(c, end - 1);
}

static int eb_copy_relocations(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		const unsigned int nreloc = eb->exec[i].relocation_count;
		struct drm_i915_gem_relocation_entry __user *urelocs;
		struct drm_i915_gem_relocation_entry *relocs;
		unsigned long size;
		unsigned long copied;

		if (nreloc == 0)
			continue;

		err = check_relocations(&eb->exec[i]);
		if (err)
			goto err;

		urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
		size = nreloc * sizeof(*relocs);

		relocs = kvmalloc_array(size, 1, GFP_KERNEL);
		if (!relocs) {
			kvfree(relocs);
			err = -ENOMEM;
			goto err;
		}

		/* copy_from_user is limited to < 4GiB */
		copied = 0;
		do {
			unsigned int len =
				min_t(u64, BIT_ULL(31), size - copied);

			if (__copy_from_user((char *)relocs + copied,
					     (char __user *)urelocs + copied,
					     len)) {
				kvfree(relocs);
				err = -EFAULT;
				goto err;
			}

			copied += len;
		} while (copied < size);

		/*
		 * As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		user_access_begin();
		for (copied = 0; copied < nreloc; copied++)
			unsafe_put_user(-1,
					&urelocs[copied].presumed_offset,
					end_user);
end_user:
		user_access_end();

		eb->exec[i].relocs_ptr = (uintptr_t)relocs;
	}

	return 0;

err:
	while (i--) {
		struct drm_i915_gem_relocation_entry *relocs =
			u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
		if (eb->exec[i].relocation_count)
			kvfree(relocs);
	}
	return err;
}

static int eb_prefault_relocations(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	if (unlikely(i915_modparams.prefault_disable))
		return 0;

	for (i = 0; i < count; i++) {
		int err;

		err = check_relocations(&eb->exec[i]);
		if (err)
			return err;
	}

	return 0;
}

static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
{
	struct drm_device *dev = &eb->i915->drm;
	bool have_copy = false;
	struct i915_vma *vma;
	int err = 0;

repeat:
	if (signal_pending(current)) {
		err = -ERESTARTSYS;
		goto out;
	}

	/* We may process another execbuffer during the unlock... */
	eb_reset_vmas(eb);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * We take 3 passes through the slowpath.
	 *
	 * 1 - we try to just prefault all the user relocation entries and
	 * then attempt to reuse the atomic pagefault disabled fast path again.
	 *
	 * 2 - we copy the user entries to a local buffer here outside of the
	 * lock and allow ourselves to wait upon any rendering before
	 * relocations
	 *
	 * 3 - we already have a local copy of the relocation entries, but
	 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
	 */
	if (!err) {
		err = eb_prefault_relocations(eb);
	} else if (!have_copy) {
		err = eb_copy_relocations(eb);
		have_copy = err == 0;
	} else {
		cond_resched();
		err = 0;
	}
	if (err) {
		mutex_lock(&dev->struct_mutex);
		goto out;
	}

	/* A frequent cause for EAGAIN are currently unavailable client pages */
	flush_workqueue(eb->i915->mm.userptr_wq);

	err = i915_mutex_lock_interruptible(dev);
	if (err) {
		mutex_lock(&dev->struct_mutex);
		goto out;
	}

	/* reacquire the objects */
	err = eb_lookup_vmas(eb);
	if (err)
		goto err;

	GEM_BUG_ON(!eb->batch);

	list_for_each_entry(vma, &eb->relocs, reloc_link) {
		if (!have_copy) {
			pagefault_disable();
			err = eb_relocate_vma(eb, vma);
			pagefault_enable();
			if (err)
				goto repeat;
		} else {
			err = eb_relocate_vma_slow(eb, vma);
			if (err)
				goto err;
		}
	}

	/*
	 * Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	if (err == -EAGAIN)
		goto repeat;

out:
	if (have_copy) {
		const unsigned int count = eb->buffer_count;
		unsigned int i;

		for (i = 0; i < count; i++) {
			const struct drm_i915_gem_exec_object2 *entry =
				&eb->exec[i];
			struct drm_i915_gem_relocation_entry *relocs;

			if (!entry->relocation_count)
				continue;

			relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
			kvfree(relocs);
		}
	}

	return err;
}

static int eb_relocate(struct i915_execbuffer *eb)
{
	if (eb_lookup_vmas(eb))
		goto slow;

	/* The objects are in their final locations, apply the relocations. */
	if (eb->args->flags & __EXEC_HAS_RELOC) {
		struct i915_vma *vma;

		list_for_each_entry(vma, &eb->relocs, reloc_link) {
			if (eb_relocate_vma(eb, vma))
				goto slow;
		}
	}

	return 0;

slow:
	return eb_relocate_slow(eb);
}

static void eb_export_fence(struct i915_vma *vma,
			    struct drm_i915_gem_request *req,
			    unsigned int flags)
{
	struct reservation_object *resv = vma->resv;

	/*
	 * Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	reservation_object_lock(resv, NULL);
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &req->fence);
	else if (reservation_object_reserve_shared(resv) == 0)
		reservation_object_add_shared_fence(resv, &req->fence);
	reservation_object_unlock(resv);
}

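/*
 * Illustrative walk-through (not part of the driver): the effect of the
 * fence export above on three execbufs touching the same buffer. This is a
 * sketch of the intended ordering, not a literal trace:
 *
 *	execbuf A: reads BO  -> reservation gains shared fence A
 *	execbuf B: reads BO  -> shared fence B; A and B may run concurrently
 *	execbuf C: writes BO -> exclusive fence C; C is ordered after A and B,
 *	                        and subsequent readers/writers wait upon C
 */
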
static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		unsigned int flags = eb->flags[i];
		struct i915_vma *vma = eb->vma[i];
		struct drm_i915_gem_object *obj = vma->obj;

		if (flags & EXEC_OBJECT_CAPTURE) {
			struct i915_gem_capture_list *capture;

			capture = kmalloc(sizeof(*capture), GFP_KERNEL);
			if (unlikely(!capture))
				return -ENOMEM;

			capture->next = eb->request->capture_list;
			capture->vma = eb->vma[i];
			eb->request->capture_list = capture;
		}

		/*
		 * If the GPU is not _reading_ through the CPU cache, we need
		 * to make sure that any writes (both previous GPU writes from
		 * before a change in snooping levels and normal CPU writes)
		 * caught in that cache are flushed to main memory.
		 *
		 * We want to say
		 *   obj->cache_dirty &&
		 *   !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		 * but gcc's optimiser doesn't handle that as well and emits
		 * two jumps instead of one. Maybe one day...
		 */
		if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
			if (i915_gem_clflush_object(obj, 0))
				flags &= ~EXEC_OBJECT_ASYNC;
		}

		if (flags & EXEC_OBJECT_ASYNC)
			continue;

		err = i915_gem_request_await_object
			(eb->request, obj, flags & EXEC_OBJECT_WRITE);
		if (err)
			return err;
	}

	for (i = 0; i < count; i++) {
		unsigned int flags = eb->flags[i];
		struct i915_vma *vma = eb->vma[i];

		i915_vma_move_to_active(vma, eb->request, flags);
		eb_export_fence(vma, eb->request, flags);

		__eb_unreserve_vma(vma, flags);
		vma->exec_flags = NULL;

		if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
			i915_vma_put(vma);
	}
	eb->exec = NULL;

	/* Unconditionally flush any chipset caches (for streaming writes). */
	i915_gem_chipset_flush(eb->i915);

	return 0;
}

static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
		if (exec->num_cliprects || exec->cliprects_ptr)
			return false;
	}

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
}

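/*
 * Illustrative sketch (not part of the driver): execbuffer2 arguments that
 * pass the validation above. 'handles' and 'nhandles' are hypothetical
 * caller-provided names; both batch_start_offset and batch_len must be
 * 8-byte aligned, and the cliprect fields must be zero unless
 * I915_EXEC_FENCE_ARRAY repurposes them.
 *
 *	struct drm_i915_gem_execbuffer2 eb2 = {
 *		.buffers_ptr = (uintptr_t)handles,
 *		.buffer_count = nhandles,
 *		.batch_start_offset = 0,	// 8-byte aligned
 *		.batch_len = 0,			// 0 => use the whole object
 *		.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC,
 *	};
 */
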
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct drm_i915_gem_request *req,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	const unsigned int idx = req->engine->id;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	/*
	 * Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	if (!i915_vma_is_active(vma))
		obj->active_count++;
	i915_vma_set_active(vma, idx);
	i915_gem_active_set(&vma->last_read[idx], req);
	list_move_tail(&vma->vm_link, &vma->vm->active_list);

	obj->base.write_domain = 0;
	if (flags & EXEC_OBJECT_WRITE) {
		obj->base.write_domain = I915_GEM_DOMAIN_RENDER;

		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			i915_gem_active_set(&obj->frontbuffer_write, req);

		obj->base.read_domains = 0;
	}
	obj->base.read_domains |= I915_GEM_GPU_DOMAINS;

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		i915_gem_active_set(&vma->last_fence, req);
}

static int i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
{
	u32 *cs;
	int i;

	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	cs = intel_ring_begin(req, 4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(4);
	for (i = 0; i < 4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
		*cs++ = 0;
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

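/*
 * The ring allocation above is sized exactly for the commands emitted: one
 * MI_LOAD_REGISTER_IMM header, four (register, value) dword pairs clearing
 * GEN7_SO_WRITE_OFFSET(0..3), and a trailing MI_NOOP to keep the emission
 * an even number of dwords - hence 4 * 2 + 2 = 10 dwords in total.
 */
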
static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
{
	struct drm_i915_gem_object *shadow_batch_obj;
	struct i915_vma *vma;
	int err;

	shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
						   PAGE_ALIGN(eb->batch_len));
	if (IS_ERR(shadow_batch_obj))
		return ERR_CAST(shadow_batch_obj);

	err = intel_engine_cmd_parser(eb->engine,
				      eb->batch->obj,
				      shadow_batch_obj,
				      eb->batch_start_offset,
				      eb->batch_len,
				      is_master);
	if (err) {
		if (err == -EACCES) /* unhandled chained batch */
			vma = NULL;
		else
			vma = ERR_PTR(err);
		goto out;
	}

	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		goto out;

	eb->vma[eb->buffer_count] = i915_vma_get(vma);
	eb->flags[eb->buffer_count] =
		__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
	vma->exec_flags = &eb->flags[eb->buffer_count];
	eb->buffer_count++;

out:
	i915_gem_object_unpin_pages(shadow_batch_obj);
	return vma;
}

static void
add_to_client(struct drm_i915_gem_request *req, struct drm_file *file)
{
	req->file_priv = file->driver_priv;
	list_add_tail(&req->client_link, &req->file_priv->mm.request_list);
}

static int eb_submit(struct i915_execbuffer *eb)
{
	int err;

	err = eb_move_to_gpu(eb);
	if (err)
		return err;

	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
		err = i915_reset_gen7_sol_offsets(eb->request);
		if (err)
			return err;
	}

	err = eb->engine->emit_bb_start(eb->request,
					eb->batch->node.start +
					eb->batch_start_offset,
					eb->batch_len,
					eb->batch_flags);
	if (err)
		return err;

	return 0;
}

/*
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The engine index is returned.
 */
static unsigned int
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_engine < 0)
		file_priv->bsd_engine = atomic_fetch_xor(1,
			&dev_priv->mm.bsd_engine_dispatch_index);

	return file_priv->bsd_engine;
}

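/*
 * Illustrative walk-through (not part of the driver): the atomic_fetch_xor()
 * above makes the default BSD engine ping-pong between the two video rings,
 * one choice per client; each client then sticks with its first answer via
 * file_priv->bsd_engine:
 *
 *	index = 0: client A -> vcs0, index flips to 1
 *	index = 1: client B -> vcs1, index flips to 0
 *	index = 0: client C -> vcs0, ...
 */
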
#define I915_USER_RINGS (4)

static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
	[I915_EXEC_DEFAULT] = RCS,
	[I915_EXEC_RENDER] = RCS,
	[I915_EXEC_BLT] = BCS,
	[I915_EXEC_BSD] = VCS,
	[I915_EXEC_VEBOX] = VECS
};

static struct intel_engine_cs *
eb_select_engine(struct drm_i915_private *dev_priv,
		 struct drm_file *file,
		 struct drm_i915_gem_execbuffer2 *args)
{
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
	struct intel_engine_cs *engine;

	if (user_ring_id > I915_USER_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
		return NULL;
	}

	if ((user_ring_id != I915_EXEC_BSD) &&
	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return NULL;
	}

	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
			return NULL;
		}

		engine = dev_priv->engine[_VCS(bsd_idx)];
	} else {
		engine = dev_priv->engine[user_ring_map[user_ring_id]];
	}

	if (!engine) {
		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
		return NULL;
	}

	return engine;
}

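/*
 * Illustrative sketch (not part of the driver): how userspace addresses a
 * specific BSD engine on parts with two video rings. 'eb2' is a hypothetical
 * struct drm_i915_gem_execbuffer2; the flags come from i915_drm.h.
 *
 *	eb2.flags = I915_EXEC_BSD | I915_EXEC_BSD_RING2;  // force vcs1
 *	eb2.flags = I915_EXEC_BSD;  // BSD_DEFAULT: kernel load-balances
 *
 * Any I915_EXEC_BSD_MASK bits combined with a non-BSD ring are rejected by
 * the checks above.
 */
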
static void
__free_fence_array(struct drm_syncobj **fences, unsigned int n)
{
	while (n--)
		drm_syncobj_put(ptr_mask_bits(fences[n], 2));
	kvfree(fences);
}

static struct drm_syncobj **
get_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_file *file)
{
	const unsigned long nfences = args->num_cliprects;
	struct drm_i915_gem_exec_fence __user *user;
	struct drm_syncobj **fences;
	unsigned long n;
	int err;

	if (!(args->flags & I915_EXEC_FENCE_ARRAY))
		return NULL;

	/* Check multiplication overflow for access_ok() and kvmalloc_array() */
	BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
	if (nfences > min_t(unsigned long,
			    ULONG_MAX / sizeof(*user),
			    SIZE_MAX / sizeof(*fences)))
		return ERR_PTR(-EINVAL);

	user = u64_to_user_ptr(args->cliprects_ptr);
	if (!access_ok(VERIFY_READ, user, nfences * sizeof(*user)))
		return ERR_PTR(-EFAULT);

	fences = kvmalloc_array(nfences, sizeof(*fences),
				__GFP_NOWARN | GFP_KERNEL);
	if (!fences)
		return ERR_PTR(-ENOMEM);

	for (n = 0; n < nfences; n++) {
		struct drm_i915_gem_exec_fence fence;
		struct drm_syncobj *syncobj;

		if (__copy_from_user(&fence, user++, sizeof(fence))) {
			err = -EFAULT;
			goto err;
		}

		if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
			err = -EINVAL;
			goto err;
		}

		syncobj = drm_syncobj_find(file, fence.handle);
		if (!syncobj) {
			DRM_DEBUG("Invalid syncobj handle provided\n");
			err = -ENOENT;
			goto err;
		}

		BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
			     ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);

		fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
	}

	return fences;

err:
	__free_fence_array(fences, n);
	return ERR_PTR(err);
}

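/*
 * Illustrative sketch (not part of the driver): the userspace layout that
 * get_fence_array() parses. With I915_EXEC_FENCE_ARRAY set, cliprects_ptr
 * and num_cliprects are repurposed to carry syncobj fences. The syncobj
 * handle names are hypothetical.
 *
 *	struct drm_i915_gem_exec_fence fences[2] = {
 *		{ .handle = wait_syncobj,   .flags = I915_EXEC_FENCE_WAIT },
 *		{ .handle = signal_syncobj, .flags = I915_EXEC_FENCE_SIGNAL },
 *	};
 *
 *	eb2.flags |= I915_EXEC_FENCE_ARRAY;
 *	eb2.cliprects_ptr = (uintptr_t)fences;
 *	eb2.num_cliprects = 2;
 */
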
static void
put_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_syncobj **fences)
{
	if (fences)
		__free_fence_array(fences, args->num_cliprects);
}

static int
await_fence_array(struct i915_execbuffer *eb,
		  struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	unsigned int n;
	int err;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		struct dma_fence *fence;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_WAIT))
			continue;

		fence = drm_syncobj_fence_get(syncobj);
		if (!fence)
			return -EINVAL;

		err = i915_gem_request_await_dma_fence(eb->request, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}

static void
signal_fence_array(struct i915_execbuffer *eb,
		   struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	struct dma_fence * const fence = &eb->request->fence;
	unsigned int n;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_SIGNAL))
			continue;

		drm_syncobj_replace_fence(syncobj, fence);
	}
}

static int
i915_gem_do_execbuffer(struct drm_device *dev,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct drm_syncobj **fences)
{
	struct i915_execbuffer eb;
	struct dma_fence *in_fence = NULL;
	struct sync_file *out_fence = NULL;
	int out_fence_fd = -1;
	int err;

	BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	eb.i915 = to_i915(dev);
	eb.file = file;
	eb.args = args;
	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
		args->flags |= __EXEC_HAS_RELOC;

	eb.exec = exec;
	eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1);
	eb.vma[0] = NULL;
	eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);

	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	if (USES_FULL_PPGTT(eb.i915))
		eb.invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
	reloc_cache_init(&eb.reloc_cache, eb.i915);

	eb.buffer_count = args->buffer_count;
	eb.batch_start_offset = args->batch_start_offset;
	eb.batch_len = args->batch_len;

	eb.batch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		eb.batch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		eb.batch_flags |= I915_DISPATCH_PINNED;

	eb.engine = eb_select_engine(eb.i915, file, args);
	if (!eb.engine)
		return -EINVAL;

	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
		if (!HAS_RESOURCE_STREAMER(eb.i915)) {
			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
			return -EINVAL;
		}
		if (eb.engine->id != RCS) {
			DRM_DEBUG("RS is not available on %s\n",
				  eb.engine->name);
			return -EINVAL;
		}

		eb.batch_flags |= I915_DISPATCH_RS;
	}

	if (args->flags & I915_EXEC_FENCE_IN) {
		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!in_fence)
			return -EINVAL;
	}

	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			err = out_fence_fd;
			goto err_in_fence;
		}
	}

	err = eb_create(&eb);
	if (err)
		goto err_out_fence;

	GEM_BUG_ON(!eb.lut_size);

	err = eb_select_context(&eb);
	if (unlikely(err))
		goto err_destroy;

	/*
	 * Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process. Upon first dispatch, we acquire another prolonged
	 * wakeref that we hold until the GPU has been idle for at least
	 * 100ms.
	 */
	intel_runtime_pm_get(eb.i915);

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto err_rpm;

	err = eb_relocate(&eb);
	if (err) {
		/*
		 * If the user expects the execobject.offset and
		 * reloc.presumed_offset to be an exact match,
		 * as for using NO_RELOC, then we cannot update
		 * the execobject.offset until we have completed
		 * relocation.
		 */
		args->flags &= ~__EXEC_HAS_RELOC;
		goto err_vma;
	}

	if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		err = -EINVAL;
		goto err_vma;
	}
	if (eb.batch_start_offset > eb.batch->size ||
	    eb.batch_len > eb.batch->size - eb.batch_start_offset) {
		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
		err = -EINVAL;
		goto err_vma;
	}

	if (eb_use_cmdparser(&eb)) {
		struct i915_vma *vma;

		vma = eb_parse(&eb, drm_is_current_master(file));
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_vma;
		}

		if (vma) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			eb.batch_flags |= I915_DISPATCH_SECURE;
			eb.batch_start_offset = 0;
			eb.batch = vma;
		}
	}

	if (eb.batch_len == 0)
		eb.batch_len = eb.batch->size - eb.batch_start_offset;

	/*
	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again.
	 */
	if (eb.batch_flags & I915_DISPATCH_SECURE) {
		struct i915_vma *vma;

		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_vma;
		}

		eb.batch = vma;
	}

	/* All GPU relocation batches must be submitted prior to the user rq */
	GEM_BUG_ON(eb.reloc_cache.rq);

	/* Allocate a request for this batch buffer nice and early. */
	eb.request = i915_gem_request_alloc(eb.engine, eb.ctx);
	if (IS_ERR(eb.request)) {
		err = PTR_ERR(eb.request);
		goto err_batch_unpin;
	}

	if (in_fence) {
		err = i915_gem_request_await_dma_fence(eb.request, in_fence);
		if (err < 0)
			goto err_request;
	}

	if (fences) {
		err = await_fence_array(&eb, fences);
		if (err)
			goto err_request;
	}

	if (out_fence_fd != -1) {
		out_fence = sync_file_create(&eb.request->fence);
		if (!out_fence) {
			err = -ENOMEM;
			goto err_request;
		}
	}

	/*
	 * Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	eb.request->batch = eb.batch;

	trace_i915_gem_request_queue(eb.request, eb.batch_flags);
	err = eb_submit(&eb);
err_request:
	__i915_add_request(eb.request, err == 0);
	add_to_client(eb.request, file);

	if (fences)
		signal_fence_array(&eb, fences);

	if (out_fence) {
		if (err == 0) {
			fd_install(out_fence_fd, out_fence->file);
			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
			args->rsvd2 |= (u64)out_fence_fd << 32;
			out_fence_fd = -1;
		} else {
			fput(out_fence->file);
		}
	}

err_batch_unpin:
	if (eb.batch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(eb.batch);
err_vma:
	if (eb.exec)
		eb_release_vmas(&eb);
	mutex_unlock(&dev->struct_mutex);
err_rpm:
	intel_runtime_pm_put(eb.i915);
	i915_gem_context_put(eb.ctx);
err_destroy:
	eb_destroy(&eb);
err_out_fence:
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
err_in_fence:
	dma_fence_put(in_fence);
	return err;
}

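/*
 * Illustrative sketch (not part of the driver): the rsvd2 packing for the
 * in/out fences handled above, as seen from userspace. The fd names are
 * hypothetical; the _WR variant of the ioctl is required so that the kernel
 * can write the out-fence fd back into the upper half of rsvd2.
 *
 *	eb2.flags |= I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
 *	eb2.rsvd2 = (uint32_t)in_fence_fd;	// lower 32 bits: fence in
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, &eb2);
 *	out_fence_fd = (int)(eb2.rsvd2 >> 32);	// upper 32 bits: fence out
 */
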
static size_t eb_element_size(void)
{
	return (sizeof(struct drm_i915_gem_exec_object2) +
		sizeof(struct i915_vma *) +
		sizeof(unsigned int));
}

static bool check_buffer_count(size_t count)
{
	const size_t sz = eb_element_size();

	/*
	 * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
	 * array size (see eb_create()). Otherwise, we can accept an array as
	 * large as can be addressed (though use large arrays at your peril)!
	 */

	return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
}

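/*
 * Worked example of the bound above: each buffer costs one exec_object2,
 * one vma pointer and one flags word (eb_element_size()), and the callers
 * allocate count + 1 elements to leave a spare slot for the command parser.
 * The allocation is therefore (count + 1) * sz, which only fits in size_t
 * when count <= SIZE_MAX / sz - 1; the INT_MAX cap covers the handle LUT.
 */
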
/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	const size_t count = args->buffer_count;
	unsigned int i;
	int err;

	if (!check_buffer_count(count)) {
		DRM_DEBUG("execbuf with %zd buffers\n", count);
		return -EINVAL;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	if (!i915_gem_check_execbuffer(&exec2))
		return -EINVAL;

	/* Copy in the exec list from userland */
	exec_list = kvmalloc_array(count, sizeof(*exec_list),
				   __GFP_NOWARN | GFP_KERNEL);
	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -ENOMEM;
	}
	err = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * count);
	if (err) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, err);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_GEN(to_i915(dev)) < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL);
	if (exec2.flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			exec2_list[i].offset &= PIN_OFFSET_MASK;
			if (__copy_to_user(&user_exec_list[i].offset,
					   &exec2_list[i].offset,
					   sizeof(user_exec_list[i].offset)))
				break;
		}
	}

	kvfree(exec_list);
	kvfree(exec2_list);
	return err;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list;
	struct drm_syncobj **fences = NULL;
	const size_t count = args->buffer_count;
	int err;

	if (!check_buffer_count(count)) {
		DRM_DEBUG("execbuf2 with %zd buffers\n", count);
		return -EINVAL;
	}

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	/* Allocate an extra slot for use by the command parser */
	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %zd buffers\n",
			  count);
		return -ENOMEM;
	}
	if (copy_from_user(exec2_list,
			   u64_to_user_ptr(args->buffers_ptr),
			   sizeof(*exec2_list) * count)) {
		DRM_DEBUG("copy %zd exec entries failed\n", count);
		kvfree(exec2_list);
		return -EFAULT;
	}

	if (args->flags & I915_EXEC_FENCE_ARRAY) {
		fences = get_fence_array(args, file);
		if (IS_ERR(fences)) {
			kvfree(exec2_list);
			return PTR_ERR(fences);
		}
	}

	err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences);

	/*
	 * Now that we have begun execution of the batchbuffer, we ignore
	 * any new error after this point. Also given that we have already
	 * updated the associated relocations, we try to write out the current
	 * object locations irrespective of any error.
	 */
	if (args->flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
		unsigned int i;

		/* Copy the new buffer offsets back to the user's exec list. */
		user_access_begin();
		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			unsafe_put_user(exec2_list[i].offset,
					&user_exec_list[i].offset,
					end_user);
		}
end_user:
		user_access_end();
	}

	args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
	put_fence_array(args, fences);
	kvfree(exec2_list);
	return err;
}
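
/*
 * Illustrative end-to-end sketch (not part of the driver): a minimal
 * userspace submission through the execbuffer2 ioctl handled above. Error
 * handling is elided and the fd/GEM handle names are hypothetical; with a
 * single entry, the one object is also the batch.
 *
 *	struct drm_i915_gem_exec_object2 obj[1] = {
 *		{ .handle = batch_handle },	// last entry is the batch
 *	};
 *	struct drm_i915_gem_execbuffer2 eb2 = {
 *		.buffers_ptr = (uintptr_t)obj,
 *		.buffer_count = 1,
 *		.batch_len = 8,		// MI_BATCH_BUFFER_END + MI_NOOP
 *		.flags = I915_EXEC_RENDER,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb2);
 */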