drm/i915/bdw: Write the tail pointer, LRC style
drivers/gpu/drm/i915/intel_lrc.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/*
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 */

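/*
 * In short: a Logical Ring Context couples a ringbuffer with the per-engine
 * register state that the hardware saves and restores on its own. Submission
 * works by writing the new ring tail into the context image
 * (execlists_ctx_write_tail) and then writing a pair of context descriptors
 * to the ExecList Submit Port (execlists_elsp_write); the second element of
 * the pair may be empty. Everything below either builds those context images
 * (populate_lr_context, intel_lr_context_deferred_create) or drives
 * submission through them.
 */
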
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_ALIGN 4096

#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)
#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)

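/*
 * The CTX_* values below are dword offsets into the register state page of a
 * logical ring context image (see populate_lr_context): the slot at CTX_FOO
 * holds the register offset and CTX_FOO+1 holds its value, hence the
 * reg_state[CTX_FOO+1] accesses elsewhere in this file.
 */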
#define CTX_LRI_HEADER_0 0x01
#define CTX_CONTEXT_CONTROL 0x02
#define CTX_RING_HEAD 0x04
#define CTX_RING_TAIL 0x06
#define CTX_RING_BUFFER_START 0x08
#define CTX_RING_BUFFER_CONTROL 0x0a
#define CTX_BB_HEAD_U 0x0c
#define CTX_BB_HEAD_L 0x0e
#define CTX_BB_STATE 0x10
#define CTX_SECOND_BB_HEAD_U 0x12
#define CTX_SECOND_BB_HEAD_L 0x14
#define CTX_SECOND_BB_STATE 0x16
#define CTX_BB_PER_CTX_PTR 0x18
#define CTX_RCS_INDIRECT_CTX 0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c
#define CTX_LRI_HEADER_1 0x21
#define CTX_CTX_TIMESTAMP 0x22
#define CTX_PDP3_UDW 0x24
#define CTX_PDP3_LDW 0x26
#define CTX_PDP2_UDW 0x28
#define CTX_PDP2_LDW 0x2a
#define CTX_PDP1_UDW 0x2c
#define CTX_PDP1_LDW 0x2e
#define CTX_PDP0_UDW 0x30
#define CTX_PDP0_LDW 0x32
#define CTX_LRI_HEADER_2 0x41
#define CTX_R_PWR_CLK_STATE 0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS 0x44

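/*
 * Bits and fields of the 64-bit context descriptor that
 * execlists_ctx_descriptor() assembles and execlists_elsp_write() hands to
 * the hardware: validity, addressing mode, the 4K-aligned LRCA and, in the
 * upper 32 bits, the context ID derived from that address.
 */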
#define GEN8_CTX_VALID (1<<0)
#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
#define GEN8_CTX_FORCE_RESTORE (1<<2)
#define GEN8_CTX_L3LLC_COHERENT (1<<5)
#define GEN8_CTX_PRIVILEGE (1<<8)
enum {
        ADVANCED_CONTEXT = 0,
        LEGACY_CONTEXT,
        ADVANCED_AD_CONTEXT,
        LEGACY_64B_CONTEXT
};
#define GEN8_CTX_MODE_SHIFT 3
enum {
        FAULT_AND_HANG = 0,
        FAULT_AND_HALT, /* Debug only */
        FAULT_AND_STREAM,
        FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32

int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
{
        WARN_ON(i915.enable_ppgtt == -1);

        if (enable_execlists == 0)
                return 0;

        if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
            i915.use_mmio_flip >= 0)
                return 1;

        return 0;
}

u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
{
        u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj);

        /* LRCA is required to be 4K aligned so the more significant 20 bits
         * are globally unique */
        return lrca >> 12;
}

static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_object *ctx_obj)
{
        uint64_t desc;
        uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
        BUG_ON(lrca & 0xFFFFFFFF00000FFFULL);

        desc = GEN8_CTX_VALID;
        desc |= LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT;
        desc |= GEN8_CTX_L3LLC_COHERENT;
        desc |= GEN8_CTX_PRIVILEGE;
        desc |= lrca;
        desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT;

        /* TODO: WaDisableLiteRestore when we start using semaphore
         * signalling between Command Streamers */
        /* desc |= GEN8_CTX_FORCE_RESTORE; */

        return desc;
}

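/*
 * Submit up to two contexts to the ExecList Submit Port. The port takes four
 * dwords: element 1 (ctx_obj1, upper then lower half) followed by element 0
 * (ctx_obj0); element 1 may be empty. Forcewake is held across the writes,
 * and since ELSP is write-only the post is done through EXECLIST_STATUS.
 */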
static void execlists_elsp_write(struct intel_engine_cs *ring,
                                 struct drm_i915_gem_object *ctx_obj0,
                                 struct drm_i915_gem_object *ctx_obj1)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        uint64_t temp = 0;
        uint32_t desc[4];

        /* XXX: You must always write both descriptors in the order below. */
        if (ctx_obj1)
                temp = execlists_ctx_descriptor(ctx_obj1);
        else
                temp = 0;
        desc[1] = (u32)(temp >> 32);
        desc[0] = (u32)temp;

        temp = execlists_ctx_descriptor(ctx_obj0);
        desc[3] = (u32)(temp >> 32);
        desc[2] = (u32)temp;

        /* Set Force Wakeup bit to prevent GT from entering C6 while
         * ELSP writes are in progress */
        gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

        I915_WRITE(RING_ELSP(ring), desc[1]);
        I915_WRITE(RING_ELSP(ring), desc[0]);
        I915_WRITE(RING_ELSP(ring), desc[3]);
        /* The context is automatically loaded after the following */
        I915_WRITE(RING_ELSP(ring), desc[2]);

        /* ELSP is a wo register, so use another nearby reg for posting instead */
        POSTING_READ(RING_EXECLIST_STATUS(ring));

        gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}

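/*
 * With execlists there is no RING_TAIL register write from the driver: the
 * tail lives in the context image instead. Kmap the register state page and
 * poke the new tail into the CTX_RING_TAIL slot so the hardware picks it up
 * when the context is (re)submitted via the ELSP.
 */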
static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tail)
{
        struct page *page;
        uint32_t *reg_state;

        page = i915_gem_object_get_page(ctx_obj, 1);
        reg_state = kmap_atomic(page);

        reg_state[CTX_RING_TAIL+1] = tail;

        kunmap_atomic(reg_state);

        return 0;
}

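/*
 * Update the tail pointer(s) in the context image(s) and then submit the
 * pair to the ELSP. The second context is optional.
 */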
static int execlists_submit_context(struct intel_engine_cs *ring,
                                    struct intel_context *to0, u32 tail0,
                                    struct intel_context *to1, u32 tail1)
{
        struct drm_i915_gem_object *ctx_obj0;
        struct drm_i915_gem_object *ctx_obj1 = NULL;

        ctx_obj0 = to0->engine[ring->id].state;
        BUG_ON(!ctx_obj0);
        BUG_ON(!i915_gem_obj_is_pinned(ctx_obj0));

        execlists_ctx_write_tail(ctx_obj0, tail0);

        if (to1) {
                ctx_obj1 = to1->engine[ring->id].state;
                BUG_ON(!ctx_obj1);
                BUG_ON(!i915_gem_obj_is_pinned(ctx_obj1));

                execlists_ctx_write_tail(ctx_obj1, tail1);
        }

        execlists_elsp_write(ring, ctx_obj0, ctx_obj1);

        return 0;
}

static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
{
        struct intel_engine_cs *ring = ringbuf->ring;
        uint32_t flush_domains;
        int ret;

        flush_domains = 0;
        if (ring->gpu_caches_dirty)
                flush_domains = I915_GEM_GPU_DOMAINS;

        ret = ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains);
        if (ret)
                return ret;

        ring->gpu_caches_dirty = false;
        return 0;
}

static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
                                 struct list_head *vmas)
{
        struct intel_engine_cs *ring = ringbuf->ring;
        struct i915_vma *vma;
        uint32_t flush_domains = 0;
        bool flush_chipset = false;
        int ret;

        list_for_each_entry(vma, vmas, exec_list) {
                struct drm_i915_gem_object *obj = vma->obj;

                ret = i915_gem_object_sync(obj, ring);
                if (ret)
                        return ret;

                if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
                        flush_chipset |= i915_gem_clflush_object(obj, false);

                flush_domains |= obj->base.write_domain;
        }

        if (flush_domains & I915_GEM_DOMAIN_GTT)
                wmb();

        /* Unconditionally invalidate gpu caches and ensure that we do flush
         * any residual writes from the previous batch.
         */
        return logical_ring_invalidate_all_caches(ringbuf);
}

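/*
 * This is the execbuffer submission path for execlists: it mirrors the legacy
 * ringbuffer path (constants mode, cliprect and flag validation, moving the
 * objects to the GPU domain) but emits everything through the logical ring
 * belonging to the context and finishes with the engine's emit_bb_start hook.
 */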
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
                               struct intel_engine_cs *ring,
                               struct intel_context *ctx,
                               struct drm_i915_gem_execbuffer2 *args,
                               struct list_head *vmas,
                               struct drm_i915_gem_object *batch_obj,
                               u64 exec_start, u32 flags)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
        int instp_mode;
        u32 instp_mask;
        int ret;

        instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
        instp_mask = I915_EXEC_CONSTANTS_MASK;
        switch (instp_mode) {
        case I915_EXEC_CONSTANTS_REL_GENERAL:
        case I915_EXEC_CONSTANTS_ABSOLUTE:
        case I915_EXEC_CONSTANTS_REL_SURFACE:
                if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
                        DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
                        return -EINVAL;
                }

                if (instp_mode != dev_priv->relative_constants_mode) {
                        if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
                                DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
                                return -EINVAL;
                        }

                        /* The HW changed the meaning of this bit on gen6 */
                        instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
                }
                break;
        default:
                DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
                return -EINVAL;
        }

        if (args->num_cliprects != 0) {
                DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
                return -EINVAL;
        } else {
                if (args->DR4 == 0xffffffff) {
                        DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
                        args->DR4 = 0;
                }

                if (args->DR1 || args->DR4 || args->cliprects_ptr) {
                        DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
                        return -EINVAL;
                }
        }

        if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
                DRM_DEBUG("sol reset is gen7 only\n");
                return -EINVAL;
        }

        ret = execlists_move_to_gpu(ringbuf, vmas);
        if (ret)
                return ret;

        if (ring == &dev_priv->ring[RCS] &&
            instp_mode != dev_priv->relative_constants_mode) {
                ret = intel_logical_ring_begin(ringbuf, 4);
                if (ret)
                        return ret;

                intel_logical_ring_emit(ringbuf, MI_NOOP);
                intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
                intel_logical_ring_emit(ringbuf, INSTPM);
                intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
                intel_logical_ring_advance(ringbuf);

                dev_priv->relative_constants_mode = instp_mode;
        }

        ret = ring->emit_bb_start(ringbuf, exec_start, flags);
        if (ret)
                return ret;

        i915_gem_execbuffer_move_to_active(vmas, ring);
        i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

        return 0;
}

void intel_logical_ring_stop(struct intel_engine_cs *ring)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        int ret;

        if (!intel_ring_initialized(ring))
                return;

        ret = intel_ring_idle(ring);
        if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
                          ring->name, ret);

        /* TODO: Is this correct with Execlists enabled? */
        I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
        if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
                DRM_ERROR("%s: timed out trying to stop ring\n", ring->name);
                return;
        }
        I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
}

int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
{
        struct intel_engine_cs *ring = ringbuf->ring;
        int ret;

        if (!ring->gpu_caches_dirty)
                return 0;

        ret = ring->emit_flush(ringbuf, 0, I915_GEM_GPU_DOMAINS);
        if (ret)
                return ret;

        ring->gpu_caches_dirty = false;
        return 0;
}

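/*
 * Advance the ringbuffer tail and, unless the ring has been stopped, submit
 * the context so the hardware sees the new tail. Note the FIXME below: the
 * ELSP is written without checking whether it can accept a new submission.
 */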
void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
{
        struct intel_engine_cs *ring = ringbuf->ring;
        struct intel_context *ctx = ringbuf->FIXME_lrc_ctx;

        intel_logical_ring_advance(ringbuf);

        if (intel_ring_stopped(ring))
                return;

        /* FIXME: too cheeky, we don't even check if the ELSP is ready */
        execlists_submit_context(ring, ctx, ringbuf->tail, NULL, 0);
}

static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
                                    struct intel_context *ctx)
{
        if (ring->outstanding_lazy_seqno)
                return 0;

        if (ring->preallocated_lazy_request == NULL) {
                struct drm_i915_gem_request *request;

                request = kmalloc(sizeof(*request), GFP_KERNEL);
                if (request == NULL)
                        return -ENOMEM;

                /* Hold a reference to the context this request belongs to
                 * (we will need it when the time comes to emit/retire the
                 * request).
                 */
                request->ctx = ctx;
                i915_gem_context_reference(request->ctx);

                ring->preallocated_lazy_request = request;
        }

        return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}

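/*
 * Make room in the ringbuffer: first reuse space freed by already-retired
 * requests, then look for an outstanding request whose completion would free
 * enough space and wait for its seqno. logical_ring_wait_for_space() below
 * falls back to polling the ring registers if no such request exists.
 */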
static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
                                     int bytes)
{
        struct intel_engine_cs *ring = ringbuf->ring;
        struct drm_i915_gem_request *request;
        u32 seqno = 0;
        int ret;

        if (ringbuf->last_retired_head != -1) {
                ringbuf->head = ringbuf->last_retired_head;
                ringbuf->last_retired_head = -1;

                ringbuf->space = intel_ring_space(ringbuf);
                if (ringbuf->space >= bytes)
                        return 0;
        }

        list_for_each_entry(request, &ring->request_list, list) {
                if (__intel_ring_space(request->tail, ringbuf->tail,
                                       ringbuf->size) >= bytes) {
                        seqno = request->seqno;
                        break;
                }
        }

        if (seqno == 0)
                return -ENOSPC;

        ret = i915_wait_seqno(ring, seqno);
        if (ret)
                return ret;

        i915_gem_retire_requests_ring(ring);
        ringbuf->head = ringbuf->last_retired_head;
        ringbuf->last_retired_head = -1;

        ringbuf->space = intel_ring_space(ringbuf);
        return 0;
}

static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
                                       int bytes)
{
        struct intel_engine_cs *ring = ringbuf->ring;
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long end;
        int ret;

        ret = logical_ring_wait_request(ringbuf, bytes);
        if (ret != -ENOSPC)
                return ret;

        /* Force the context submission in case we have been skipping it */
        intel_logical_ring_advance_and_submit(ringbuf);

        /* With GEM the hangcheck timer should kick us out of the loop,
         * leaving it early runs the risk of corrupting GEM state (due
         * to running on almost untested codepaths). But on resume
         * timers don't work yet, so prevent a complete hang in that
         * case by choosing an insanely large timeout. */
        end = jiffies + 60 * HZ;

        do {
                ringbuf->head = I915_READ_HEAD(ring);
                ringbuf->space = intel_ring_space(ringbuf);
                if (ringbuf->space >= bytes) {
                        ret = 0;
                        break;
                }

                msleep(1);

                if (dev_priv->mm.interruptible && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                ret = i915_gem_check_wedge(&dev_priv->gpu_error,
                                           dev_priv->mm.interruptible);
                if (ret)
                        break;

                if (time_after(jiffies, end)) {
                        ret = -EBUSY;
                        break;
                }
        } while (1);

        return ret;
}

static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
{
        uint32_t __iomem *virt;
        int rem = ringbuf->size - ringbuf->tail;

        if (ringbuf->space < rem) {
                int ret = logical_ring_wait_for_space(ringbuf, rem);

                if (ret)
                        return ret;
        }

        virt = ringbuf->virtual_start + ringbuf->tail;
        rem /= 4;
        while (rem--)
                iowrite32(MI_NOOP, virt++);

        ringbuf->tail = 0;
        ringbuf->space = intel_ring_space(ringbuf);

        return 0;
}

static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
{
        int ret;

        if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
                ret = logical_ring_wrap_buffer(ringbuf);
                if (unlikely(ret))
                        return ret;
        }

        if (unlikely(ringbuf->space < bytes)) {
                ret = logical_ring_wait_for_space(ringbuf, bytes);
                if (unlikely(ret))
                        return ret;
        }

        return 0;
}

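/*
 * intel_logical_ring_begin() is the LRC counterpart of intel_ring_begin():
 * check for a wedged GPU, make sure there is space for num_dwords (wrapping
 * the buffer if needed) and preallocate the lazy request before any dwords
 * touch the ring.
 */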
int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
{
        struct intel_engine_cs *ring = ringbuf->ring;
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = i915_gem_check_wedge(&dev_priv->gpu_error,
                                   dev_priv->mm.interruptible);
        if (ret)
                return ret;

        ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t));
        if (ret)
                return ret;

        /* Preallocate the olr before touching the ring */
        ret = logical_ring_alloc_seqno(ring, ringbuf->FIXME_lrc_ctx);
        if (ret)
                return ret;

        ringbuf->space -= num_dwords * sizeof(uint32_t);
        return 0;
}

static int gen8_init_common_ring(struct intel_engine_cs *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
        I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);

        I915_WRITE(RING_MODE_GEN7(ring),
                   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
                   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
        POSTING_READ(RING_MODE_GEN7(ring));
        DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);

        memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

        return 0;
}

static int gen8_init_render_ring(struct intel_engine_cs *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = gen8_init_common_ring(ring);
        if (ret)
                return ret;

        /* We need to disable the AsyncFlip performance optimisations in order
         * to use MI_WAIT_FOR_EVENT within the CS. It should already be
         * programmed to '1' on all products.
         *
         * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
         */
        I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

        ret = intel_init_pipe_control(ring);
        if (ret)
                return ret;

        I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

        return ret;
}

static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
                              u64 offset, unsigned flags)
{
        bool ppgtt = !(flags & I915_DISPATCH_SECURE);
        int ret;

        ret = intel_logical_ring_begin(ringbuf, 4);
        if (ret)
                return ret;

        /* FIXME(BDW): Address space and security selectors. */
        intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
        intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
        intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
        intel_logical_ring_emit(ringbuf, MI_NOOP);
        intel_logical_ring_advance(ringbuf);

        return 0;
}

static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;

        if (!dev->irq_enabled)
                return false;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0) {
                I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
                POSTING_READ(RING_IMR(ring->mmio_base));
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        return true;
}

static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0) {
                I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
                POSTING_READ(RING_IMR(ring->mmio_base));
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
                           u32 invalidate_domains,
                           u32 unused)
{
        struct intel_engine_cs *ring = ringbuf->ring;
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t cmd;
        int ret;

        ret = intel_logical_ring_begin(ringbuf, 4);
        if (ret)
                return ret;

        cmd = MI_FLUSH_DW + 1;

        if (ring == &dev_priv->ring[VCS]) {
                if (invalidate_domains & I915_GEM_GPU_DOMAINS)
                        cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
                                MI_FLUSH_DW_STORE_INDEX |
                                MI_FLUSH_DW_OP_STOREDW;
        } else {
                if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
                        cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
                                MI_FLUSH_DW_OP_STOREDW;
        }

        intel_logical_ring_emit(ringbuf, cmd);
        intel_logical_ring_emit(ringbuf,
                                I915_GEM_HWS_SCRATCH_ADDR |
                                MI_FLUSH_DW_USE_GTT);
        intel_logical_ring_emit(ringbuf, 0); /* upper addr */
        intel_logical_ring_emit(ringbuf, 0); /* value */
        intel_logical_ring_advance(ringbuf);

        return 0;
}

static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
                                  u32 invalidate_domains,
                                  u32 flush_domains)
{
        struct intel_engine_cs *ring = ringbuf->ring;
        u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        u32 flags = 0;
        int ret;

        flags |= PIPE_CONTROL_CS_STALL;

        if (flush_domains) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
        }

        if (invalidate_domains) {
                flags |= PIPE_CONTROL_TLB_INVALIDATE;
                flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_QW_WRITE;
                flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
        }

        ret = intel_logical_ring_begin(ringbuf, 6);
        if (ret)
                return ret;

        intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
        intel_logical_ring_emit(ringbuf, flags);
        intel_logical_ring_emit(ringbuf, scratch_addr);
        intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_advance(ringbuf);

        return 0;
}

static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
        intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

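/*
 * Emit the end-of-request commands: store the seqno into the hardware status
 * page (via a global-GTT MI_STORE_DWORD_IMM) and raise a user interrupt,
 * then advance and submit the context.
 */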
static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
{
        struct intel_engine_cs *ring = ringbuf->ring;
        u32 cmd;
        int ret;

        ret = intel_logical_ring_begin(ringbuf, 6);
        if (ret)
                return ret;

        cmd = MI_STORE_DWORD_IMM_GEN8;
        cmd |= MI_GLOBAL_GTT;

        intel_logical_ring_emit(ringbuf, cmd);
        intel_logical_ring_emit(ringbuf,
                                (ring->status_page.gfx_addr +
                                (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
        intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
        intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
        intel_logical_ring_emit(ringbuf, MI_NOOP);
        intel_logical_ring_advance_and_submit(ringbuf);

        return 0;
}

void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;

        if (!intel_ring_initialized(ring))
                return;

        intel_logical_ring_stop(ring);
        WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
        ring->preallocated_lazy_request = NULL;
        ring->outstanding_lazy_seqno = 0;

        if (ring->cleanup)
                ring->cleanup(ring);

        i915_cmd_parser_fini_ring(ring);

        if (ring->status_page.obj) {
                kunmap(sg_page(ring->status_page.obj->pages->sgl));
                ring->status_page.obj = NULL;
        }
}

static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
{
        int ret;
        struct intel_context *dctx = ring->default_context;
        struct drm_i915_gem_object *dctx_obj;

        /* Intentionally left blank. */
        ring->buffer = NULL;

        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        init_waitqueue_head(&ring->irq_queue);

        ret = intel_lr_context_deferred_create(dctx, ring);
        if (ret)
                return ret;

        /* The status page is offset 0 from the context object in LRCs. */
        dctx_obj = dctx->engine[ring->id].state;
        ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj);
        ring->status_page.page_addr = kmap(sg_page(dctx_obj->pages->sgl));
        if (ring->status_page.page_addr == NULL)
                return -ENOMEM;
        ring->status_page.obj = dctx_obj;

        ret = i915_cmd_parser_init_ring(ring);
        if (ret)
                return ret;

        if (ring->init) {
                ret = ring->init(ring);
                if (ret)
                        return ret;
        }

        return 0;
}

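/*
 * The per-engine init functions below fill in the gen8 vfuncs. Compared with
 * the legacy ringbuffer setup, the interesting differences are the emit_*
 * hooks (which take an intel_ringbuffer rather than writing ring registers)
 * and irq_keep_mask, whose bits stay unmasked even after the last irq_put so
 * that context-switch interrupts keep arriving.
 */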
static int logical_render_ring_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = &dev_priv->ring[RCS];

        ring->name = "render ring";
        ring->id = RCS;
        ring->mmio_base = RENDER_RING_BASE;
        ring->irq_enable_mask =
                GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
        ring->irq_keep_mask =
                GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
        if (HAS_L3_DPF(dev))
                ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

        ring->init = gen8_init_render_ring;
        ring->cleanup = intel_fini_pipe_control;
        ring->get_seqno = gen8_get_seqno;
        ring->set_seqno = gen8_set_seqno;
        ring->emit_request = gen8_emit_request;
        ring->emit_flush = gen8_emit_flush_render;
        ring->irq_get = gen8_logical_ring_get_irq;
        ring->irq_put = gen8_logical_ring_put_irq;
        ring->emit_bb_start = gen8_emit_bb_start;

        return logical_ring_init(dev, ring);
}

static int logical_bsd_ring_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = &dev_priv->ring[VCS];

        ring->name = "bsd ring";
        ring->id = VCS;
        ring->mmio_base = GEN6_BSD_RING_BASE;
        ring->irq_enable_mask =
                GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
        ring->irq_keep_mask =
                GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;

        ring->init = gen8_init_common_ring;
        ring->get_seqno = gen8_get_seqno;
        ring->set_seqno = gen8_set_seqno;
        ring->emit_request = gen8_emit_request;
        ring->emit_flush = gen8_emit_flush;
        ring->irq_get = gen8_logical_ring_get_irq;
        ring->irq_put = gen8_logical_ring_put_irq;
        ring->emit_bb_start = gen8_emit_bb_start;

        return logical_ring_init(dev, ring);
}

static int logical_bsd2_ring_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = &dev_priv->ring[VCS2];

        ring->name = "bsd2 ring";
        ring->id = VCS2;
        ring->mmio_base = GEN8_BSD2_RING_BASE;
        ring->irq_enable_mask =
                GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
        ring->irq_keep_mask =
                GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;

        ring->init = gen8_init_common_ring;
        ring->get_seqno = gen8_get_seqno;
        ring->set_seqno = gen8_set_seqno;
        ring->emit_request = gen8_emit_request;
        ring->emit_flush = gen8_emit_flush;
        ring->irq_get = gen8_logical_ring_get_irq;
        ring->irq_put = gen8_logical_ring_put_irq;
        ring->emit_bb_start = gen8_emit_bb_start;

        return logical_ring_init(dev, ring);
}

static int logical_blt_ring_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = &dev_priv->ring[BCS];

        ring->name = "blitter ring";
        ring->id = BCS;
        ring->mmio_base = BLT_RING_BASE;
        ring->irq_enable_mask =
                GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
        ring->irq_keep_mask =
                GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;

        ring->init = gen8_init_common_ring;
        ring->get_seqno = gen8_get_seqno;
        ring->set_seqno = gen8_set_seqno;
        ring->emit_request = gen8_emit_request;
        ring->emit_flush = gen8_emit_flush;
        ring->irq_get = gen8_logical_ring_get_irq;
        ring->irq_put = gen8_logical_ring_put_irq;
        ring->emit_bb_start = gen8_emit_bb_start;

        return logical_ring_init(dev, ring);
}

static int logical_vebox_ring_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = &dev_priv->ring[VECS];

        ring->name = "video enhancement ring";
        ring->id = VECS;
        ring->mmio_base = VEBOX_RING_BASE;
        ring->irq_enable_mask =
                GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
        ring->irq_keep_mask =
                GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;

        ring->init = gen8_init_common_ring;
        ring->get_seqno = gen8_get_seqno;
        ring->set_seqno = gen8_set_seqno;
        ring->emit_request = gen8_emit_request;
        ring->emit_flush = gen8_emit_flush;
        ring->irq_get = gen8_logical_ring_get_irq;
        ring->irq_put = gen8_logical_ring_put_irq;
        ring->emit_bb_start = gen8_emit_bb_start;

        return logical_ring_init(dev, ring);
}

int intel_logical_rings_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = logical_render_ring_init(dev);
        if (ret)
                return ret;

        if (HAS_BSD(dev)) {
                ret = logical_bsd_ring_init(dev);
                if (ret)
                        goto cleanup_render_ring;
        }

        if (HAS_BLT(dev)) {
                ret = logical_blt_ring_init(dev);
                if (ret)
                        goto cleanup_bsd_ring;
        }

        if (HAS_VEBOX(dev)) {
                ret = logical_vebox_ring_init(dev);
                if (ret)
                        goto cleanup_blt_ring;
        }

        if (HAS_BSD2(dev)) {
                ret = logical_bsd2_ring_init(dev);
                if (ret)
                        goto cleanup_vebox_ring;
        }

        ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
        if (ret)
                goto cleanup_bsd2_ring;

        return 0;

cleanup_bsd2_ring:
        intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
        intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
cleanup_blt_ring:
        intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
        intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
cleanup_render_ring:
        intel_logical_ring_cleanup(&dev_priv->ring[RCS]);

        return ret;
}

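/*
 * Fill in the register state page of a logical ring context: the
 * MI_LOAD_REGISTER_IMM headers plus (reg, value) pairs for ring head/tail,
 * ring buffer start/control, batch buffer state and the PDP entries taken
 * from the context's PPGTT. The render engine gets a few extra RCS-only
 * registers at the end.
 */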
static int
populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
                    struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
{
        struct drm_i915_gem_object *ring_obj = ringbuf->obj;
        struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
        struct page *page;
        uint32_t *reg_state;
        int ret;

        ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
        if (ret) {
                DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
                return ret;
        }

        ret = i915_gem_object_get_pages(ctx_obj);
        if (ret) {
                DRM_DEBUG_DRIVER("Could not get object pages\n");
                return ret;
        }

        i915_gem_object_pin_pages(ctx_obj);

        /* The second page of the context object contains some fields which must
         * be set up prior to the first execution. */
        page = i915_gem_object_get_page(ctx_obj, 1);
        reg_state = kmap_atomic(page);

        /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
         * commands followed by (reg, value) pairs. The values we are setting here are
         * only for the first context restore: on a subsequent save, the GPU will
         * recreate this batchbuffer with new values (including all the missing
         * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
        if (ring->id == RCS)
                reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
        else
                reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
        reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
        reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
        reg_state[CTX_CONTEXT_CONTROL+1] =
                _MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
        reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
        reg_state[CTX_RING_HEAD+1] = 0;
        reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
        reg_state[CTX_RING_TAIL+1] = 0;
        reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
        reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
        reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
        reg_state[CTX_RING_BUFFER_CONTROL+1] =
                ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
        reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
        reg_state[CTX_BB_HEAD_U+1] = 0;
        reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
        reg_state[CTX_BB_HEAD_L+1] = 0;
        reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
        reg_state[CTX_BB_STATE+1] = (1<<5);
        reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
        reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
        reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
        reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
        reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
        reg_state[CTX_SECOND_BB_STATE+1] = 0;
        if (ring->id == RCS) {
                /* TODO: according to BSpec, the register state context
                 * for CHV does not have these. OTOH, these registers do
                 * exist in CHV. I'm waiting for a clarification */
                reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
                reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
                reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
                reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
                reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
                reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
        }
        reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
        reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
        reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
        reg_state[CTX_CTX_TIMESTAMP+1] = 0;
        reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
        reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
        reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
        reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
        reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
        reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
        reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
        reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
        reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
        reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
        reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
        reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
        reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
        reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
        reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
        reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
        if (ring->id == RCS) {
                reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
                reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
                reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
        }

        kunmap_atomic(reg_state);

        ctx_obj->dirty = 1;
        set_page_dirty(page);
        i915_gem_object_unpin_pages(ctx_obj);

        return 0;
}

void intel_lr_context_free(struct intel_context *ctx)
{
        int i;

        for (i = 0; i < I915_NUM_RINGS; i++) {
                struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
                struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;

                if (ctx_obj) {
                        intel_destroy_ringbuffer_obj(ringbuf);
                        kfree(ringbuf);
                        i915_gem_object_ggtt_unpin(ctx_obj);
                        drm_gem_object_unreference(&ctx_obj->base);
                }
        }
}

static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
{
        int ret = 0;

        WARN_ON(INTEL_INFO(ring->dev)->gen != 8);

        switch (ring->id) {
        case RCS:
                ret = GEN8_LR_CONTEXT_RENDER_SIZE;
                break;
        case VCS:
        case BCS:
        case VECS:
        case VCS2:
                ret = GEN8_LR_CONTEXT_OTHER_SIZE;
                break;
        }

        return ret;
}

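/*
 * Logical ring contexts are created lazily: the first time a context is used
 * on a given engine we allocate and pin the backing object, allocate a
 * ringbuffer for it and populate the register state. Subsequent calls for
 * the same (context, engine) pair return immediately.
 */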
int intel_lr_context_deferred_create(struct intel_context *ctx,
                                     struct intel_engine_cs *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_gem_object *ctx_obj;
        uint32_t context_size;
        struct intel_ringbuffer *ringbuf;
        int ret;

        WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
        if (ctx->engine[ring->id].state)
                return 0;

        context_size = round_up(get_lr_context_size(ring), 4096);

        ctx_obj = i915_gem_alloc_context_obj(dev, context_size);
        if (IS_ERR(ctx_obj)) {
                ret = PTR_ERR(ctx_obj);
                DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
                return ret;
        }

        ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
        if (ret) {
                DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
                drm_gem_object_unreference(&ctx_obj->base);
                return ret;
        }

        ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
        if (!ringbuf) {
                DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
                                 ring->name);
                i915_gem_object_ggtt_unpin(ctx_obj);
                drm_gem_object_unreference(&ctx_obj->base);
                ret = -ENOMEM;
                return ret;
        }

        ringbuf->ring = ring;
        ringbuf->FIXME_lrc_ctx = ctx;

        ringbuf->size = 32 * PAGE_SIZE;
        ringbuf->effective_size = ringbuf->size;
        ringbuf->head = 0;
        ringbuf->tail = 0;
        ringbuf->space = ringbuf->size;
        ringbuf->last_retired_head = -1;

        /* TODO: For now we put this in the mappable region so that we can reuse
         * the existing ringbuffer code which ioremaps it. When we start
         * creating many contexts, this will no longer work and we must switch
         * to a kmapish interface.
         */
        ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
        if (ret) {
                DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
                                 ring->name, ret);
                goto error;
        }

        ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
        if (ret) {
                DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
                intel_destroy_ringbuffer_obj(ringbuf);
                goto error;
        }

        ctx->engine[ring->id].ringbuf = ringbuf;
        ctx->engine[ring->id].state = ctx_obj;

        return 0;

error:
        kfree(ringbuf);
        i915_gem_object_ggtt_unpin(ctx_obj);
        drm_gem_object_unreference(&ctx_obj->base);
        return ret;
}