drm/i915/selftests: Setup engine->retire for mock_engine
[linux-2.6-block.git] / drivers / gpu / drm / i915 / gt / intel_gt.c
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5
6 #include "i915_drv.h"
7 #include "intel_gt.h"
8 #include "intel_gt_pm.h"
9 #include "intel_gt_requests.h"
10 #include "intel_mocs.h"
11 #include "intel_rc6.h"
12 #include "intel_rps.h"
13 #include "intel_uncore.h"
14 #include "intel_pm.h"
15
/**
 * intel_gt_init_early - earliest, software-only initialisation of a GT
 * @gt: the GT to initialise
 * @i915: the owning i915 device
 *
 * Hooks the GT up to its device and uncore, initialises its locks and the
 * closed-vma list, then performs the early (no hardware access) setup of
 * the reset, request, timeline, GT-PM, rps and uc subsystems, in that
 * order. Must run before any other intel_gt_* entry point is used.
 */
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
	gt->i915 = i915;
	gt->uncore = &i915->uncore;

	spin_lock_init(&gt->irq_lock);

	INIT_LIST_HEAD(&gt->closed_vma);
	spin_lock_init(&gt->closed_lock);

	intel_gt_init_reset(gt);
	intel_gt_init_requests(gt);
	intel_gt_init_timelines(gt);
	intel_gt_pm_init_early(gt);

	intel_rps_init_early(&gt->rps);
	intel_uc_init_early(&gt->uc);
}
34
/*
 * Second-stage early init, once the GGTT exists: record it on the GT and
 * sanitize the GT state. The 'false' argument is forwarded to
 * intel_gt_sanitize() -- presumably "do not force a wakeup/reset"; confirm
 * against intel_gt_sanitize()'s parameter name.
 */
void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
{
	gt->ggtt = ggtt;

	intel_gt_sanitize(gt, false);
}
41
/*
 * Force one unused ring at @base to a fully idle state by zeroing its
 * control, head, tail and start registers, so a stale head != tail cannot
 * make the hardware believe the ring is still active.
 */
static void init_unused_ring(struct intel_gt *gt, u32 base)
{
	struct intel_uncore *uncore = gt->uncore;

	intel_uncore_write(uncore, RING_CTL(base), 0);
	intel_uncore_write(uncore, RING_HEAD(base), 0);
	intel_uncore_write(uncore, RING_TAIL(base), 0);
	intel_uncore_write(uncore, RING_START(base), 0);
}
51
52 static void init_unused_rings(struct intel_gt *gt)
53 {
54         struct drm_i915_private *i915 = gt->i915;
55
56         if (IS_I830(i915)) {
57                 init_unused_ring(gt, PRB1_BASE);
58                 init_unused_ring(gt, SRB0_BASE);
59                 init_unused_ring(gt, SRB1_BASE);
60                 init_unused_ring(gt, SRB2_BASE);
61                 init_unused_ring(gt, SRB3_BASE);
62         } else if (IS_GEN(i915, 2)) {
63                 init_unused_ring(gt, SRB0_BASE);
64                 init_unused_ring(gt, SRB1_BASE);
65         } else if (IS_GEN(i915, 3)) {
66                 init_unused_ring(gt, PRB1_BASE);
67                 init_unused_ring(gt, PRB2_BASE);
68         }
69 }
70
71 int intel_gt_init_hw(struct intel_gt *gt)
72 {
73         struct drm_i915_private *i915 = gt->i915;
74         struct intel_uncore *uncore = gt->uncore;
75         int ret;
76
77         BUG_ON(!i915->kernel_context);
78         ret = intel_gt_terminally_wedged(gt);
79         if (ret)
80                 return ret;
81
82         gt->last_init_time = ktime_get();
83
84         /* Double layer security blanket, see i915_gem_init() */
85         intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
86
87         if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
88                 intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
89
90         if (IS_HASWELL(i915))
91                 intel_uncore_write(uncore,
92                                    MI_PREDICATE_RESULT_2,
93                                    IS_HSW_GT3(i915) ?
94                                    LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
95
96         /* Apply the GT workarounds... */
97         intel_gt_apply_workarounds(gt);
98         /* ...and determine whether they are sticking. */
99         intel_gt_verify_workarounds(gt, "init");
100
101         intel_gt_init_swizzling(gt);
102
103         /*
104          * At least 830 can leave some of the unused rings
105          * "active" (ie. head != tail) after resume which
106          * will prevent c3 entry. Makes sure all unused rings
107          * are totally idle.
108          */
109         init_unused_rings(gt);
110
111         ret = i915_ppgtt_init_hw(gt);
112         if (ret) {
113                 DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
114                 goto out;
115         }
116
117         /* We can't enable contexts until all firmware is loaded */
118         ret = intel_uc_init_hw(&gt->uc);
119         if (ret) {
120                 i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
121                 goto out;
122         }
123
124         intel_mocs_init(gt);
125
126 out:
127         intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
128         return ret;
129 }
130
/* Set the @set bits in @reg, preserving all others (read-modify-write). */
static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw(uncore, reg, 0, set);
}
135
/* Clear the @clr bits in @reg, preserving all others (read-modify-write). */
static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw(uncore, reg, clr, 0);
}
140
/*
 * Write @reg back with its current value (rmw with no bits changed).
 * NOTE(review): this only "clears" anything if @reg has write-1-to-clear
 * semantics (writing the set bits back resets them), and if
 * intel_uncore_rmw() still issues the write when the value is unchanged --
 * confirm both against the register definitions and the rmw helper.
 */
static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
{
	intel_uncore_rmw(uncore, reg, 0, 0);
}
145
146 static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
147 {
148         GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
149         GEN6_RING_FAULT_REG_POSTING_READ(engine);
150 }
151
152 void
153 intel_gt_clear_error_registers(struct intel_gt *gt,
154                                intel_engine_mask_t engine_mask)
155 {
156         struct drm_i915_private *i915 = gt->i915;
157         struct intel_uncore *uncore = gt->uncore;
158         u32 eir;
159
160         if (!IS_GEN(i915, 2))
161                 clear_register(uncore, PGTBL_ER);
162
163         if (INTEL_GEN(i915) < 4)
164                 clear_register(uncore, IPEIR(RENDER_RING_BASE));
165         else
166                 clear_register(uncore, IPEIR_I965);
167
168         clear_register(uncore, EIR);
169         eir = intel_uncore_read(uncore, EIR);
170         if (eir) {
171                 /*
172                  * some errors might have become stuck,
173                  * mask them.
174                  */
175                 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
176                 rmw_set(uncore, EMR, eir);
177                 intel_uncore_write(uncore, GEN2_IIR,
178                                    I915_MASTER_ERROR_INTERRUPT);
179         }
180
181         if (INTEL_GEN(i915) >= 12) {
182                 rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
183                 intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
184         } else if (INTEL_GEN(i915) >= 8) {
185                 rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
186                 intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
187         } else if (INTEL_GEN(i915) >= 6) {
188                 struct intel_engine_cs *engine;
189                 enum intel_engine_id id;
190
191                 for_each_engine_masked(engine, gt, engine_mask, id)
192                         gen8_clear_engine_error_register(engine);
193         }
194 }
195
/*
 * Scan the per-engine fault registers (gen6-7) and log any fault that is
 * still flagged valid. Read-only: clearing is left to
 * intel_gt_clear_error_registers().
 */
static void gen6_check_faults(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 fault;

	for_each_engine(engine, gt, id) {
		fault = GEN6_RING_FAULT_REG_READ(engine);
		if (fault & RING_FAULT_VALID) {
			/*
			 * fault & PAGE_MASK promotes to unsigned long
			 * (PAGE_MASK is long), matching the %08lx format.
			 */
			DRM_DEBUG_DRIVER("Unexpected fault\n"
					 "\tAddr: 0x%08lx\n"
					 "\tAddress space: %s\n"
					 "\tSource ID: %d\n"
					 "\tType: %d\n",
					 fault & PAGE_MASK,
					 fault & RING_FAULT_GTTSEL_MASK ?
					 "GGTT" : "PPGTT",
					 RING_FAULT_SRCID(fault),
					 RING_FAULT_FAULT_TYPE(fault));
		}
	}
}
218
/*
 * Check the single global fault register (gen8+; gen12 moved it and its
 * TLB data registers) and log any fault still flagged valid. Read-only:
 * clearing is left to intel_gt_clear_error_registers().
 */
static void gen8_check_faults(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
	u32 fault;

	/* gen12 relocated the fault registers; pick the right set */
	if (INTEL_GEN(gt->i915) >= 12) {
		fault_reg = GEN12_RING_FAULT_REG;
		fault_data0_reg = GEN12_FAULT_TLB_DATA0;
		fault_data1_reg = GEN12_FAULT_TLB_DATA1;
	} else {
		fault_reg = GEN8_RING_FAULT_REG;
		fault_data0_reg = GEN8_FAULT_TLB_DATA0;
		fault_data1_reg = GEN8_FAULT_TLB_DATA1;
	}

	fault = intel_uncore_read(uncore, fault_reg);
	if (fault & RING_FAULT_VALID) {
		u32 fault_data0, fault_data1;
		u64 fault_addr;

		fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
		fault_data1 = intel_uncore_read(uncore, fault_data1_reg);

		/*
		 * Reassemble the faulting GPU address: DATA0 holds the
		 * page-aligned low bits (<<12), DATA1 the high VA bits
		 * above bit 44.
		 */
		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
			     ((u64)fault_data0 << 12);

		DRM_DEBUG_DRIVER("Unexpected fault\n"
				 "\tAddr: 0x%08x_%08x\n"
				 "\tAddress space: %s\n"
				 "\tEngine ID: %d\n"
				 "\tSource ID: %d\n"
				 "\tType: %d\n",
				 upper_32_bits(fault_addr),
				 lower_32_bits(fault_addr),
				 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
				 GEN8_RING_FAULT_ENGINE_ID(fault),
				 RING_FAULT_SRCID(fault),
				 RING_FAULT_FAULT_TYPE(fault));
	}
}
260
261 void intel_gt_check_and_clear_faults(struct intel_gt *gt)
262 {
263         struct drm_i915_private *i915 = gt->i915;
264
265         /* From GEN8 onwards we only have one 'All Engine Fault Register' */
266         if (INTEL_GEN(i915) >= 8)
267                 gen8_check_faults(gt);
268         else if (INTEL_GEN(i915) >= 6)
269                 gen6_check_faults(gt);
270         else
271                 return;
272
273         intel_gt_clear_error_registers(gt, ALL_ENGINES);
274 }
275
/**
 * intel_gt_flush_ggtt_writes - ensure CPU writes through the GGTT have landed
 * @gt: the GT whose aperture was written
 *
 * Orders prior GGTT writes against subsequent device accesses, and on
 * platforms without coherent GGTT additionally flushes the chipset and
 * performs an uncached mmio read to wait for the writes to settle.
 */
void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	intel_wakeref_t wakeref;

	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was!).
	 */

	wmb();

	/* Coherent GGTT platforms need only the ordering barrier above */
	if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
		return;

	intel_gt_chipset_flush(gt);

	/* Only touch the hardware if it is already awake */
	with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
		unsigned long flags;

		/* Uncached read to round-trip the writes (see above) */
		spin_lock_irqsave(&uncore->lock, flags);
		intel_uncore_posting_read_fw(uncore,
					     RING_HEAD(RENDER_RING_BASE));
		spin_unlock_irqrestore(&uncore->lock, flags);
	}
}
316
/*
 * Order prior CPU writes, then flush the chipset write buffers on
 * pre-gen6 platforms (gen6+ have no such chipset-level buffering to
 * flush via intel_gtt_chipset_flush()).
 */
void intel_gt_chipset_flush(struct intel_gt *gt)
{
	wmb();
	if (INTEL_GEN(gt->i915) < 6)
		intel_gtt_chipset_flush();
}
323
/*
 * Expose the GT's user-visible interfaces; currently only rps.
 * Paired with intel_gt_driver_unregister().
 */
void intel_gt_driver_register(struct intel_gt *gt)
{
	intel_rps_driver_register(&gt->rps);
}
328
329 static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
330 {
331         struct drm_i915_private *i915 = gt->i915;
332         struct drm_i915_gem_object *obj;
333         struct i915_vma *vma;
334         int ret;
335
336         obj = i915_gem_object_create_stolen(i915, size);
337         if (IS_ERR(obj))
338                 obj = i915_gem_object_create_internal(i915, size);
339         if (IS_ERR(obj)) {
340                 DRM_ERROR("Failed to allocate scratch page\n");
341                 return PTR_ERR(obj);
342         }
343
344         vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
345         if (IS_ERR(vma)) {
346                 ret = PTR_ERR(vma);
347                 goto err_unref;
348         }
349
350         ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
351         if (ret)
352                 goto err_unref;
353
354         gt->scratch = i915_vma_make_unshrinkable(vma);
355
356         return 0;
357
358 err_unref:
359         i915_gem_object_put(obj);
360         return ret;
361 }
362
/* Release the scratch page pinned by intel_gt_init_scratch(). */
static void intel_gt_fini_scratch(struct intel_gt *gt)
{
	i915_vma_unpin_and_release(&gt->scratch, 0);
}
367
/**
 * intel_gt_init - allocate the GT's software resources
 * @gt: the GT to initialise
 *
 * Sets up the scratch page (gen2 uses a larger 256K scratch --
 * presumably a hardware requirement of that generation; confirm against
 * the gen2 docs) and the GT power management. Undone by
 * intel_gt_driver_release().
 *
 * Returns 0 on success or a negative errno.
 */
int intel_gt_init(struct intel_gt *gt)
{
	int err;

	err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
	if (err)
		return err;

	intel_gt_pm_init(gt);

	return 0;
}
380
/*
 * Driver-remove hook: nothing to tear down here yet, but the GT must
 * already be asleep by this point.
 */
void intel_gt_driver_remove(struct intel_gt *gt)
{
	GEM_BUG_ON(gt->awake);
}
385
/* Withdraw the interfaces exposed by intel_gt_driver_register(). */
void intel_gt_driver_unregister(struct intel_gt *gt)
{
	intel_rps_driver_unregister(&gt->rps);
}
390
/* Release the resources allocated in intel_gt_init(), in reverse order. */
void intel_gt_driver_release(struct intel_gt *gt)
{
	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
}
396
/*
 * Final teardown of the state created in intel_gt_init_early(), run late
 * in driver unload once nothing can submit anymore.
 */
void intel_gt_driver_late_release(struct intel_gt *gt)
{
	intel_uc_driver_late_release(&gt->uc);
	intel_gt_fini_requests(gt);
	intel_gt_fini_reset(gt);
	intel_gt_fini_timelines(gt);
}