// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_uncore.h"

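/*
 * Early, software-only initialisation of the GT: hook up the backpointers
 * to the owning device and its MMIO block, and set up the lists and locks
 * that the rest of the GT code relies on. No hardware is touched yet.
 */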
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
        gt->i915 = i915;
        gt->uncore = &i915->uncore;

        INIT_LIST_HEAD(&gt->active_rings);
        INIT_LIST_HEAD(&gt->closed_vma);

        spin_lock_init(&gt->closed_lock);

        intel_gt_init_hangcheck(gt);
        intel_gt_init_reset(gt);
        intel_gt_pm_init_early(gt);
}

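/*
 * Hardware-facing counterpart of the early init; for now it only points
 * the GT at the device's global GTT.
 */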
void intel_gt_init_hw(struct drm_i915_private *i915)
{
        i915->gt.ggtt = &i915->ggtt;
}

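/*
 * Thin read-modify-write helpers. For example, rmw_set(uncore, EMR, eir)
 * below ORs stuck error bits into the error mask register while leaving
 * the remaining mask bits untouched.
 */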
static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
        intel_uncore_rmw(uncore, reg, 0, set);
}

static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
        intel_uncore_rmw(uncore, reg, clr, 0);
}

static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
{
        intel_uncore_rmw(uncore, reg, 0, 0);
}

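/*
 * Clear the fault-valid bit in a per-engine fault register; the posting
 * read flushes the write before the caller moves on to the next engine.
 */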
static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
{
        GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
        GEN6_RING_FAULT_REG_POSTING_READ(engine);
}

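/*
 * Write back (and thereby clear) any latched error state so that stale
 * reports do not confuse the next hang/reset diagnosis. Error bits that
 * refuse to clear are masked off in EMR instead.
 */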
void
intel_gt_clear_error_registers(struct intel_gt *gt,
                               intel_engine_mask_t engine_mask)
{
        struct drm_i915_private *i915 = gt->i915;
        struct intel_uncore *uncore = gt->uncore;
        u32 eir;

        if (!IS_GEN(i915, 2))
                clear_register(uncore, PGTBL_ER);

        if (INTEL_GEN(i915) < 4)
                clear_register(uncore, IPEIR(RENDER_RING_BASE));
        else
                clear_register(uncore, IPEIR_I965);

        clear_register(uncore, EIR);
        eir = intel_uncore_read(uncore, EIR);
        if (eir) {
                /*
                 * Some errors might have become stuck,
                 * so mask them.
                 */
                DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
                rmw_set(uncore, EMR, eir);
                intel_uncore_write(uncore, GEN2_IIR,
                                   I915_MASTER_ERROR_INTERRUPT);
        }

        if (INTEL_GEN(i915) >= 8) {
                rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
                intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
        } else if (INTEL_GEN(i915) >= 6) {
                struct intel_engine_cs *engine;
                enum intel_engine_id id;

                for_each_engine_masked(engine, i915, engine_mask, id)
                        gen8_clear_engine_error_register(engine);
        }
}

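/*
 * gen6-7 expose a fault register per engine, so each engine must be
 * polled individually for a latched RING_FAULT_VALID report.
 */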
static void gen6_check_faults(struct intel_gt *gt)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        u32 fault;

        for_each_engine(engine, gt->i915, id) {
                fault = GEN6_RING_FAULT_REG_READ(engine);
                if (fault & RING_FAULT_VALID) {
                        DRM_DEBUG_DRIVER("Unexpected fault\n"
                                         "\tAddr: 0x%08lx\n"
                                         "\tAddress space: %s\n"
                                         "\tSource ID: %d\n"
                                         "\tType: %d\n",
                                         fault & PAGE_MASK,
                                         fault & RING_FAULT_GTTSEL_MASK ?
                                         "GGTT" : "PPGTT",
                                         RING_FAULT_SRCID(fault),
                                         RING_FAULT_FAULT_TYPE(fault));
                }
        }
}

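/*
 * gen8+ collapse the per-engine registers into a single fault register,
 * with the faulting virtual address reported separately: bits 43:12 come
 * from FAULT_TLB_DATA0 and bits 47:44 from FAULT_TLB_DATA1, hence the
 * shifts used to stitch fault_addr back together below.
 */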
static void gen8_check_faults(struct intel_gt *gt)
{
        struct intel_uncore *uncore = gt->uncore;
        u32 fault = intel_uncore_read(uncore, GEN8_RING_FAULT_REG);

        if (fault & RING_FAULT_VALID) {
                u32 fault_data0, fault_data1;
                u64 fault_addr;

                fault_data0 = intel_uncore_read(uncore, GEN8_FAULT_TLB_DATA0);
                fault_data1 = intel_uncore_read(uncore, GEN8_FAULT_TLB_DATA1);
                fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
                             ((u64)fault_data0 << 12);

                DRM_DEBUG_DRIVER("Unexpected fault\n"
                                 "\tAddr: 0x%08x_%08x\n"
                                 "\tAddress space: %s\n"
                                 "\tEngine ID: %d\n"
                                 "\tSource ID: %d\n"
                                 "\tType: %d\n",
                                 upper_32_bits(fault_addr),
                                 lower_32_bits(fault_addr),
                                 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
                                 GEN8_RING_FAULT_ENGINE_ID(fault),
                                 RING_FAULT_SRCID(fault),
                                 RING_FAULT_FAULT_TYPE(fault));
        }
}

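/*
 * Report any faults left latched in the registers (e.g. by a GPU hang
 * that has since been reset), then clear the error state so the next
 * user starts from a clean slate. Pre-gen6 parts have no fault registers
 * to check.
 */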
void intel_gt_check_and_clear_faults(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;

        /* From GEN8 onwards we only have one 'All Engine Fault Register' */
        if (INTEL_GEN(i915) >= 8)
                gen8_check_faults(gt);
        else if (INTEL_GEN(i915) >= 6)
                gen6_check_faults(gt);
        else
                return;

        intel_gt_clear_error_registers(gt, ALL_ENGINES);
}

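/*
 * Make sure CPU writes that went through the GGTT are visible to the
 * device before any subsequent MMIO writes; see the ordering notes in
 * the body below.
 */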
void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;
        intel_wakeref_t wakeref;

        /*
         * No actual flushing is required for the GTT write domain for reads
         * from the GTT domain. Writes to it "immediately" go to main memory
         * as far as we know, so there's no chipset flush. It also doesn't
         * land in the GPU render cache.
         *
         * However, we do have to enforce the order so that all writes through
         * the GTT land before any writes to the device, such as updates to
         * the GATT itself.
         *
         * We also have to wait a bit for the writes to land from the GTT.
         * An uncached read (i.e. mmio) seems to be ideal for the round-trip
         * timing. This issue has only been observed when switching quickly
         * between GTT writes and CPU reads from inside the kernel on recent hw,
         * and it appears to only affect discrete GTT blocks (i.e. on LLC
         * system agents we cannot reproduce this behaviour, until Cannonlake
         * that was removed from the hardware design?).
         */

        wmb();

        if (INTEL_INFO(i915)->has_coherent_ggtt)
                return;

        intel_gt_chipset_flush(gt);

        with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
                struct intel_uncore *uncore = gt->uncore;

                spin_lock_irq(&uncore->lock);
                intel_uncore_posting_read_fw(uncore,
                                             RING_HEAD(RENDER_RING_BASE));
                spin_unlock_irq(&uncore->lock);
        }
}

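/*
 * Pre-gen6 parts route GGTT writes through the chipset, which needs an
 * explicit flush via the intel-gtt helper; everything newer only needs
 * the write barrier.
 */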
void intel_gt_chipset_flush(struct intel_gt *gt)
{
        wmb();
        if (INTEL_GEN(gt->i915) < 6)
                intel_gtt_chipset_flush();
}

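/*
 * Allocate and pin the GT-wide scratch page in the GGTT, preferring
 * stolen memory and falling back to an internal object. A sketch of the
 * intended pairing (the caller and size are illustrative, not taken from
 * this file):
 *
 *	err = intel_gt_init_scratch(gt, PAGE_SIZE);
 *	if (err)
 *		return err;
 *	...
 *	intel_gt_fini_scratch(gt);
 */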
int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
{
        struct drm_i915_private *i915 = gt->i915;
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        int ret;

        obj = i915_gem_object_create_stolen(i915, size);
        if (!obj)
                obj = i915_gem_object_create_internal(i915, size);
        if (IS_ERR(obj)) {
                DRM_ERROR("Failed to allocate scratch page\n");
                return PTR_ERR(obj);
        }

        vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_unref;
        }

        ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
        if (ret)
                goto err_unref;

        gt->scratch = vma;
        return 0;

err_unref:
        i915_gem_object_put(obj);
        return ret;
}

void intel_gt_fini_scratch(struct intel_gt *gt)
{
        i915_vma_unpin_and_release(&gt->scratch, 0);
}

void intel_gt_cleanup_early(struct intel_gt *gt)
{
        intel_gt_fini_reset(gt);
}