/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022-2023 Intel Corporation
 */

#ifndef _XE_GT_TYPES_H_
#define _XE_GT_TYPES_H_

#include "xe_force_wake_types.h"
#include "xe_gt_idle_types.h"
#include "xe_hw_engine_types.h"
#include "xe_hw_fence_types.h"
#include "xe_reg_sr_types.h"
#include "xe_sa_types.h"
#include "xe_uc_types.h"

struct xe_exec_queue_ops;
struct xe_ring_ops;

enum xe_gt_type {
	XE_GT_TYPE_UNINITIALIZED,
	XE_GT_TYPE_MAIN,
	XE_GT_TYPE_MEDIA,
};

#define XE_MAX_DSS_FUSE_REGS	3
#define XE_MAX_EU_FUSE_REGS	1

typedef unsigned long xe_dss_mask_t[BITS_TO_LONGS(32 * XE_MAX_DSS_FUSE_REGS)];
typedef unsigned long xe_eu_mask_t[BITS_TO_LONGS(32 * XE_MAX_EU_FUSE_REGS)];

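/*
 * A minimal usage sketch (not part of the driver): these typedefs are plain
 * bitmaps sized to cover every fuse-register bit, so they can be consulted
 * with the regular kernel bitmap API, e.g. counting enabled dual-subslices:
 *
 *	unsigned int dss = bitmap_weight(gt->fuse_topo.g_dss_mask,
 *					 32 * XE_MAX_DSS_FUSE_REGS);
 */
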
struct xe_mmio_range {
	u32 start;
	u32 end;
};

/*
 * The hardware has multiple kinds of multicast register ranges that need
 * special register steering (and future platforms are expected to add
 * additional types).
 *
 * During driver startup, we initialize the steering control register to
 * direct reads to a slice/subslice that is valid for the 'subslice' class
 * of multicast registers. If another type of steering does not have any
 * overlap in valid steering targets with 'subslice' style registers, we will
 * need to explicitly re-steer reads of registers of the other type.
 *
 * Only the replication types that may need additional non-default steering
 * are listed here.
 */
enum xe_steering_type {
	L3BANK,
	MSLICE,
	LNCF,
	DSS,
	OADDRM,
	SQIDI_PSMI,

	/*
	 * On some platforms there are multiple types of MCR registers that
	 * will always return a non-terminated value at instance (0, 0). We'll
	 * lump those all into a single category to keep things simple.
	 */
	INSTANCE0,

	/*
	 * Register ranges that don't need special steering for each register:
	 * it's sufficient to keep the HW-default for the selector, or only
	 * change it once, on GT initialization. This needs to be the last
	 * steering type.
	 */
	IMPLICIT_STEERING,
	NUM_STEERING_TYPES
};

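/*
 * A rough sketch of how these types are meant to be used, assuming
 * hypothetical reg_in_ranges() and steer_and_read() helpers; the real lookup
 * and steering logic lives in xe_gt_mcr.c:
 *
 *	for (type = 0; type < IMPLICIT_STEERING; type++)
 *		if (reg_in_ranges(reg, gt->steering[type].ranges))
 *			return steer_and_read(gt,
 *					      gt->steering[type].group_target,
 *					      gt->steering[type].instance_target,
 *					      reg);
 */
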
#define gt_to_tile(gt__)							\
	_Generic(gt__,								\
		 const struct xe_gt * : (const struct xe_tile *)((gt__)->tile),	\
		 struct xe_gt * : (gt__)->tile)

#define gt_to_xe(gt__)										\
	_Generic(gt__,										\
		 const struct xe_gt * : (const struct xe_device *)(gt_to_tile(gt__)->xe),	\
		 struct xe_gt * : gt_to_tile(gt__)->xe)

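/*
 * Usage sketch (illustrative only): the _Generic selection above preserves
 * constness, so a const GT pointer yields const tile/device pointers:
 *
 *	static void example(const struct xe_gt *gt)
 *	{
 *		const struct xe_tile *tile = gt_to_tile(gt);
 *		const struct xe_device *xe = gt_to_xe(gt);
 *		...
 *	}
 */
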
/**
 * struct xe_gt - A "Graphics Technology" unit of the GPU
 *
 * A GT ("Graphics Technology") is the subset of a GPU primarily responsible
 * for implementing the graphics, compute, and/or media IP. It encapsulates
 * the hardware engines, programmable execution units, and GuC. Each GT has
 * its own handling of power management (RC6+forcewake) and multicast register
 * steering.
 *
 * A GPU/tile may have a single GT that supplies all graphics, compute, and
 * media functionality, or the graphics/compute and media may be split into
 * separate GTs within a tile.
 */
struct xe_gt {
	/** @tile: Backpointer to GT's tile */
	struct xe_tile *tile;

	/** @info: GT info */
	struct {
		/** @info.type: type of GT */
		enum xe_gt_type type;
		/** @info.id: Unique ID of this GT within the PCI Device */
		u8 id;
		/** @info.reference_clock: clock frequency */
		u32 reference_clock;
		/** @info.engine_mask: mask of engines present on GT */
		u64 engine_mask;
		/**
		 * @info.__engine_mask: mask of engines present on GT read from
		 * xe_pci.c, used to fake reading the engine_mask from the
		 * hwconfig blob.
		 */
		u64 __engine_mask;
		/** @info.gmdid: raw GMD_ID value from hardware */
		u32 gmdid;
	} info;

	/**
	 * @mmio: mmio info for GT. All GTs within a tile share the same
	 * register space, but have their own copy of GSI registers at a
	 * specific offset, as well as their own forcewake handling.
	 */
	struct {
		/** @mmio.fw: force wake for GT */
		struct xe_force_wake fw;
		/**
		 * @mmio.adj_limit: adjust MMIO address if address is below this
		 * value
		 */
		u32 adj_limit;
		/** @mmio.adj_offset: offset to add to MMIO address when adjusting */
		u32 adj_offset;
	} mmio;

	/**
	 * @reg_sr: table with registers to be restored on GT init/resume/reset
	 */
	struct xe_reg_sr reg_sr;

	/** @reset: state for GT resets */
	struct {
		/**
		 * @reset.worker: worker so GT resets can be done asynchronously,
		 * allowing the reset code to safely flush all code paths
		 */
		struct work_struct worker;
	} reset;

	/** @tlb_invalidation: TLB invalidation state */
	struct {
		/** @tlb_invalidation.seqno: TLB invalidation seqno, protected by CT lock */
#define TLB_INVALIDATION_SEQNO_MAX	0x100000
		int seqno;
		/**
		 * @tlb_invalidation.seqno_recv: last received TLB invalidation seqno,
		 * protected by CT lock
		 */
		int seqno_recv;
		/**
		 * @tlb_invalidation.pending_fences: list of pending fences waiting for TLB
		 * invalidations, protected by CT lock
		 */
		struct list_head pending_fences;
		/**
		 * @tlb_invalidation.pending_lock: protects @tlb_invalidation.pending_fences
		 * and updating @tlb_invalidation.seqno_recv.
		 */
		spinlock_t pending_lock;
		/**
		 * @tlb_invalidation.fence_tdr: schedules a delayed call to
		 * xe_gt_tlb_fence_timeout after the timeout interval is over.
		 */
		struct delayed_work fence_tdr;
		/** @tlb_invalidation.lock: protects TLB invalidation fences */
		spinlock_t lock;
	} tlb_invalidation;

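	/*
	 * Illustrative sketch (not the driver's actual code): seqnos are
	 * allocated under the CT lock and wrap at TLB_INVALIDATION_SEQNO_MAX,
	 * roughly:
	 *
	 *	gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
	 *				     TLB_INVALIDATION_SEQNO_MAX;
	 */
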
	/**
	 * @ccs_mode: Number of compute engines enabled.
	 * Allows fixed mapping of available compute slices to compute engines.
	 * By default only the first available compute engine is enabled and all
	 * available compute slices are allocated to it.
	 */
	u32 ccs_mode;

	/** @usm: unified shared memory state */
	struct {
		/**
		 * @usm.bb_pool: Pool from which batchbuffers for USM operations
		 * (e.g. migrations, fixing page tables) are allocated.
		 * A dedicated pool is needed so USM operations do not get blocked
		 * behind any user operations which may have resulted in a
		 * fault.
		 */
		struct xe_sa_manager *bb_pool;
		/**
		 * @usm.reserved_bcs_instance: reserved BCS instance used for USM
		 * operations (e.g. migrations, fixing page tables)
		 */
		u16 reserved_bcs_instance;
		/** @usm.pf_wq: page fault work queue, unbound, high priority */
		struct workqueue_struct *pf_wq;
		/** @usm.acc_wq: access counter work queue, unbound, high priority */
		struct workqueue_struct *acc_wq;
		/**
		 * @usm.pf_queue: Page fault queue used to sync faults so faults can
		 * be processed outside the GuC CT lock. The queue is sized so
		 * it can sync all possible faults (1 per physical engine).
		 * Multiple queues exist so that page faults from different VMs can
		 * be processed in parallel.
		 */
		struct pf_queue {
			/** @usm.pf_queue.gt: back pointer to GT */
			struct xe_gt *gt;
#define PF_QUEUE_NUM_DW	128
			/** @usm.pf_queue.data: data in the page fault queue */
			u32 data[PF_QUEUE_NUM_DW];
			/**
			 * @usm.pf_queue.tail: tail pointer in DWs for page fault queue,
			 * moved by worker which processes faults (consumer).
			 */
			u16 tail;
			/**
			 * @usm.pf_queue.head: head pointer in DWs for page fault queue,
			 * moved by G2H handler (producer).
			 */
			u16 head;
			/** @usm.pf_queue.lock: protects page fault queue */
			spinlock_t lock;
			/** @usm.pf_queue.worker: to process page faults */
			struct work_struct worker;
#define NUM_PF_QUEUE	4
		} pf_queue[NUM_PF_QUEUE];
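		/*
		 * Illustrative sketch (not the driver's code) of the
		 * producer/consumer arithmetic behind @usm.pf_queue.head and
		 * @usm.pf_queue.tail, using linux/circ_buf.h conventions and a
		 * hypothetical msg_len_dw message length:
		 *
		 *	free = CIRC_SPACE(head, tail, PF_QUEUE_NUM_DW);
		 *	used = CIRC_CNT(head, tail, PF_QUEUE_NUM_DW);
		 *	head = (head + msg_len_dw) % PF_QUEUE_NUM_DW;
		 */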
		/**
		 * @usm.acc_queue: Same as the page fault queue; access
		 * counters cannot be processed under the CT lock either.
		 */
		struct acc_queue {
			/** @usm.acc_queue.gt: back pointer to GT */
			struct xe_gt *gt;
#define ACC_QUEUE_NUM_DW	128
			/** @usm.acc_queue.data: data in the access counter queue */
			u32 data[ACC_QUEUE_NUM_DW];
			/**
			 * @usm.acc_queue.tail: tail pointer in DWs for access counter queue,
			 * moved by worker which processes counters
			 * (consumer).
			 */
			u16 tail;
			/**
			 * @usm.acc_queue.head: head pointer in DWs for access counter queue,
			 * moved by G2H handler (producer).
			 */
			u16 head;
			/** @usm.acc_queue.lock: protects access counter queue */
			spinlock_t lock;
			/** @usm.acc_queue.worker: to process access counters */
			struct work_struct worker;
#define NUM_ACC_QUEUE	4
		} acc_queue[NUM_ACC_QUEUE];
	} usm;

	/** @ordered_wq: used to serialize GT resets and TDRs */
	struct workqueue_struct *ordered_wq;

	/** @uc: micro controllers on the GT */
	struct xe_uc uc;

	/** @gtidle: idle properties of GT */
	struct xe_gt_idle gtidle;

	/** @exec_queue_ops: submission backend exec queue operations */
	const struct xe_exec_queue_ops *exec_queue_ops;

	/**
	 * @ring_ops: ring operations (1 per engine class)
	 */
	const struct xe_ring_ops *ring_ops[XE_ENGINE_CLASS_MAX];

	/** @fence_irq: fence IRQs (1 per engine class) */
	struct xe_hw_fence_irq fence_irq[XE_ENGINE_CLASS_MAX];

	/** @default_lrc: default LRC state */
	void *default_lrc[XE_ENGINE_CLASS_MAX];

	/** @hw_engines: hardware engines on the GT */
	struct xe_hw_engine hw_engines[XE_NUM_HW_ENGINES];

	/** @eclass: per hardware engine class interface on the GT */
	struct xe_hw_engine_class_intf eclass[XE_ENGINE_CLASS_MAX];

	/** @pcode: GT's PCODE */
	struct {
		/** @pcode.lock: protecting GT's PCODE mailbox data */
		struct mutex lock;
	} pcode;

	/** @sysfs: sysfs' kobj used by xe_gt_sysfs */
	struct kobject *sysfs;

	/** @freq: Main GT freq sysfs control */
	struct kobject *freq;

	/** @mocs: MOCS (Memory Object Control State) info */
	struct {
		/** @mocs.uc_index: UC index */
		u8 uc_index;
		/** @mocs.wb_index: WB index, only used on L3_CCS platforms */
		u8 wb_index;
	} mocs;

	/** @fuse_topo: GT topology reported by fuse registers */
	struct {
		/** @fuse_topo.g_dss_mask: dual-subslices usable by geometry */
		xe_dss_mask_t g_dss_mask;

		/** @fuse_topo.c_dss_mask: dual-subslices usable by compute */
		xe_dss_mask_t c_dss_mask;

		/** @fuse_topo.eu_mask_per_dss: EU mask per DSS */
		xe_eu_mask_t eu_mask_per_dss;
	} fuse_topo;

	/** @steering: register steering for individual HW units */
	struct {
		/** @steering.ranges: register ranges used for this steering type */
		const struct xe_mmio_range *ranges;

		/** @steering.group_target: target to steer accesses to */
		u16 group_target;
		/** @steering.instance_target: instance to steer accesses to */
		u16 instance_target;
	} steering[NUM_STEERING_TYPES];

	/**
	 * @mcr_lock: protects the MCR_SELECTOR register for the duration
	 * of a steered operation
	 */
	spinlock_t mcr_lock;

	/** @wa_active: keep track of active workarounds */
	struct {
		/** @wa_active.gt: bitmap with active GT workarounds */
		unsigned long *gt;
		/** @wa_active.engine: bitmap with active engine workarounds */
		unsigned long *engine;
		/** @wa_active.lrc: bitmap with active LRC workarounds */
		unsigned long *lrc;
		/** @wa_active.oob: bitmap with active OOB workarounds */
		unsigned long *oob;
	} wa_active;
};

#endif