Create a new xe_tile structure to begin separating the concept of "tile"
from "GT." A tile is effectively a complete GPU, and a GT is just one
part of that. On platforms like MTL, there's only a single full GPU
(tile) which has its IP blocks provided by two GTs. In contrast, a
"multi-tile" platform like PVC is basically multiple complete GPUs
packed behind a single PCI device.
For now, just create xe_tile as a simple wrapper around xe_gt. The
items in xe_gt that are truly tied to the tile rather than the GT will
be moved in future patches. Support for multiple GTs per tile (i.e.,
the MTL standalone media case) will also be re-introduced in a future
patch.
v2:
- Fix kunit test build
- Move hunk from next patch to use local tile variable rather than
direct xe->tiles[id] accesses. (Lucas)
- Mention compute in kerneldoc. (Rodrigo)
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://lore.kernel.org/r/20230601215244.678611-3-matthew.d.roper@intel.com
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
struct xe_bo *bo, *external;
unsigned int bo_flags = XE_BO_CREATE_USER_BIT |
XE_BO_CREATE_VRAM_IF_DGFX(gt);
- struct xe_vm *vm = xe_migrate_get_vm(xe->gt[0].migrate);
+ struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->primary_gt.migrate);
struct ww_acquire_ctx ww;
int err, i;
#include "regs/xe_gt_regs.h"
#include "regs/xe_reg_defs.h"
+#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_pci_test.h"
#include "xe_reg_sr.h"
{
const struct rtp_test_case *param = test->param_value;
struct xe_device *xe = test->priv;
- struct xe_reg_sr *reg_sr = &xe->gt[0].reg_sr;
+ struct xe_gt *gt = &xe_device_get_root_tile(xe)->primary_gt;
+ struct xe_reg_sr *reg_sr = &gt->reg_sr;
const struct xe_reg_sr_entry *sre, *sr_entry = NULL;
- struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(&xe->gt[0]);
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
unsigned long idx, count = 0;
xe_reg_sr_init(reg_sr, "xe_rtp_tests", xe);
return file->driver_priv;
}
+static inline struct xe_tile *xe_device_get_root_tile(struct xe_device *xe)
+{
+ return &xe->tiles[0];
+}
+
static inline struct xe_gt *xe_device_get_gt(struct xe_device *xe, u8 gt_id)
{
struct xe_gt *gt;
- XE_BUG_ON(gt_id > XE_MAX_GT);
- gt = xe->gt + gt_id;
+ XE_BUG_ON(gt_id > XE_MAX_TILES_PER_DEVICE);
+ gt = &xe->tiles[gt_id].primary_gt;
XE_BUG_ON(gt->info.id != gt_id);
XE_BUG_ON(gt->info.type == XE_GT_TYPE_UNINITIALIZED);
*/
static inline struct xe_gt *to_gt(struct xe_device *xe)
{
- return xe->gt;
+ return &xe_device_get_root_tile(xe)->primary_gt;
}
static inline bool xe_device_guc_submission_enabled(struct xe_device *xe)
#define XE_GT0 0
#define XE_GT1 1
-#define XE_MAX_GT (XE_GT1 + 1)
+#define XE_MAX_TILES_PER_DEVICE (XE_GT1 + 1)
#define XE_MAX_ASID (BIT(20))
(_xe)->info.step.graphics >= (min_step) && \
(_xe)->info.step.graphics < (max_step))
+#define tile_to_xe(tile__) \
+ _Generic(tile__, \
+ const struct xe_tile *: (const struct xe_device *)((tile__)->xe), \
+ struct xe_tile *: (tile__)->xe)
+
+/**
+ * struct xe_tile - hardware tile structure
+ *
+ * From a driver perspective, a "tile" is effectively a complete GPU, containing
+ * an SGunit, 1-2 GTs, and (for discrete platforms) VRAM.
+ *
+ * Multi-tile platforms effectively bundle multiple GPUs behind a single PCI
+ * device and designate one "root" tile as being responsible for external PCI
+ * communication. PCI BAR0 exposes the GGTT and MMIO register space for each
+ * tile in a stacked layout, and PCI BAR2 exposes the local memory associated
+ * with each tile similarly. Device-wide interrupts can be enabled/disabled
+ * at the root tile, and the MSTR_TILE_INTR register will report which tiles
+ * have interrupts that need servicing.
+ */
+struct xe_tile {
+ /** @xe: Backpointer to tile's PCI device */
+ struct xe_device *xe;
+
+ /** @id: ID of the tile */
+ u8 id;
+
+ /**
+ * @primary_gt: Primary GT
+ */
+ struct xe_gt primary_gt;
+
+ /* TODO: Add media GT here */
+};
+
/**
* struct xe_device - Top level struct of XE device
*/
/** @ordered_wq: used to serialize compute mode resume */
struct workqueue_struct *ordered_wq;
- /** @gt: graphics tile */
- struct xe_gt gt[XE_MAX_GT];
+ /** @tiles: device tiles */
+ struct xe_tile tiles[XE_MAX_TILES_PER_DEVICE];
/**
* @mem_access: keep track of memory access in the device, possibly
};
/**
- * struct xe_gt - Top level struct of a graphics tile
+ * struct xe_gt - A "Graphics Technology" unit of the GPU
*
- * A graphics tile may be a physical split (duplicate pieces of silicon,
- * different GGTT + VRAM) or a virtual split (shared GGTT + VRAM). Either way
- * this structure encapsulates of everything a GT is (MMIO, VRAM, memory
- * management, microcontrols, and a hardware set of engines).
+ * A GT ("Graphics Technology") is the subset of a GPU primarily responsible
+ * for implementing the graphics, compute, and/or media IP. It encapsulates
+ * the hardware engines, programmable execution units, and GuC. Each GT has
+ * its own handling of power management (RC6+forcewake) and multicast register
+ * steering.
+ *
+ * A GPU/tile may have a single GT that supplies all graphics, compute, and
+ * media functionality, or the graphics/compute and media may be split into
+ * separate GTs within a tile.
*/
struct xe_gt {
/** @xe: backpointer to XE device */
struct drm_file *file)
{
struct xe_device *xe = to_xe_device(dev);
+ struct xe_gt *gt = xe_device_get_gt(xe, 0);
struct drm_xe_mmio *args = data;
unsigned int bits_flag, bytes;
struct xe_reg reg;
*/
reg = XE_REG(args->addr);
- xe_force_wake_get(gt_to_fw(&xe->gt[0]), XE_FORCEWAKE_ALL);
+ xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (args->flags & DRM_XE_MMIO_WRITE) {
switch (bits_flag) {
ret = -EINVAL;
goto exit;
}
- xe_mmio_write32(to_gt(xe), reg, args->value);
+ xe_mmio_write32(gt, reg, args->value);
break;
case DRM_XE_MMIO_64BIT:
- xe_mmio_write64(to_gt(xe), reg, args->value);
+ xe_mmio_write64(gt, reg, args->value);
break;
default:
drm_dbg(&xe->drm, "Invalid MMIO bit size");
if (args->flags & DRM_XE_MMIO_READ) {
switch (bits_flag) {
case DRM_XE_MMIO_32BIT:
- args->value = xe_mmio_read32(to_gt(xe), reg);
+ args->value = xe_mmio_read32(gt, reg);
break;
case DRM_XE_MMIO_64BIT:
- args->value = xe_mmio_read64(to_gt(xe), reg);
+ args->value = xe_mmio_read64(gt, reg);
break;
default:
drm_dbg(&xe->drm, "Invalid MMIO bit size");
}
exit:
- xe_force_wake_put(gt_to_fw(&xe->gt[0]), XE_FORCEWAKE_ALL);
+ xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
return ret;
}
const struct xe_graphics_desc *graphics_desc = NULL;
const struct xe_media_desc *media_desc = NULL;
u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
+ struct xe_tile *tile;
struct xe_gt *gt;
u8 id;
xe->info.tile_count = 1 + graphics_desc->max_remote_tiles;
for (id = 0; id < xe->info.tile_count; ++id) {
- gt = xe->gt + id;
+ tile = &xe->tiles[id];
+ tile->xe = xe;
+ tile->id = id;
+
+ gt = &tile->primary_gt;
gt->info.id = id;
gt->xe = xe;
u32 ppgtt_flag = get_ppgtt_flag(job);
struct xe_gt *gt = job->engine->gt;
struct xe_device *xe = gt_to_xe(gt);
- bool lacks_render = !(xe->gt[0].info.engine_mask & XE_HW_ENGINE_RCS_MASK);
+ bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
u32 mask_flags = 0;
dw[i++] = preparser_disable(true);
struct xe_device *xe = vma->vm->xe;
struct xe_gt *gt;
u32 gt_needs_invalidate = 0;
- int seqno[XE_MAX_GT];
+ int seqno[XE_MAX_TILES_PER_DEVICE];
u8 id;
int ret;
struct kref refcount;
/* engine used for (un)binding vma's */
- struct xe_engine *eng[XE_MAX_GT];
+ struct xe_engine *eng[XE_MAX_TILES_PER_DEVICE];
/** Protects @rebind_list and the page-table structures */
struct dma_resv resv;
u64 size;
struct rb_root vmas;
- struct xe_pt *pt_root[XE_MAX_GT];
- struct xe_bo *scratch_bo[XE_MAX_GT];
- struct xe_pt *scratch_pt[XE_MAX_GT][XE_VM_MAX_LEVEL];
+ struct xe_pt *pt_root[XE_MAX_TILES_PER_DEVICE];
+ struct xe_bo *scratch_bo[XE_MAX_TILES_PER_DEVICE];
+ struct xe_pt *scratch_pt[XE_MAX_TILES_PER_DEVICE][XE_VM_MAX_LEVEL];
/** @flags: flags for this VM, statically setup a creation time */
#define XE_VM_FLAGS_64K BIT(0)