drm/xe: Normalize bo flags macros
author      Lucas De Marchi <lucas.demarchi@intel.com>
            Fri, 22 Mar 2024 14:27:02 +0000 (07:27 -0700)
committer   Lucas De Marchi <lucas.demarchi@intel.com>
            Tue, 2 Apr 2024 17:33:57 +0000 (10:33 -0700)
The flags stored in the BO grew over time without following much of a
naming pattern. First of all, get rid of the _BIT suffix, which is banned
everywhere else by the guideline in drivers/gpu/drm/i915/i915_reg.h that
xe loosely follows:

Define bits using ``REG_BIT(N)``. Do **not** add ``_BIT`` suffix to the name.

Here the flags aren't for a register, but it's good practice to keep the
naming consistent.
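
For illustration, a minimal sketch of the two conventions side by side;
EXAMPLE_ENABLE is a hypothetical register bit used only to show the
i915-style rule, while XE_BO_FLAG_USER is taken from the reworked
xe_bo.h below:

  /* register bit, per the i915 guideline: REG_BIT(), no _BIT suffix */
  #define   EXAMPLE_ENABLE	REG_BIT(0)

  /* bo flag: plain BIT(), following the same no-_BIT-suffix rule */
  #define XE_BO_FLAG_USER	BIT(0)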

The second naming divergence is whether or not "CREATE" is used. This
came about because most of the flags are passed to the xe_bo_create*()
family of functions, changing their behavior. However, since the flags
are also stored in the bo itself and checked elsewhere in the code, it's
better to just omit the CREATE part.

With those two guidelines, all the flags are given the form
XE_BO_FLAG_<FLAG_NAME> using the following commands:

git grep -le "XE_BO_" -- drivers/gpu/drm/xe | xargs sed -i \
-e "s/XE_BO_\([_A-Z0-9]*\)_BIT/XE_BO_\1/g" \
-e 's/XE_BO_CREATE_/XE_BO_FLAG_/g'
git grep -le "XE_BO_" -- drivers/gpu/drm/xe | xargs sed -i -r \
-e 's/XE_BO_(DEFER_BACKING|SCANOUT|FIXED_PLACEMENT|PAGETABLE|NEEDS_CPU_ACCESS|NEEDS_UC|INTERNAL_TEST|INTERNAL_64K|GGTT_INVALIDATE)/XE_BO_FLAG_\1/g'
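
As a concrete example, these commands turn a call-site flag combination
such as the one in the xe_plane_initial.c hunk below from

  flags = XE_BO_CREATE_PINNED_BIT | XE_BO_SCANOUT_BIT | XE_BO_CREATE_GGTT_BIT;

into

  flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;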

The defines in drivers/gpu/drm/xe/xe_bo.h are then adjusted to follow
the coding style.

Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240322142702.186529-3-lucas.demarchi@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
33 files changed:
drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h
drivers/gpu/drm/xe/display/intel_fb_bo.c
drivers/gpu/drm/xe/display/intel_fbdev_fb.c
drivers/gpu/drm/xe/display/xe_dsb_buffer.c
drivers/gpu/drm/xe/display/xe_fb_pin.c
drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
drivers/gpu/drm/xe/display/xe_plane_initial.c
drivers/gpu/drm/xe/tests/xe_bo.c
drivers/gpu/drm/xe/tests/xe_dma_buf.c
drivers/gpu/drm/xe/tests/xe_migrate.c
drivers/gpu/drm/xe/xe_bo.c
drivers/gpu/drm/xe/xe_bo.h
drivers/gpu/drm/xe/xe_bo_evict.c
drivers/gpu/drm/xe/xe_dma_buf.c
drivers/gpu/drm/xe/xe_ggtt.c
drivers/gpu/drm/xe/xe_gsc.c
drivers/gpu/drm/xe/xe_gsc_proxy.c
drivers/gpu/drm/xe/xe_guc_ads.c
drivers/gpu/drm/xe/xe_guc_ct.c
drivers/gpu/drm/xe/xe_guc_hwconfig.c
drivers/gpu/drm/xe/xe_guc_log.c
drivers/gpu/drm/xe/xe_guc_pc.c
drivers/gpu/drm/xe/xe_huc.c
drivers/gpu/drm/xe/xe_hw_engine.c
drivers/gpu/drm/xe/xe_lmtt.c
drivers/gpu/drm/xe/xe_lrc.c
drivers/gpu/drm/xe/xe_memirq.c
drivers/gpu/drm/xe/xe_migrate.c
drivers/gpu/drm/xe/xe_pt.c
drivers/gpu/drm/xe/xe_sa.c
drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
drivers/gpu/drm/xe/xe_uc_fw.c
drivers/gpu/drm/xe/xe_vm.c

index bd233007c1b74af4752c6be52494f97fea302b13..b4ccc4231e7d00b08378c9cc9649a457b22a62ef 100644 (file)
@@ -17,7 +17,7 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
 {
        struct xe_bo *bo;
        int err;
-       u32 flags = XE_BO_CREATE_PINNED_BIT | XE_BO_CREATE_STOLEN_BIT;
+       u32 flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_STOLEN;
 
        if (align)
                size = ALIGN(size, align);
index b21da7b745a5e7cd6b3e34e4fb8d42a45b2b6466..dba327f53ac5447a6043fe90394c1aa1a040c0c7 100644 (file)
@@ -11,7 +11,7 @@
 
 void intel_fb_bo_framebuffer_fini(struct xe_bo *bo)
 {
-       if (bo->flags & XE_BO_CREATE_PINNED_BIT) {
+       if (bo->flags & XE_BO_FLAG_PINNED) {
                /* Unpin our kernel fb first */
                xe_bo_lock(bo, false);
                xe_bo_unpin(bo);
@@ -33,9 +33,9 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
        if (ret)
                return ret;
 
-       if (!(bo->flags & XE_BO_SCANOUT_BIT)) {
+       if (!(bo->flags & XE_BO_FLAG_SCANOUT)) {
                /*
-                * XE_BO_SCANOUT_BIT should ideally be set at creation, or is
+                * XE_BO_FLAG_SCANOUT should ideally be set at creation, or is
                 * automatically set when creating FB. We cannot change caching
                 * mode when the object is VM_BINDed, so we can only set
                 * coherency with display when unbound.
@@ -44,7 +44,7 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
                        ttm_bo_unreserve(&bo->ttm);
                        return -EINVAL;
                }
-               bo->flags |= XE_BO_SCANOUT_BIT;
+               bo->flags |= XE_BO_FLAG_SCANOUT;
        }
        ttm_bo_unreserve(&bo->ttm);
 
index 51ae3561fd0de31762343f8662c993550c773e92..9e4bcfdbc7e5908b325bba824a733fde21e80749 100644 (file)
@@ -42,9 +42,9 @@ struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
        if (!IS_DGFX(dev_priv)) {
                obj = xe_bo_create_pin_map(dev_priv, xe_device_get_root_tile(dev_priv),
                                           NULL, size,
-                                          ttm_bo_type_kernel, XE_BO_SCANOUT_BIT |
-                                          XE_BO_CREATE_STOLEN_BIT |
-                                          XE_BO_CREATE_PINNED_BIT);
+                                          ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+                                          XE_BO_FLAG_STOLEN |
+                                          XE_BO_FLAG_PINNED);
                if (!IS_ERR(obj))
                        drm_info(&dev_priv->drm, "Allocated fbdev into stolen\n");
                else
@@ -52,9 +52,9 @@ struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
        }
        if (IS_ERR(obj)) {
                obj = xe_bo_create_pin_map(dev_priv, xe_device_get_root_tile(dev_priv), NULL, size,
-                                         ttm_bo_type_kernel, XE_BO_SCANOUT_BIT |
-                                         XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(dev_priv)) |
-                                         XE_BO_CREATE_PINNED_BIT);
+                                         ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+                                         XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(dev_priv)) |
+                                         XE_BO_FLAG_PINNED);
        }
 
        if (IS_ERR(obj)) {
@@ -81,8 +81,8 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info
 {
        struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
 
-       if (!(obj->flags & XE_BO_CREATE_SYSTEM_BIT)) {
-               if (obj->flags & XE_BO_CREATE_STOLEN_BIT)
+       if (!(obj->flags & XE_BO_FLAG_SYSTEM)) {
+               if (obj->flags & XE_BO_FLAG_STOLEN)
                        info->fix.smem_start = xe_ttm_stolen_io_offset(obj, 0);
                else
                        info->fix.smem_start =
index 27c2fb1c002a1d1d21e619329f95d8177cab4d33..44c9fd2143cc62885cde9cb0c2cde45c8222d2c3 100644 (file)
@@ -45,8 +45,8 @@ bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *d
        obj = xe_bo_create_pin_map(i915, xe_device_get_root_tile(i915),
                                   NULL, PAGE_ALIGN(size),
                                   ttm_bo_type_kernel,
-                                  XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(i915)) |
-                                  XE_BO_CREATE_GGTT_BIT);
+                                  XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(i915)) |
+                                  XE_BO_FLAG_GGTT);
        if (IS_ERR(obj)) {
                kfree(vma);
                return false;
index 2a50a7eaaa3102f183805b3afaf5a8b71d1a9180..3a584bc3a0a30cc4d4b456637bb8ef19aacefb60 100644 (file)
@@ -99,21 +99,21 @@ static int __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb,
        if (IS_DGFX(xe))
                dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
                                           ttm_bo_type_kernel,
-                                          XE_BO_CREATE_VRAM0_BIT |
-                                          XE_BO_CREATE_GGTT_BIT |
-                                          XE_BO_PAGETABLE);
+                                          XE_BO_FLAG_VRAM0 |
+                                          XE_BO_FLAG_GGTT |
+                                          XE_BO_FLAG_PAGETABLE);
        else
                dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
                                           ttm_bo_type_kernel,
-                                          XE_BO_CREATE_STOLEN_BIT |
-                                          XE_BO_CREATE_GGTT_BIT |
-                                          XE_BO_PAGETABLE);
+                                          XE_BO_FLAG_STOLEN |
+                                          XE_BO_FLAG_GGTT |
+                                          XE_BO_FLAG_PAGETABLE);
        if (IS_ERR(dpt))
                dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
                                           ttm_bo_type_kernel,
-                                          XE_BO_CREATE_SYSTEM_BIT |
-                                          XE_BO_CREATE_GGTT_BIT |
-                                          XE_BO_PAGETABLE);
+                                          XE_BO_FLAG_SYSTEM |
+                                          XE_BO_FLAG_GGTT |
+                                          XE_BO_FLAG_PAGETABLE);
        if (IS_ERR(dpt))
                return PTR_ERR(dpt);
 
@@ -262,7 +262,7 @@ static struct i915_vma *__xe_pin_fb_vma(struct intel_framebuffer *fb,
 
        if (IS_DGFX(to_xe_device(bo->ttm.base.dev)) &&
            intel_fb_rc_ccs_cc_plane(&fb->base) >= 0 &&
-           !(bo->flags & XE_BO_NEEDS_CPU_ACCESS)) {
+           !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS)) {
                struct xe_tile *tile = xe_device_get_root_tile(xe);
 
                /*
@@ -355,7 +355,7 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
        struct i915_vma *vma;
 
        /* We reject creating !SCANOUT fb's, so this is weird.. */
-       drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_SCANOUT_BIT));
+       drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT));
 
        vma = __xe_pin_fb_vma(to_intel_framebuffer(fb), &plane_state->view.gtt);
        if (IS_ERR(vma))
index 25c73602ef555930978f7b7e7a54b26d65d070c3..ac4b870f73fa8a72e92e0795fa8f796145841f8d 100644 (file)
@@ -73,8 +73,8 @@ static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
        xe_device_mem_access_get(xe);
        bo = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_SIZE * 2,
                                  ttm_bo_type_kernel,
-                                 XE_BO_CREATE_SYSTEM_BIT |
-                                 XE_BO_CREATE_GGTT_BIT);
+                                 XE_BO_FLAG_SYSTEM |
+                                 XE_BO_FLAG_GGTT);
 
        if (IS_ERR(bo)) {
                drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n");
index 866d1dd6eeb4b40c890c132370e5687d799ea7fe..7132cd5d9545b1a0be5d253f45d8c38bb15d86ab 100644 (file)
@@ -62,7 +62,7 @@ initial_plane_bo(struct xe_device *xe,
        if (plane_config->size == 0)
                return NULL;
 
-       flags = XE_BO_CREATE_PINNED_BIT | XE_BO_SCANOUT_BIT | XE_BO_CREATE_GGTT_BIT;
+       flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;
 
        base = round_down(plane_config->base, page_size);
        if (IS_DGFX(xe)) {
@@ -79,7 +79,7 @@ initial_plane_bo(struct xe_device *xe,
                }
 
                phys_base = pte & ~(page_size - 1);
-               flags |= XE_BO_CREATE_VRAM0_BIT;
+               flags |= XE_BO_FLAG_VRAM0;
 
                /*
                 * We don't currently expect this to ever be placed in the
@@ -101,7 +101,7 @@ initial_plane_bo(struct xe_device *xe,
                if (!stolen)
                        return NULL;
                phys_base = base;
-               flags |= XE_BO_CREATE_STOLEN_BIT;
+               flags |= XE_BO_FLAG_STOLEN;
 
                /*
                 * If the FB is too big, just don't use it since fbdev is not very
index 0926a1c2eb8696740ce6acc593908bba1b6bba0c..9f3c02826464972ba10ec3511fe9d2dbb64e58e8 100644 (file)
@@ -116,7 +116,7 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
        int ret;
 
        /* TODO: Sanity check */
-       unsigned int bo_flags = XE_BO_CREATE_VRAM_IF_DGFX(tile);
+       unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
 
        if (IS_DGFX(xe))
                kunit_info(test, "Testing vram id %u\n", tile->id);
@@ -186,7 +186,7 @@ EXPORT_SYMBOL_IF_KUNIT(xe_ccs_migrate_kunit);
 static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struct kunit *test)
 {
        struct xe_bo *bo, *external;
-       unsigned int bo_flags = XE_BO_CREATE_VRAM_IF_DGFX(tile);
+       unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
        struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->migrate);
        struct xe_gt *__gt;
        int err, i, id;
index 2a86dc4eb8af098c0e2e2d7dbcecf2cc36f78afd..d54dd5b430079ddb1a4042e90e673946a8759cac 100644 (file)
@@ -36,14 +36,14 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
        xe_bo_assert_held(imported);
 
        mem_type = XE_PL_VRAM0;
-       if (!(params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
+       if (!(params->mem_mask & XE_BO_FLAG_VRAM0))
                /* No VRAM allowed */
                mem_type = XE_PL_TT;
        else if (params->force_different_devices && !p2p_enabled(params))
                /* No P2P */
                mem_type = XE_PL_TT;
        else if (params->force_different_devices && !is_dynamic(params) &&
-                (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
+                (params->mem_mask & XE_BO_FLAG_SYSTEM))
                /* Pin migrated to TT */
                mem_type = XE_PL_TT;
 
@@ -93,7 +93,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
         * possible, saving a migration step as the transfer is just
         * likely as fast from system memory.
         */
-       if (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)
+       if (params->mem_mask & XE_BO_FLAG_SYSTEM)
                KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, XE_PL_TT));
        else
                KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
@@ -115,11 +115,11 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
 
        /* No VRAM on this device? */
        if (!ttm_manager_type(&xe->ttm, XE_PL_VRAM0) &&
-           (params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
+           (params->mem_mask & XE_BO_FLAG_VRAM0))
                return;
 
        size = PAGE_SIZE;
-       if ((params->mem_mask & XE_BO_CREATE_VRAM0_BIT) &&
+       if ((params->mem_mask & XE_BO_FLAG_VRAM0) &&
            xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
                size = SZ_64K;
 
@@ -148,7 +148,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
                 */
                if (params->force_different_devices &&
                    !p2p_enabled(params) &&
-                   !(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
+                   !(params->mem_mask & XE_BO_FLAG_SYSTEM)) {
                        KUNIT_FAIL(test,
                                   "xe_gem_prime_import() succeeded when it shouldn't have\n");
                } else {
@@ -161,7 +161,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
                        /* Pinning in VRAM is not allowed. */
                        if (!is_dynamic(params) &&
                            params->force_different_devices &&
-                           !(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
+                           !(params->mem_mask & XE_BO_FLAG_SYSTEM))
                                KUNIT_EXPECT_EQ(test, err, -EINVAL);
                        /* Otherwise only expect interrupts or success. */
                        else if (err && err != -EINTR && err != -ERESTARTSYS)
@@ -180,7 +180,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
                           PTR_ERR(import));
        } else if (!params->force_different_devices ||
                   p2p_enabled(params) ||
-                  (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
+                  (params->mem_mask & XE_BO_FLAG_SYSTEM)) {
                /* Shouldn't fail if we can reuse same bo, use p2p or use system */
                KUNIT_FAIL(test, "dynamic p2p attachment failed with err=%ld\n",
                           PTR_ERR(import));
@@ -203,52 +203,52 @@ static const struct dma_buf_attach_ops nop2p_attach_ops = {
  * gem object.
  */
 static const struct dma_buf_test_params test_params[] = {
-       {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+       {.mem_mask = XE_BO_FLAG_VRAM0,
         .attach_ops = &xe_dma_buf_attach_ops},
-       {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+       {.mem_mask = XE_BO_FLAG_VRAM0,
         .attach_ops = &xe_dma_buf_attach_ops,
         .force_different_devices = true},
 
-       {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+       {.mem_mask = XE_BO_FLAG_VRAM0,
         .attach_ops = &nop2p_attach_ops},
-       {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+       {.mem_mask = XE_BO_FLAG_VRAM0,
         .attach_ops = &nop2p_attach_ops,
         .force_different_devices = true},
 
-       {.mem_mask = XE_BO_CREATE_VRAM0_BIT},
-       {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+       {.mem_mask = XE_BO_FLAG_VRAM0},
+       {.mem_mask = XE_BO_FLAG_VRAM0,
         .force_different_devices = true},
 
-       {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+       {.mem_mask = XE_BO_FLAG_SYSTEM,
         .attach_ops = &xe_dma_buf_attach_ops},
-       {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+       {.mem_mask = XE_BO_FLAG_SYSTEM,
         .attach_ops = &xe_dma_buf_attach_ops,
         .force_different_devices = true},
 
-       {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+       {.mem_mask = XE_BO_FLAG_SYSTEM,
         .attach_ops = &nop2p_attach_ops},
-       {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+       {.mem_mask = XE_BO_FLAG_SYSTEM,
         .attach_ops = &nop2p_attach_ops,
         .force_different_devices = true},
 
-       {.mem_mask = XE_BO_CREATE_SYSTEM_BIT},
-       {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+       {.mem_mask = XE_BO_FLAG_SYSTEM},
+       {.mem_mask = XE_BO_FLAG_SYSTEM,
         .force_different_devices = true},
 
-       {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+       {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
         .attach_ops = &xe_dma_buf_attach_ops},
-       {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+       {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
         .attach_ops = &xe_dma_buf_attach_ops,
         .force_different_devices = true},
 
-       {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+       {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
         .attach_ops = &nop2p_attach_ops},
-       {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+       {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
         .attach_ops = &nop2p_attach_ops,
         .force_different_devices = true},
 
-       {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT},
-       {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+       {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0},
+       {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
         .force_different_devices = true},
 
        {}
index ce531498f57fe62bcdc71f606fa9d29bd55fcc64..1332832e2f97b4c0ffc6a5f06b0457eb202c7659 100644 (file)
@@ -113,7 +113,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
                                                   bo->size,
                                                   ttm_bo_type_kernel,
                                                   region |
-                                                  XE_BO_NEEDS_CPU_ACCESS);
+                                                  XE_BO_FLAG_NEEDS_CPU_ACCESS);
        if (IS_ERR(remote)) {
                KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %li\n",
                           str, PTR_ERR(remote));
@@ -191,7 +191,7 @@ out_unlock:
 static void test_copy_sysmem(struct xe_migrate *m, struct xe_bo *bo,
                             struct kunit *test)
 {
-       test_copy(m, bo, test, XE_BO_CREATE_SYSTEM_BIT);
+       test_copy(m, bo, test, XE_BO_FLAG_SYSTEM);
 }
 
 static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
@@ -203,9 +203,9 @@ static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
                return;
 
        if (bo->ttm.resource->mem_type == XE_PL_VRAM0)
-               region = XE_BO_CREATE_VRAM1_BIT;
+               region = XE_BO_FLAG_VRAM1;
        else
-               region = XE_BO_CREATE_VRAM0_BIT;
+               region = XE_BO_FLAG_VRAM0;
        test_copy(m, bo, test, region);
 }
 
@@ -281,8 +281,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 
        big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
                                   ttm_bo_type_kernel,
-                                  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-                                  XE_BO_CREATE_PINNED_BIT);
+                                  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+                                  XE_BO_FLAG_PINNED);
        if (IS_ERR(big)) {
                KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
                goto vunmap;
@@ -290,8 +290,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 
        pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
                                  ttm_bo_type_kernel,
-                                 XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-                                 XE_BO_CREATE_PINNED_BIT);
+                                 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+                                 XE_BO_FLAG_PINNED);
        if (IS_ERR(pt)) {
                KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
                           PTR_ERR(pt));
@@ -301,8 +301,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
        tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
                                    2 * SZ_4K,
                                    ttm_bo_type_kernel,
-                                   XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-                                   XE_BO_CREATE_PINNED_BIT);
+                                   XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+                                   XE_BO_FLAG_PINNED);
        if (IS_ERR(tiny)) {
                KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
                           PTR_ERR(pt));
index 883f68a527d87fa9c8f80d199a305644f273c5bc..6166bc7156560c53bbf867ffd1361c4cb9121e26 100644 (file)
@@ -111,7 +111,7 @@ bool xe_bo_is_stolen_devmem(struct xe_bo *bo)
 
 static bool xe_bo_is_user(struct xe_bo *bo)
 {
-       return bo->flags & XE_BO_CREATE_USER_BIT;
+       return bo->flags & XE_BO_FLAG_USER;
 }
 
 static struct xe_migrate *
@@ -137,7 +137,7 @@ static struct xe_mem_region *res_to_mem_region(struct ttm_resource *res)
 static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
                           u32 bo_flags, u32 *c)
 {
-       if (bo_flags & XE_BO_CREATE_SYSTEM_BIT) {
+       if (bo_flags & XE_BO_FLAG_SYSTEM) {
                xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
 
                bo->placements[*c] = (struct ttm_place) {
@@ -164,12 +164,12 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
         * For eviction / restore on suspend / resume objects
         * pinned in VRAM must be contiguous
         */
-       if (bo_flags & (XE_BO_CREATE_PINNED_BIT |
-                       XE_BO_CREATE_GGTT_BIT))
+       if (bo_flags & (XE_BO_FLAG_PINNED |
+                       XE_BO_FLAG_GGTT))
                place.flags |= TTM_PL_FLAG_CONTIGUOUS;
 
        if (io_size < vram->usable_size) {
-               if (bo_flags & XE_BO_NEEDS_CPU_ACCESS) {
+               if (bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) {
                        place.fpfn = 0;
                        place.lpfn = io_size >> PAGE_SHIFT;
                } else {
@@ -183,22 +183,22 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
 static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
                         u32 bo_flags, u32 *c)
 {
-       if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
+       if (bo_flags & XE_BO_FLAG_VRAM0)
                add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
-       if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
+       if (bo_flags & XE_BO_FLAG_VRAM1)
                add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
 }
 
 static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
                           u32 bo_flags, u32 *c)
 {
-       if (bo_flags & XE_BO_CREATE_STOLEN_BIT) {
+       if (bo_flags & XE_BO_FLAG_STOLEN) {
                xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
 
                bo->placements[*c] = (struct ttm_place) {
                        .mem_type = XE_PL_STOLEN,
-                       .flags = bo_flags & (XE_BO_CREATE_PINNED_BIT |
-                                            XE_BO_CREATE_GGTT_BIT) ?
+                       .flags = bo_flags & (XE_BO_FLAG_PINNED |
+                                            XE_BO_FLAG_GGTT) ?
                                TTM_PL_FLAG_CONTIGUOUS : 0,
                };
                *c += 1;
@@ -339,7 +339,7 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
                break;
        }
 
-       WARN_ON((bo->flags & XE_BO_CREATE_USER_BIT) && !bo->cpu_caching);
+       WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching);
 
        /*
         * Display scanout is always non-coherent with the CPU cache.
@@ -347,8 +347,8 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
         * For Xe_LPG and beyond, PPGTT PTE lookups are also non-coherent and
         * require a CPU:WC mapping.
         */
-       if ((!bo->cpu_caching && bo->flags & XE_BO_SCANOUT_BIT) ||
-           (xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_PAGETABLE))
+       if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) ||
+           (xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_FLAG_PAGETABLE))
                caching = ttm_write_combined;
 
        err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages);
@@ -1102,7 +1102,7 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
        struct drm_device *ddev = tbo->base.dev;
        struct xe_device *xe = to_xe_device(ddev);
        struct xe_bo *bo = ttm_to_xe_bo(tbo);
-       bool needs_rpm = bo->flags & XE_BO_CREATE_VRAM_MASK;
+       bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
        vm_fault_t ret;
        int idx;
 
@@ -1215,19 +1215,19 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
                return ERR_PTR(-EINVAL);
        }
 
-       if (flags & (XE_BO_CREATE_VRAM_MASK | XE_BO_CREATE_STOLEN_BIT) &&
-           !(flags & XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT) &&
+       if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) &&
+           !(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) &&
            ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) ||
             (flags & XE_BO_NEEDS_64K))) {
                aligned_size = ALIGN(size, SZ_64K);
                if (type != ttm_bo_type_device)
                        size = ALIGN(size, SZ_64K);
-               flags |= XE_BO_INTERNAL_64K;
+               flags |= XE_BO_FLAG_INTERNAL_64K;
                alignment = SZ_64K >> PAGE_SHIFT;
 
        } else {
                aligned_size = ALIGN(size, SZ_4K);
-               flags &= ~XE_BO_INTERNAL_64K;
+               flags &= ~XE_BO_FLAG_INTERNAL_64K;
                alignment = SZ_4K >> PAGE_SHIFT;
        }
 
@@ -1256,11 +1256,11 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
        drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);
 
        if (resv) {
-               ctx.allow_res_evict = !(flags & XE_BO_CREATE_NO_RESV_EVICT);
+               ctx.allow_res_evict = !(flags & XE_BO_FLAG_NO_RESV_EVICT);
                ctx.resv = resv;
        }
 
-       if (!(flags & XE_BO_FIXED_PLACEMENT_BIT)) {
+       if (!(flags & XE_BO_FLAG_FIXED_PLACEMENT)) {
                err = __xe_bo_placement_for_flags(xe, bo, bo->flags);
                if (WARN_ON(err)) {
                        xe_ttm_bo_destroy(&bo->ttm);
@@ -1270,7 +1270,7 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
 
        /* Defer populating type_sg bos */
        placement = (type == ttm_bo_type_sg ||
-                    bo->flags & XE_BO_DEFER_BACKING) ? &sys_placement :
+                    bo->flags & XE_BO_FLAG_DEFER_BACKING) ? &sys_placement :
                &bo->placement;
        err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type,
                                   placement, alignment,
@@ -1325,21 +1325,21 @@ static int __xe_bo_fixed_placement(struct xe_device *xe,
 {
        struct ttm_place *place = bo->placements;
 
-       if (flags & (XE_BO_CREATE_USER_BIT|XE_BO_CREATE_SYSTEM_BIT))
+       if (flags & (XE_BO_FLAG_USER | XE_BO_FLAG_SYSTEM))
                return -EINVAL;
 
        place->flags = TTM_PL_FLAG_CONTIGUOUS;
        place->fpfn = start >> PAGE_SHIFT;
        place->lpfn = end >> PAGE_SHIFT;
 
-       switch (flags & (XE_BO_CREATE_STOLEN_BIT | XE_BO_CREATE_VRAM_MASK)) {
-       case XE_BO_CREATE_VRAM0_BIT:
+       switch (flags & (XE_BO_FLAG_STOLEN | XE_BO_FLAG_VRAM_MASK)) {
+       case XE_BO_FLAG_VRAM0:
                place->mem_type = XE_PL_VRAM0;
                break;
-       case XE_BO_CREATE_VRAM1_BIT:
+       case XE_BO_FLAG_VRAM1:
                place->mem_type = XE_PL_VRAM1;
                break;
-       case XE_BO_CREATE_STOLEN_BIT:
+       case XE_BO_FLAG_STOLEN:
                place->mem_type = XE_PL_STOLEN;
                break;
 
@@ -1373,7 +1373,7 @@ __xe_bo_create_locked(struct xe_device *xe,
                if (IS_ERR(bo))
                        return bo;
 
-               flags |= XE_BO_FIXED_PLACEMENT_BIT;
+               flags |= XE_BO_FLAG_FIXED_PLACEMENT;
                err = __xe_bo_fixed_placement(xe, bo, flags, start, end, size);
                if (err) {
                        xe_bo_free(bo);
@@ -1383,7 +1383,7 @@ __xe_bo_create_locked(struct xe_device *xe,
 
        bo = ___xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
                                    vm && !xe_vm_in_fault_mode(vm) &&
-                                   flags & XE_BO_CREATE_USER_BIT ?
+                                   flags & XE_BO_FLAG_USER ?
                                    &vm->lru_bulk_move : NULL, size,
                                    cpu_caching, type, flags);
        if (IS_ERR(bo))
@@ -1400,13 +1400,13 @@ __xe_bo_create_locked(struct xe_device *xe,
                xe_vm_get(vm);
        bo->vm = vm;
 
-       if (bo->flags & XE_BO_CREATE_GGTT_BIT) {
-               if (!tile && flags & XE_BO_CREATE_STOLEN_BIT)
+       if (bo->flags & XE_BO_FLAG_GGTT) {
+               if (!tile && flags & XE_BO_FLAG_STOLEN)
                        tile = xe_device_get_root_tile(xe);
 
                xe_assert(xe, tile);
 
-               if (flags & XE_BO_FIXED_PLACEMENT_BIT) {
+               if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
                        err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo,
                                                   start + bo->size, U64_MAX);
                } else {
@@ -1449,7 +1449,7 @@ struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
 {
        struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
                                                 cpu_caching, type,
-                                                flags | XE_BO_CREATE_USER_BIT);
+                                                flags | XE_BO_FLAG_USER);
        if (!IS_ERR(bo))
                xe_bo_unlock_vm_held(bo);
 
@@ -1478,12 +1478,12 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile
        u64 start = offset == ~0ull ? 0 : offset;
        u64 end = offset == ~0ull ? offset : start + size;
 
-       if (flags & XE_BO_CREATE_STOLEN_BIT &&
+       if (flags & XE_BO_FLAG_STOLEN &&
            xe_ttm_stolen_cpu_access_needs_ggtt(xe))
-               flags |= XE_BO_CREATE_GGTT_BIT;
+               flags |= XE_BO_FLAG_GGTT;
 
        bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
-                                      flags | XE_BO_NEEDS_CPU_ACCESS);
+                                      flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
        if (IS_ERR(bo))
                return bo;
 
@@ -1580,9 +1580,9 @@ struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_til
 int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src)
 {
        struct xe_bo *bo;
-       u32 dst_flags = XE_BO_CREATE_VRAM_IF_DGFX(tile) | XE_BO_CREATE_GGTT_BIT;
+       u32 dst_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT;
 
-       dst_flags |= (*src)->flags & XE_BO_GGTT_INVALIDATE;
+       dst_flags |= (*src)->flags & XE_BO_FLAG_GGTT_INVALIDATE;
 
        xe_assert(xe, IS_DGFX(xe));
        xe_assert(xe, !(*src)->vmap.is_iomem);
@@ -1663,8 +1663,8 @@ int xe_bo_pin(struct xe_bo *bo)
        xe_assert(xe, !xe_bo_is_user(bo));
 
        /* Pinned object must be in GGTT or have pinned flag */
-       xe_assert(xe, bo->flags & (XE_BO_CREATE_PINNED_BIT |
-                                  XE_BO_CREATE_GGTT_BIT));
+       xe_assert(xe, bo->flags & (XE_BO_FLAG_PINNED |
+                                  XE_BO_FLAG_GGTT));
 
        /*
         * No reason we can't support pinning imported dma-bufs we just don't
@@ -1685,7 +1685,7 @@ int xe_bo_pin(struct xe_bo *bo)
         * during suspend / resume (force restore to same physical address).
         */
        if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
-           bo->flags & XE_BO_INTERNAL_TEST)) {
+           bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
                struct ttm_place *place = &(bo->placements[0]);
 
                if (mem_type_is_vram(place->mem_type)) {
@@ -1753,7 +1753,7 @@ void xe_bo_unpin(struct xe_bo *bo)
        xe_assert(xe, xe_bo_is_pinned(bo));
 
        if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
-           bo->flags & XE_BO_INTERNAL_TEST)) {
+           bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
                struct ttm_place *place = &(bo->placements[0]);
 
                if (mem_type_is_vram(place->mem_type)) {
@@ -1856,7 +1856,7 @@ int xe_bo_vmap(struct xe_bo *bo)
 
        xe_bo_assert_held(bo);
 
-       if (!(bo->flags & XE_BO_NEEDS_CPU_ACCESS))
+       if (!(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS))
                return -EINVAL;
 
        if (!iosys_map_is_null(&bo->vmap))
@@ -1938,29 +1938,29 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
 
        bo_flags = 0;
        if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING)
-               bo_flags |= XE_BO_DEFER_BACKING;
+               bo_flags |= XE_BO_FLAG_DEFER_BACKING;
 
        if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT)
-               bo_flags |= XE_BO_SCANOUT_BIT;
+               bo_flags |= XE_BO_FLAG_SCANOUT;
 
-       bo_flags |= args->placement << (ffs(XE_BO_CREATE_SYSTEM_BIT) - 1);
+       bo_flags |= args->placement << (ffs(XE_BO_FLAG_SYSTEM) - 1);
 
        if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) {
-               if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_CREATE_VRAM_MASK)))
+               if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_FLAG_VRAM_MASK)))
                        return -EINVAL;
 
-               bo_flags |= XE_BO_NEEDS_CPU_ACCESS;
+               bo_flags |= XE_BO_FLAG_NEEDS_CPU_ACCESS;
        }
 
        if (XE_IOCTL_DBG(xe, !args->cpu_caching ||
                         args->cpu_caching > DRM_XE_GEM_CPU_CACHING_WC))
                return -EINVAL;
 
-       if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_CREATE_VRAM_MASK &&
+       if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_VRAM_MASK &&
                         args->cpu_caching != DRM_XE_GEM_CPU_CACHING_WC))
                return -EINVAL;
 
-       if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_SCANOUT_BIT &&
+       if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_SCANOUT &&
                         args->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB))
                return -EINVAL;
 
@@ -2209,7 +2209,7 @@ bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
         * can't be used since there's no CCS storage associated with
         * non-VRAM addresses.
         */
-       if (IS_DGFX(xe) && (bo->flags & XE_BO_CREATE_SYSTEM_BIT))
+       if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM))
                return false;
 
        return true;
@@ -2278,9 +2278,9 @@ int xe_bo_dumb_create(struct drm_file *file_priv,
        bo = xe_bo_create_user(xe, NULL, NULL, args->size,
                               DRM_XE_GEM_CPU_CACHING_WC,
                               ttm_bo_type_device,
-                              XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
-                              XE_BO_SCANOUT_BIT |
-                              XE_BO_NEEDS_CPU_ACCESS);
+                              XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
+                              XE_BO_FLAG_SCANOUT |
+                              XE_BO_FLAG_NEEDS_CPU_ACCESS);
        if (IS_ERR(bo))
                return PTR_ERR(bo);
 
index 52e441f77e96a674ca5b8c57313297121d295b6b..bae042b35fa83e2507a01f201ffbaa730db1ce8c 100644 (file)
 
 #define XE_DEFAULT_GTT_SIZE_MB          3072ULL /* 3GB by default */
 
-#define XE_BO_CREATE_USER_BIT          BIT(0)
+#define XE_BO_FLAG_USER                BIT(0)
 /* The bits below need to be contiguous, or things break */
-#define XE_BO_CREATE_SYSTEM_BIT                BIT(1)
-#define XE_BO_CREATE_VRAM0_BIT         BIT(2)
-#define XE_BO_CREATE_VRAM1_BIT         BIT(3)
-#define XE_BO_CREATE_VRAM_MASK         (XE_BO_CREATE_VRAM0_BIT | \
-                                        XE_BO_CREATE_VRAM1_BIT)
+#define XE_BO_FLAG_SYSTEM              BIT(1)
+#define XE_BO_FLAG_VRAM0               BIT(2)
+#define XE_BO_FLAG_VRAM1               BIT(3)
+#define XE_BO_FLAG_VRAM_MASK           (XE_BO_FLAG_VRAM0 | XE_BO_FLAG_VRAM1)
 /* -- */
-#define XE_BO_CREATE_STOLEN_BIT                BIT(4)
-#define XE_BO_CREATE_VRAM_IF_DGFX(tile) \
-       (IS_DGFX(tile_to_xe(tile)) ? XE_BO_CREATE_VRAM0_BIT << (tile)->id : \
-        XE_BO_CREATE_SYSTEM_BIT)
-#define XE_BO_CREATE_GGTT_BIT          BIT(5)
-#define XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT BIT(6)
-#define XE_BO_CREATE_PINNED_BIT                BIT(7)
-#define XE_BO_CREATE_NO_RESV_EVICT     BIT(8)
-#define XE_BO_DEFER_BACKING            BIT(9)
-#define XE_BO_SCANOUT_BIT              BIT(10)
-#define XE_BO_FIXED_PLACEMENT_BIT      BIT(11)
-#define XE_BO_PAGETABLE                        BIT(12)
-#define XE_BO_NEEDS_CPU_ACCESS         BIT(13)
-#define XE_BO_NEEDS_UC                 BIT(14)
+#define XE_BO_FLAG_STOLEN              BIT(4)
+#define XE_BO_FLAG_VRAM_IF_DGFX(tile)  (IS_DGFX(tile_to_xe(tile)) ? \
+                                        XE_BO_FLAG_VRAM0 << (tile)->id : \
+                                        XE_BO_FLAG_SYSTEM)
+#define XE_BO_FLAG_GGTT                        BIT(5)
+#define XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE BIT(6)
+#define XE_BO_FLAG_PINNED              BIT(7)
+#define XE_BO_FLAG_NO_RESV_EVICT       BIT(8)
+#define XE_BO_FLAG_DEFER_BACKING       BIT(9)
+#define XE_BO_FLAG_SCANOUT             BIT(10)
+#define XE_BO_FLAG_FIXED_PLACEMENT     BIT(11)
+#define XE_BO_FLAG_PAGETABLE           BIT(12)
+#define XE_BO_FLAG_NEEDS_CPU_ACCESS    BIT(13)
+#define XE_BO_FLAG_NEEDS_UC            BIT(14)
 #define XE_BO_NEEDS_64K                        BIT(15)
-#define XE_BO_GGTT_INVALIDATE          BIT(16)
+#define XE_BO_FLAG_GGTT_INVALIDATE     BIT(16)
 /* this one is trigger internally only */
-#define XE_BO_INTERNAL_TEST            BIT(30)
-#define XE_BO_INTERNAL_64K             BIT(31)
+#define XE_BO_FLAG_INTERNAL_TEST       BIT(30)
+#define XE_BO_FLAG_INTERNAL_64K                BIT(31)
 
 #define XELPG_PPGTT_PTE_PAT3           BIT_ULL(62)
 #define XE2_PPGTT_PTE_PAT4             BIT_ULL(61)
index 630695088b967a33967fc6ada9b723a64636239a..541b49007d738a8357b61e7579bf0f8ac33ed61e 100644 (file)
@@ -146,7 +146,7 @@ int xe_bo_restore_kernel(struct xe_device *xe)
                        return ret;
                }
 
-               if (bo->flags & XE_BO_CREATE_GGTT_BIT) {
+               if (bo->flags & XE_BO_FLAG_GGTT) {
                        struct xe_tile *tile = bo->tile;
 
                        mutex_lock(&tile->mem.ggtt->lock);
index 5b26af21e029fab26be8316cbc3d1a21bfdc8e9e..68f309f5e9815346009f1869086d3d2faff50fda 100644 (file)
@@ -217,7 +217,7 @@ xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage,
        dma_resv_lock(resv, NULL);
        bo = ___xe_bo_create_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
                                    0, /* Will require 1way or 2way for vm_bind */
-                                   ttm_bo_type_sg, XE_BO_CREATE_SYSTEM_BIT);
+                                   ttm_bo_type_sg, XE_BO_FLAG_SYSTEM);
        if (IS_ERR(bo)) {
                ret = PTR_ERR(bo);
                goto error;
index f659af221bd82b3b192f3292dc7650cfc5651a88..ff2239c0eda53a9212f63734f992d01dd6857a88 100644 (file)
@@ -224,11 +224,11 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)
         * scratch entires, rather keep the scratch page in system memory on
         * platforms where 64K pages are needed for VRAM.
         */
-       flags = XE_BO_CREATE_PINNED_BIT;
+       flags = XE_BO_FLAG_PINNED;
        if (ggtt->flags & XE_GGTT_FLAGS_64K)
-               flags |= XE_BO_CREATE_SYSTEM_BIT;
+               flags |= XE_BO_FLAG_SYSTEM;
        else
-               flags |= XE_BO_CREATE_VRAM_IF_DGFX(ggtt->tile);
+               flags |= XE_BO_FLAG_VRAM_IF_DGFX(ggtt->tile);
 
        ggtt->scratch = xe_managed_bo_create_pin_map(xe, ggtt->tile, XE_PAGE_SIZE, flags);
        if (IS_ERR(ggtt->scratch)) {
@@ -375,7 +375,7 @@ int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
 
 void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
 {
-       u16 cache_mode = bo->flags & XE_BO_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
+       u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
        u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
        u64 start = bo->ggtt_node.start;
        u64 offset, pte;
@@ -413,7 +413,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
                xe_ggtt_map_bo(ggtt, bo);
        mutex_unlock(&ggtt->lock);
 
-       if (!err && bo->flags & XE_BO_GGTT_INVALIDATE)
+       if (!err && bo->flags & XE_BO_FLAG_GGTT_INVALIDATE)
                xe_ggtt_invalidate(ggtt);
        xe_device_mem_access_put(tile_to_xe(ggtt->tile));
 
@@ -457,7 +457,7 @@ void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
        xe_tile_assert(ggtt->tile, bo->ggtt_node.size == bo->size);
 
        xe_ggtt_remove_node(ggtt, &bo->ggtt_node,
-                           bo->flags & XE_BO_GGTT_INVALIDATE);
+                           bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
 }
 
 int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p)
index 92dc442a5114f6cd084ab5a7a9a8a8a22387ed4c..60202b903687789d3054a0fd40a2adc9a3d125d1 100644 (file)
@@ -130,8 +130,8 @@ static int query_compatibility_version(struct xe_gsc *gsc)
 
        bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_VER_PKT_SZ * 2,
                                  ttm_bo_type_kernel,
-                                 XE_BO_CREATE_SYSTEM_BIT |
-                                 XE_BO_CREATE_GGTT_BIT);
+                                 XE_BO_FLAG_SYSTEM |
+                                 XE_BO_FLAG_GGTT);
        if (IS_ERR(bo)) {
                xe_gt_err(gt, "failed to allocate bo for GSC version query\n");
                return PTR_ERR(bo);
@@ -468,8 +468,8 @@ int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc)
 
        bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4M,
                                  ttm_bo_type_kernel,
-                                 XE_BO_CREATE_STOLEN_BIT |
-                                 XE_BO_CREATE_GGTT_BIT);
+                                 XE_BO_FLAG_STOLEN |
+                                 XE_BO_FLAG_GGTT);
        if (IS_ERR(bo))
                return PTR_ERR(bo);
 
index 1ced6b4d494659c84d87c9949ef7f66480498077..35e397b68dfcd8a9aa887240b41deb7b1a300490 100644 (file)
@@ -411,8 +411,8 @@ static int proxy_channel_alloc(struct xe_gsc *gsc)
 
        bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_PROXY_CHANNEL_SIZE,
                                  ttm_bo_type_kernel,
-                                 XE_BO_CREATE_SYSTEM_BIT |
-                                 XE_BO_CREATE_GGTT_BIT);
+                                 XE_BO_FLAG_SYSTEM |
+                                 XE_BO_FLAG_GGTT);
        if (IS_ERR(bo)) {
                kfree(csme);
                return PTR_ERR(bo);
index df2bffb7e22047bbfdbe085a9e7436d9c86acd08..e025f3e10c9bfd1b0de65b91228a17ec1f12e87a 100644 (file)
@@ -273,9 +273,9 @@ int xe_guc_ads_init(struct xe_guc_ads *ads)
        ads->regset_size = calculate_regset_size(gt);
 
        bo = xe_managed_bo_create_pin_map(xe, tile, guc_ads_size(ads) + MAX_GOLDEN_LRC_SIZE,
-                                         XE_BO_CREATE_SYSTEM_BIT |
-                                         XE_BO_CREATE_GGTT_BIT |
-                                         XE_BO_GGTT_INVALIDATE);
+                                         XE_BO_FLAG_SYSTEM |
+                                         XE_BO_FLAG_GGTT |
+                                         XE_BO_FLAG_GGTT_INVALIDATE);
        if (IS_ERR(bo))
                return PTR_ERR(bo);
 
index f4890e9a1e93357ce2a407698c5e735322dd74d4..6c37f4f9bddd45769766645cfb5a6f3cd669a521 100644 (file)
@@ -159,9 +159,9 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
        primelockdep(ct);
 
        bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
-                                         XE_BO_CREATE_SYSTEM_BIT |
-                                         XE_BO_CREATE_GGTT_BIT |
-                                         XE_BO_GGTT_INVALIDATE);
+                                         XE_BO_FLAG_SYSTEM |
+                                         XE_BO_FLAG_GGTT |
+                                         XE_BO_FLAG_GGTT_INVALIDATE);
        if (IS_ERR(bo))
                return PTR_ERR(bo);
 
index f035ad59f68e111f5625d4351d080efc11e0b06b..d9b570a154a261e5b257f9cb3dd114b3c85cbcb5 100644 (file)
@@ -78,9 +78,9 @@ int xe_guc_hwconfig_init(struct xe_guc *guc)
                return -EINVAL;
 
        bo = xe_managed_bo_create_pin_map(xe, tile, PAGE_ALIGN(size),
-                                         XE_BO_CREATE_SYSTEM_BIT |
-                                         XE_BO_CREATE_GGTT_BIT |
-                                         XE_BO_GGTT_INVALIDATE);
+                                         XE_BO_FLAG_SYSTEM |
+                                         XE_BO_FLAG_GGTT |
+                                         XE_BO_FLAG_GGTT_INVALIDATE);
        if (IS_ERR(bo))
                return PTR_ERR(bo);
        guc->hwconfig.bo = bo;
index 9302a7faaf0bf7cc022b5f3012aa4a3cbd677f9d..a37ee341942844aeaf8dcb695d092e1af597f79c 100644 (file)
@@ -84,9 +84,9 @@ int xe_guc_log_init(struct xe_guc_log *log)
        struct xe_bo *bo;
 
        bo = xe_managed_bo_create_pin_map(xe, tile, guc_log_size(),
-                                         XE_BO_CREATE_SYSTEM_BIT |
-                                         XE_BO_CREATE_GGTT_BIT |
-                                         XE_BO_GGTT_INVALIDATE);
+                                         XE_BO_FLAG_SYSTEM |
+                                         XE_BO_FLAG_GGTT |
+                                         XE_BO_FLAG_GGTT_INVALIDATE);
        if (IS_ERR(bo))
                return PTR_ERR(bo);
 
index 9c110537d135c4972c2abfdbfa66ec056d19a2b4..521ae24f23148d5cee18748d3eaf5e370ae39ee4 100644 (file)
@@ -929,9 +929,9 @@ int xe_guc_pc_init(struct xe_guc_pc *pc)
                return err;
 
        bo = xe_managed_bo_create_pin_map(xe, tile, size,
-                                         XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-                                         XE_BO_CREATE_GGTT_BIT |
-                                         XE_BO_GGTT_INVALIDATE);
+                                         XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+                                         XE_BO_FLAG_GGTT |
+                                         XE_BO_FLAG_GGTT_INVALIDATE);
        if (IS_ERR(bo))
                return PTR_ERR(bo);
 
index b545f850087cd8b9a7ae0031a2feb1fadba9458b..78318d73e4cf861c4e3e57a6fee79073cdbfb20b 100644 (file)
@@ -59,8 +59,8 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc)
        bo = xe_bo_create_pin_map(xe, gt_to_tile(gt), NULL,
                                  PXP43_HUC_AUTH_INOUT_SIZE * 2,
                                  ttm_bo_type_kernel,
-                                 XE_BO_CREATE_SYSTEM_BIT |
-                                 XE_BO_CREATE_GGTT_BIT);
+                                 XE_BO_FLAG_SYSTEM |
+                                 XE_BO_FLAG_GGTT);
        if (IS_ERR(bo))
                return PTR_ERR(bo);
 
index b94924a4f31902c66dda56c8395689e7d9e83e47..a688bb2d96ce3a272e68901467e037f6ce5b8702 100644 (file)
@@ -518,9 +518,9 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
        xe_reg_sr_apply_whitelist(hwe);
 
        hwe->hwsp = xe_managed_bo_create_pin_map(xe, tile, SZ_4K,
-                                                XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-                                                XE_BO_CREATE_GGTT_BIT |
-                                                XE_BO_GGTT_INVALIDATE);
+                                                XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+                                                XE_BO_FLAG_GGTT |
+                                                XE_BO_FLAG_GGTT_INVALIDATE);
        if (IS_ERR(hwe->hwsp)) {
                err = PTR_ERR(hwe->hwsp);
                goto err_name;
index 7f504392a8bf2735a7f7b437b655a4598c43c7a4..418661a8891839962bd0e90ccfd63c6dd533e7d7 100644 (file)
@@ -70,8 +70,8 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
                                  PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
                                             lmtt->ops->lmtt_pte_num(level)),
                                  ttm_bo_type_kernel,
-                                 XE_BO_CREATE_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
-                                 XE_BO_NEEDS_64K | XE_BO_CREATE_PINNED_BIT);
+                                 XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
+                                 XE_BO_NEEDS_64K | XE_BO_FLAG_PINNED);
        if (IS_ERR(bo)) {
                err = PTR_ERR(bo);
                goto out_free_pt;
index 2ba111b89a4727a9492756f029cf0da2d65bb01a..552ebf6eeee7c67a5872831d5ceb2ce1d4696ce7 100644 (file)
@@ -743,9 +743,9 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
        lrc->bo = xe_bo_create_pin_map(xe, tile, vm,
                                      ring_size + xe_lrc_size(xe, hwe->class),
                                      ttm_bo_type_kernel,
-                                     XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-                                     XE_BO_CREATE_GGTT_BIT |
-                                     XE_BO_GGTT_INVALIDATE);
+                                     XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+                                     XE_BO_FLAG_GGTT |
+                                     XE_BO_FLAG_GGTT_INVALIDATE);
        if (IS_ERR(lrc->bo))
                return PTR_ERR(lrc->bo);
 
index 0eb28681bec7d0396651009681b2494329d1dffe..95b6e9d7b7dbd1fb5811497eeec09e85ca3c9f42 100644 (file)
@@ -127,11 +127,11 @@ static int memirq_alloc_pages(struct xe_memirq *memirq)
        /* XXX: convert to managed bo */
        bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K,
                                  ttm_bo_type_kernel,
-                                 XE_BO_CREATE_SYSTEM_BIT |
-                                 XE_BO_CREATE_GGTT_BIT |
-                                 XE_BO_GGTT_INVALIDATE |
-                                 XE_BO_NEEDS_UC |
-                                 XE_BO_NEEDS_CPU_ACCESS);
+                                 XE_BO_FLAG_SYSTEM |
+                                 XE_BO_FLAG_GGTT |
+                                 XE_BO_FLAG_GGTT_INVALIDATE |
+                                 XE_BO_FLAG_NEEDS_UC |
+                                 XE_BO_FLAG_NEEDS_CPU_ACCESS);
        if (IS_ERR(bo)) {
                err = PTR_ERR(bo);
                goto out;
index ee1bb938c493487415445cd41c8b771080464522..5e0f48c51b7250e71a86d3bf752c845d5873da3a 100644 (file)
@@ -155,8 +155,8 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
        bo = xe_bo_create_pin_map(vm->xe, tile, vm,
                                  num_entries * XE_PAGE_SIZE,
                                  ttm_bo_type_kernel,
-                                 XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-                                 XE_BO_CREATE_PINNED_BIT);
+                                 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+                                 XE_BO_FLAG_PINNED);
        if (IS_ERR(bo))
                return PTR_ERR(bo);
 
index 580fe869b414f4278677e23d263a0ff99ba5dbda..271f13eeb85233ff1674fb2adf2543d6fc5fb6fc 100644 (file)
@@ -108,11 +108,11 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
        pt->level = level;
        bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
                                  ttm_bo_type_kernel,
-                                 XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-                                 XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT |
-                                 XE_BO_CREATE_PINNED_BIT |
-                                 XE_BO_CREATE_NO_RESV_EVICT |
-                                 XE_BO_PAGETABLE);
+                                 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+                                 XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
+                                 XE_BO_FLAG_PINNED |
+                                 XE_BO_FLAG_NO_RESV_EVICT |
+                                 XE_BO_FLAG_PAGETABLE);
        if (IS_ERR(bo)) {
                err = PTR_ERR(bo);
                goto err_kfree;
index 164202ac64544e8f17e7c6eb74c4baee9787bc89..8941522b7705d7ddd9f996f064ff709744280e92 100644 (file)
@@ -48,9 +48,9 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
        sa_manager->bo = NULL;
 
        bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel,
-                                 XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-                                 XE_BO_CREATE_GGTT_BIT |
-                                 XE_BO_GGTT_INVALIDATE);
+                                 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+                                 XE_BO_FLAG_GGTT |
+                                 XE_BO_FLAG_GGTT_INVALIDATE);
        if (IS_ERR(bo)) {
                drm_err(&xe->drm, "failed to allocate bo for sa manager: %ld\n",
                        PTR_ERR(bo));
index fb35e46d68b4982de2483dbf6b86bcb6698d2972..6ffecf9f23d1872e41fb844f8d6f9ceec0c2af25 100644 (file)
@@ -303,7 +303,7 @@ static int __xe_ttm_stolen_io_mem_reserve_stolen(struct xe_device *xe,
        XE_WARN_ON(IS_DGFX(xe));
 
        /* XXX: Require BO to be mapped to GGTT? */
-       if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_CREATE_GGTT_BIT)))
+       if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_FLAG_GGTT)))
                return -EIO;
 
        /* GGTT is always contiguously mapped */
index 3554f66872b9de6dccd095d9f590bbd95ddda279..ec62296aec33881849a12fc495a9670c9229769b 100644 (file)
@@ -763,8 +763,8 @@ int xe_uc_fw_init(struct xe_uc_fw *uc_fw)
                return 0;
 
        err = uc_fw_copy(uc_fw, fw->data, fw->size,
-                        XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_GGTT_BIT |
-                        XE_BO_GGTT_INVALIDATE);
+                        XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT |
+                        XE_BO_FLAG_GGTT_INVALIDATE);
 
        uc_fw_release(fw);
 
index 8b32aa5003dff766e902cd44ce61be245c6e03b4..f4bfb27059569fce44427cb38ae7cb11e15e3492 100644 (file)
@@ -3069,7 +3069,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                        goto put_obj;
                }
 
-               if (bos[i]->flags & XE_BO_INTERNAL_64K) {
+               if (bos[i]->flags & XE_BO_FLAG_INTERNAL_64K) {
                        if (XE_IOCTL_DBG(xe, obj_offset &
                                         XE_64K_PAGE_MASK) ||
                            XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||