{
u64 addr = batch_base_ofs + drm_suballoc_soffset(bb->bo);
- XE_BUG_ON(!(wa_eng->vm->flags & XE_VM_FLAG_MIGRATION));
+ XE_WARN_ON(!(wa_eng->vm->flags & XE_VM_FLAG_MIGRATION));
return __xe_bb_create_job(wa_eng, bb, &addr);
}
4 * second_idx,
};
- BUG_ON(second_idx > bb->len);
- BUG_ON(!(kernel_eng->vm->flags & XE_VM_FLAG_MIGRATION));
+ XE_WARN_ON(second_idx > bb->len);
+ XE_WARN_ON(!(kernel_eng->vm->flags & XE_VM_FLAG_MIGRATION));
return __xe_bb_create_job(kernel_eng, bb, addr);
}
{
u64 addr = xe_sa_bo_gpu_addr(bb->bo);
- BUG_ON(kernel_eng->vm && kernel_eng->vm->flags & XE_VM_FLAG_MIGRATION);
+ XE_WARN_ON(kernel_eng->vm && kernel_eng->vm->flags & XE_VM_FLAG_MIGRATION);
return __xe_bb_create_job(kernel_eng, bb, &addr);
}
static struct xe_tile *
mem_type_to_tile(struct xe_device *xe, u32 mem_type)
{
- XE_BUG_ON(mem_type != XE_PL_STOLEN && !mem_type_is_vram(mem_type));
+ XE_WARN_ON(mem_type != XE_PL_STOLEN && !mem_type_is_vram(mem_type));
return &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)];
}
struct ttm_place place = { .mem_type = mem_type };
u64 io_size = tile->mem.vram.io_size;
- XE_BUG_ON(!tile->mem.vram.usable_size);
+ XE_WARN_ON(!tile->mem.vram.usable_size);
/*
* For eviction / restore on suspend / resume objects
unsigned long num_pages = tt->num_pages;
int ret;
- XE_BUG_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL);
+ XE_WARN_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL);
if (xe_tt->sg)
return 0;
ttm);
struct sg_table *sg;
- XE_BUG_ON(!attach);
- XE_BUG_ON(!ttm_bo->ttm);
+ XE_WARN_ON(!attach);
+ XE_WARN_ON(!ttm_bo->ttm);
if (new_res->mem_type == XE_PL_SYSTEM)
goto out;
else if (mem_type_is_vram(old_mem_type))
tile = mem_type_to_tile(xe, old_mem_type);
- XE_BUG_ON(!tile);
- XE_BUG_ON(!tile->migrate);
+ XE_WARN_ON(!tile);
+ XE_WARN_ON(!tile->migrate);
trace_xe_bo_move(bo);
xe_device_mem_access_get(xe);
goto out;
}
- XE_BUG_ON(new_mem->start !=
+ XE_WARN_ON(new_mem->start !=
bo->placements->fpfn);
iosys_map_set_vaddr_iomem(&bo->vmap, new_addr);
int err;
/* Only kernel objects should set GT */
- XE_BUG_ON(tile && type != ttm_bo_type_kernel);
+ XE_WARN_ON(tile && type != ttm_bo_type_kernel);
if (XE_WARN_ON(!size))
return ERR_PTR(-EINVAL);
if (!tile && flags & XE_BO_CREATE_STOLEN_BIT)
tile = xe_device_get_root_tile(xe);
- XE_BUG_ON(!tile);
+ XE_WARN_ON(!tile);
if (flags & XE_BO_CREATE_STOLEN_BIT &&
flags & XE_BO_FIXED_PLACEMENT_BIT) {
struct xe_device *xe = xe_bo_device(bo);
int err;
- XE_BUG_ON(bo->vm);
- XE_BUG_ON(!xe_bo_is_user(bo));
+ XE_WARN_ON(bo->vm);
+ XE_WARN_ON(!xe_bo_is_user(bo));
if (!xe_bo_is_pinned(bo)) {
err = xe_bo_validate(bo, NULL, false);
int err;
/* We currently don't expect user BO to be pinned */
- XE_BUG_ON(xe_bo_is_user(bo));
+ XE_WARN_ON(xe_bo_is_user(bo));
/* Pinned object must be in GGTT or have pinned flag */
- XE_BUG_ON(!(bo->flags & (XE_BO_CREATE_PINNED_BIT |
+ XE_WARN_ON(!(bo->flags & (XE_BO_CREATE_PINNED_BIT |
XE_BO_CREATE_GGTT_BIT)));
/*
* No reason we can't support pinning imported dma-bufs we just don't
* expect to pin an imported dma-buf.
*/
- XE_BUG_ON(bo->ttm.base.import_attach);
+ XE_WARN_ON(bo->ttm.base.import_attach);
/* We only expect at most 1 pin */
- XE_BUG_ON(xe_bo_is_pinned(bo));
+ XE_WARN_ON(xe_bo_is_pinned(bo));
err = xe_bo_validate(bo, NULL, false);
if (err)
struct ttm_place *place = &(bo->placements[0]);
if (mem_type_is_vram(place->mem_type)) {
- XE_BUG_ON(!(place->flags & TTM_PL_FLAG_CONTIGUOUS));
+ XE_WARN_ON(!(place->flags & TTM_PL_FLAG_CONTIGUOUS));
place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) -
vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
{
struct xe_device *xe = xe_bo_device(bo);
- XE_BUG_ON(bo->vm);
- XE_BUG_ON(!xe_bo_is_pinned(bo));
- XE_BUG_ON(!xe_bo_is_user(bo));
+ XE_WARN_ON(bo->vm);
+ XE_WARN_ON(!xe_bo_is_pinned(bo));
+ XE_WARN_ON(!xe_bo_is_user(bo));
if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link)) {
spin_lock(&xe->pinned.lock);
{
struct xe_device *xe = xe_bo_device(bo);
- XE_BUG_ON(bo->ttm.base.import_attach);
- XE_BUG_ON(!xe_bo_is_pinned(bo));
+ XE_WARN_ON(bo->ttm.base.import_attach);
+ XE_WARN_ON(!xe_bo_is_pinned(bo));
if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
bo->flags & XE_BO_INTERNAL_TEST)) {
struct ttm_place *place = &(bo->placements[0]);
if (mem_type_is_vram(place->mem_type)) {
- XE_BUG_ON(list_empty(&bo->pinned_link));
+ XE_WARN_ON(list_empty(&bo->pinned_link));
spin_lock(&xe->pinned.lock);
list_del_init(&bo->pinned_link);
struct xe_res_cursor cur;
u64 page;
- XE_BUG_ON(page_size > PAGE_SIZE);
+ XE_WARN_ON(page_size > PAGE_SIZE);
page = offset >> PAGE_SHIFT;
offset &= (PAGE_SIZE - 1);
if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
- XE_BUG_ON(!bo->ttm.ttm);
+ XE_WARN_ON(!bo->ttm.ttm);
xe_res_first_sg(xe_bo_get_sg(bo), page << PAGE_SHIFT,
page_size, &cur);
LIST_HEAD(objs);
LIST_HEAD(dups);
- XE_BUG_ON(!ww);
+ XE_WARN_ON(!ww);
tv_bo.num_shared = num_resv;
tv_bo.bo = &bo->ttm;
static inline void xe_bo_unlock_vm_held(struct xe_bo *bo)
{
if (bo) {
- XE_BUG_ON(bo->vm && bo->ttm.base.resv != xe_vm_resv(bo->vm));
+ XE_WARN_ON(bo->vm && bo->ttm.base.resv != xe_vm_resv(bo->vm));
if (bo->vm)
xe_vm_assert_held(bo->vm);
else
struct ww_acquire_ctx *ctx)
{
if (bo) {
- XE_BUG_ON(bo->vm || (bo->ttm.type != ttm_bo_type_sg &&
- bo->ttm.base.resv != &bo->ttm.base._resv));
+ XE_WARN_ON(bo->vm || (bo->ttm.type != ttm_bo_type_sg &&
+ bo->ttm.base.resv != &bo->ttm.base._resv));
dma_resv_lock(bo->ttm.base.resv, ctx);
}
}
static inline void xe_bo_unlock_no_vm(struct xe_bo *bo)
{
if (bo) {
- XE_BUG_ON(bo->vm || (bo->ttm.type != ttm_bo_type_sg &&
- bo->ttm.base.resv != &bo->ttm.base._resv));
+ XE_WARN_ON(bo->vm || (bo->ttm.type != ttm_bo_type_sg &&
+ bo->ttm.base.resv != &bo->ttm.base._resv));
dma_resv_unlock(bo->ttm.base.resv);
}
}
static inline u32
xe_bo_ggtt_addr(struct xe_bo *bo)
{
- XE_BUG_ON(bo->ggtt_node.size > bo->size);
- XE_BUG_ON(bo->ggtt_node.start + bo->ggtt_node.size > (1ull << 32));
+ XE_WARN_ON(bo->ggtt_node.size > bo->size);
+ XE_WARN_ON(bo->ggtt_node.start + bo->ggtt_node.size > (1ull << 32));
return bo->ggtt_node.start;
}
* We expect validate to trigger a move VRAM and our move code
* should setup the iosys map.
*/
- XE_BUG_ON(iosys_map_is_null(&bo->vmap));
- XE_BUG_ON(!xe_bo_is_vram(bo));
+ XE_WARN_ON(iosys_map_is_null(&bo->vmap));
+ XE_WARN_ON(!xe_bo_is_vram(bo));
xe_bo_put(bo);
lrc_desc = xe_lrc_descriptor(lrc);
if (GRAPHICS_VERx100(xe) >= 1250) {
- XE_BUG_ON(!FIELD_FIT(XEHP_SW_CTX_ID, ctx_id));
+ XE_WARN_ON(!FIELD_FIT(XEHP_SW_CTX_ID, ctx_id));
lrc_desc |= FIELD_PREP(XEHP_SW_CTX_ID, ctx_id);
} else {
- XE_BUG_ON(!FIELD_FIT(GEN11_SW_CTX_ID, ctx_id));
+ XE_WARN_ON(!FIELD_FIT(GEN11_SW_CTX_ID, ctx_id));
lrc_desc |= FIELD_PREP(GEN11_SW_CTX_ID, ctx_id);
}
struct xe_execlist_port *port = exl->port;
enum xe_engine_priority priority = exl->active_priority;
- XE_BUG_ON(priority == XE_ENGINE_PRIORITY_UNSET);
- XE_BUG_ON(priority < 0);
- XE_BUG_ON(priority >= ARRAY_SIZE(exl->port->active));
+ XE_WARN_ON(priority == XE_ENGINE_PRIORITY_UNSET);
+ XE_WARN_ON(priority < 0);
+ XE_WARN_ON(priority >= ARRAY_SIZE(exl->port->active));
spin_lock_irq(&port->lock);
struct xe_device *xe = gt_to_xe(e->gt);
int err;
- XE_BUG_ON(xe_device_guc_submission_enabled(xe));
+ XE_WARN_ON(xe_device_guc_submission_enabled(xe));
drm_info(&xe->drm, "Enabling execlist submission (GuC submission disabled)\n");
struct xe_execlist_engine *exl = e->execlist;
unsigned long flags;
- XE_BUG_ON(xe_device_guc_submission_enabled(gt_to_xe(e->gt)));
+ XE_WARN_ON(xe_device_guc_submission_enabled(gt_to_xe(e->gt)));
spin_lock_irqsave(&exl->port->lock, flags);
if (WARN_ON(exl->active_priority != XE_ENGINE_PRIORITY_UNSET))
mutex_init(&fw->lock);
/* Assuming gen11+ so assert this assumption is correct */
- XE_BUG_ON(GRAPHICS_VER(gt_to_xe(gt)) < 11);
+ XE_WARN_ON(GRAPHICS_VER(gt_to_xe(gt)) < 11);
if (xe->info.graphics_verx100 >= 1270) {
domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT],
int i, j;
/* Assuming gen11+ so assert this assumption is correct */
- XE_BUG_ON(GRAPHICS_VER(gt_to_xe(gt)) < 11);
+ XE_WARN_ON(GRAPHICS_VER(gt_to_xe(gt)) < 11);
if (!xe_gt_is_media_type(gt))
domain_init(&fw->domains[XE_FW_DOMAIN_ID_RENDER],
xe_force_wake_ref(struct xe_force_wake *fw,
enum xe_force_wake_domains domain)
{
- XE_BUG_ON(!domain);
+ XE_WARN_ON(!domain);
return fw->domains[ffs(domain) - 1].ref;
}
xe_force_wake_assert_held(struct xe_force_wake *fw,
enum xe_force_wake_domains domain)
{
- XE_BUG_ON(!(fw->awake_domains & domain));
+ XE_WARN_ON(!(fw->awake_domains & domain));
}
#endif
void xe_ggtt_set_pte(struct xe_ggtt *ggtt, u64 addr, u64 pte)
{
- XE_BUG_ON(addr & XE_PTE_MASK);
- XE_BUG_ON(addr >= ggtt->size);
+ XE_WARN_ON(addr & XE_PTE_MASK);
+ XE_WARN_ON(addr >= ggtt->size);
writeq(pte, &ggtt->gsm[addr >> XE_PTE_SHIFT]);
}
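(Aside, for context: the two converted checks above encode the GSM
layout -- one 64-bit PTE per 4 KiB of GGTT address space -- so addr
must be page aligned and fall inside the GGTT. A standalone sketch of
the index math, assuming the driver's XE_PTE_SHIFT of 12; illustration
only, not part of the patch:)

/* Illustration: a 4 KiB GGTT granule at offset addr is described by
 * the qword at GSM index addr >> 12 (XE_PTE_SHIFT). */
static inline unsigned long ggtt_pte_index(u64 addr)
{
	return addr >> 12;
}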
u64 end = start + size - 1;
u64 scratch_pte;
- XE_BUG_ON(start >= end);
+ XE_WARN_ON(start >= end);
if (ggtt->scratch)
scratch_pte = xe_ggtt_pte_encode(ggtt->scratch, 0);
for (addr = 0; addr < ggtt->size; addr += XE_PAGE_SIZE) {
unsigned int i = addr / XE_PAGE_SIZE;
- XE_BUG_ON(addr > U32_MAX);
+ XE_WARN_ON(addr > U32_MAX);
if (ggtt->gsm[i] == scratch_pte)
continue;
if (XE_WARN_ON(bo->ggtt_node.size)) {
/* Someone's already inserted this BO in the GGTT */
- XE_BUG_ON(bo->ggtt_node.size != bo->size);
+ XE_WARN_ON(bo->ggtt_node.size != bo->size);
return 0;
}
return;
/* This BO is not currently in the GGTT */
- XE_BUG_ON(bo->ggtt_node.size != bo->size);
+ XE_WARN_ON(bo->ggtt_node.size != bo->size);
xe_ggtt_remove_node(ggtt, &bo->ggtt_node);
}
case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
return f25_mhz;
default:
- XE_BUG_ON("NOT_POSSIBLE");
+ XE_WARN_ON("NOT_POSSIBLE");
return 0;
}
}
u32 freq = 0;
/* Assuming gen11+ so assert this assumption is correct */
- XE_BUG_ON(GRAPHICS_VER(gt_to_xe(gt)) < 11);
+ XE_WARN_ON(GRAPHICS_VER(gt_to_xe(gt)) < 11);
if (ctc_reg & CTC_SOURCE_DIVIDE_LOGIC) {
freq = read_reference_ts_freq(gt);
char name[8];
int i;
- XE_BUG_ON(!minor->debugfs_root);
+ XE_WARN_ON(!minor->debugfs_root);
sprintf(name, "gt%d", gt->info.id);
root = debugfs_create_dir(name, minor->debugfs_root);
u32 action[MAX_TLB_INVALIDATION_LEN];
int len = 0;
- XE_BUG_ON(!vma);
+ XE_WARN_ON(!vma);
action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
start = ALIGN_DOWN(xe_vma_start(vma), length);
}
- XE_BUG_ON(length < SZ_4K);
- XE_BUG_ON(!is_power_of_2(length));
- XE_BUG_ON(length & GENMASK(ilog2(SZ_16M) - 1, ilog2(SZ_2M) + 1));
- XE_BUG_ON(!IS_ALIGNED(start, length));
+ XE_WARN_ON(length < SZ_4K);
+ XE_WARN_ON(!is_power_of_2(length));
+ XE_WARN_ON(length & GENMASK(ilog2(SZ_16M) - 1, ilog2(SZ_2M) + 1));
+ XE_WARN_ON(!IS_ALIGNED(start, length));
action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
action[len++] = xe_vma_vm(vma)->usm.asid;
action[len++] = ilog2(length) - ilog2(SZ_4K);
}
- XE_BUG_ON(len > MAX_TLB_INVALIDATION_LEN);
+ XE_WARN_ON(len > MAX_TLB_INVALIDATION_LEN);
return send_tlb_invalidation(&gt->uc.guc, fence, action, len);
}
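(Aside: the asserts in this hunk encode the selective-invalidation
range format -- the GuC is given a base address plus an encoded size
of ilog2(length) - ilog2(SZ_4K), so length must be a power of two of
at least 4 KiB and start must be aligned down to it. Below is a
standalone sketch of one way to build such a range; the helper is
hypothetical, not the driver's exact algorithm, and it ignores the
additional GENMASK() restriction on supported sizes:)

/* Hypothetical sketch: grow a power-of-two length, keeping start
 * aligned down to it, until [start, start + length) covers the
 * whole VMA. The result satisfies the asserts above. */
static void pick_inval_range(u64 va_start, u64 va_end,
			     u64 *start, u64 *length)
{
	u64 len = SZ_4K;

	for (;;) {
		u64 base = va_start & ~(len - 1);

		if (base + len >= va_end) {
			*start = base;
			*length = len;
			return;
		}
		len <<= 1;
	}
}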
{
u32 addr = xe_bo_ggtt_addr(bo);
- XE_BUG_ON(addr < xe_wopcm_size(guc_to_xe(guc)));
- XE_BUG_ON(addr >= GUC_GGTT_TOP);
- XE_BUG_ON(bo->size > GUC_GGTT_TOP - addr);
+ XE_WARN_ON(addr < xe_wopcm_size(guc_to_xe(guc)));
+ XE_WARN_ON(addr >= GUC_GGTT_TOP);
+ XE_WARN_ON(bo->size > GUC_GGTT_TOP - addr);
return addr;
}
BUILD_BUG_ON(VF_SW_FLAG_COUNT != MED_VF_SW_FLAG_COUNT);
- XE_BUG_ON(guc->ct.enabled);
- XE_BUG_ON(!len);
- XE_BUG_ON(len > VF_SW_FLAG_COUNT);
- XE_BUG_ON(len > MED_VF_SW_FLAG_COUNT);
- XE_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) !=
+ XE_WARN_ON(guc->ct.enabled);
+ XE_WARN_ON(!len);
+ XE_WARN_ON(len > VF_SW_FLAG_COUNT);
+ XE_WARN_ON(len > MED_VF_SW_FLAG_COUNT);
+ XE_WARN_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) !=
GUC_HXG_ORIGIN_HOST);
- XE_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) !=
+ XE_WARN_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) !=
GUC_HXG_TYPE_REQUEST);
retry:
};
int ret;
- XE_BUG_ON(len > 2);
- XE_BUG_ON(len == 1 && upper_32_bits(val));
+ XE_WARN_ON(len > 2);
+ XE_WARN_ON(len == 1 && upper_32_bits(val));
/* Self config must go over MMIO */
ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));
static size_t guc_ads_regset_size(struct xe_guc_ads *ads)
{
- XE_BUG_ON(!ads->regset_size);
+ XE_WARN_ON(!ads->regset_size);
return ads->regset_size;
}
struct xe_gt *gt = ads_to_gt(ads);
u32 prev_regset_size = ads->regset_size;
- XE_BUG_ON(!ads->bo);
+ XE_WARN_ON(!ads->bo);
ads->golden_lrc_size = calculate_golden_lrc_size(ads);
ads->regset_size = calculate_regset_size(gt);
regset_used += count * sizeof(struct guc_mmio_reg);
}
- XE_BUG_ON(regset_used > ads->regset_size);
+ XE_WARN_ON(regset_used > ads->regset_size);
}
static void guc_um_init_params(struct xe_guc_ads *ads)
offsetof(struct __guc_ads_blob, system_info));
u32 base = xe_bo_ggtt_addr(ads->bo);
- XE_BUG_ON(!ads->bo);
+ XE_WARN_ON(!ads->bo);
xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size);
guc_policies_init(ads);
offsetof(struct __guc_ads_blob, system_info));
u32 base = xe_bo_ggtt_addr(ads->bo);
- XE_BUG_ON(!ads->bo);
+ XE_WARN_ON(!ads->bo);
xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size);
guc_policies_init(ads);
engine_enabled_masks[guc_class]))
continue;
- XE_BUG_ON(!gt->default_lrc[class]);
+ XE_WARN_ON(!gt->default_lrc[class]);
real_size = xe_lrc_size(xe, class);
alloc_size = PAGE_ALIGN(real_size);
offset += alloc_size;
}
- XE_BUG_ON(total_size != ads->golden_lrc_size);
+ XE_WARN_ON(total_size != ads->golden_lrc_size);
}
void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads)
struct xe_bo *bo;
int err;
- XE_BUG_ON(guc_ct_size() % PAGE_SIZE);
+ XE_WARN_ON(guc_ct_size() % PAGE_SIZE);
mutex_init(&ct->lock);
spin_lock_init(&ct->fast_lock);
struct xe_device *xe = ct_to_xe(ct);
int err;
- XE_BUG_ON(ct->enabled);
+ XE_WARN_ON(ct->enabled);
guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
{
- XE_BUG_ON(g2h_len > ct->ctbs.g2h.info.space);
+ XE_WARN_ON(g2h_len > ct->ctbs.g2h.info.space);
if (g2h_len) {
lockdep_assert_held(&ct->fast_lock);
full_len = len + GUC_CTB_HDR_LEN;
lockdep_assert_held(&ct->lock);
- XE_BUG_ON(full_len > (GUC_CTB_MSG_MAX_LEN - GUC_CTB_HDR_LEN));
- XE_BUG_ON(tail > h2g->info.size);
+ XE_WARN_ON(full_len > (GUC_CTB_MSG_MAX_LEN - GUC_CTB_HDR_LEN));
+ XE_WARN_ON(tail > h2g->info.size);
/* Command will wrap, zero fill (NOPs), return and check credits again */
if (tail + full_len > h2g->info.size) {
{
int ret;
- XE_BUG_ON(g2h_len && g2h_fence);
- XE_BUG_ON(num_g2h && g2h_fence);
- XE_BUG_ON(g2h_len && !num_g2h);
- XE_BUG_ON(!g2h_len && num_g2h);
+ XE_WARN_ON(g2h_len && g2h_fence);
+ XE_WARN_ON(num_g2h && g2h_fence);
+ XE_WARN_ON(g2h_len && !num_g2h);
+ XE_WARN_ON(!g2h_len && num_g2h);
lockdep_assert_held(&ct->lock);
if (unlikely(ct->ctbs.h2g.info.broken)) {
unsigned int sleep_period_ms = 1;
int ret;
- XE_BUG_ON(g2h_len && g2h_fence);
+ XE_WARN_ON(g2h_len && g2h_fence);
lockdep_assert_held(&ct->lock);
xe_device_assert_mem_access(ct_to_xe(ct));
{
int ret;
- XE_BUG_ON(g2h_len && g2h_fence);
+ XE_WARN_ON(g2h_len && g2h_fence);
mutex_lock(&ct->lock);
ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
{
struct xe_device *xe = guc_to_xe(guc);
- XE_BUG_ON(!guc->hwconfig.bo);
+ XE_WARN_ON(!guc->hwconfig.bo);
xe_map_memcpy_from(xe, dst, &guc->hwconfig.bo->vmap, 0,
guc->hwconfig.size);
size_t size;
int i, j;
- XE_BUG_ON(!log->bo);
+ XE_WARN_ON(!log->bo);
size = log->bo->size;
#define DW_PER_READ 128
- XE_BUG_ON(size % (DW_PER_READ * sizeof(u32)));
+ XE_WARN_ON(size % (DW_PER_READ * sizeof(u32)));
for (i = 0; i < size / sizeof(u32); i += DW_PER_READ) {
u32 read[DW_PER_READ];
static void __guc_engine_policy_add_##func(struct engine_policy *policy, \
u32 data) \
{ \
- XE_BUG_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \
+ XE_WARN_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \
\
policy->h2g.klv[policy->count].kl = \
FIELD_PREP(GUC_KLV_0_KEY, \
u32 timeslice_us = e->sched_props.timeslice_us;
u32 preempt_timeout_us = e->sched_props.preempt_timeout_us;
- XE_BUG_ON(!engine_registered(e));
+ XE_WARN_ON(!engine_registered(e));
__guc_engine_policy_start_klv(&policy, e->guc->id);
__guc_engine_policy_add_priority(&policy, xe_engine_prio_to_guc[prio]);
int len = 0;
int i;
- XE_BUG_ON(!xe_engine_is_parallel(e));
+ XE_WARN_ON(!xe_engine_is_parallel(e));
action[len++] = XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
action[len++] = info->flags;
action[len++] = upper_32_bits(xe_lrc_descriptor(lrc));
}
- XE_BUG_ON(len > MAX_MLRC_REG_SIZE);
+ XE_WARN_ON(len > MAX_MLRC_REG_SIZE);
#undef MAX_MLRC_REG_SIZE
xe_guc_ct_send(&guc->ct, action, len, 0, 0);
struct xe_lrc *lrc = e->lrc;
struct guc_ctxt_registration_info info;
- XE_BUG_ON(engine_registered(e));
+ XE_WARN_ON(engine_registered(e));
memset(&info, 0, sizeof(info));
info.context_idx = e->guc->id;
if (wq_wait_for_space(e, wq_space_until_wrap(e)))
return -ENODEV;
- XE_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
+ XE_WARN_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
parallel_write(xe, map, wq[e->guc->wqi_tail / sizeof(u32)],
FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
wqi[i++] = lrc->ring.tail / sizeof(u64);
}
- XE_BUG_ON(i != wqi_size / sizeof(u32));
+ XE_WARN_ON(i != wqi_size / sizeof(u32));
iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch,
wq[e->guc->wqi_tail / sizeof(u32)]));
xe_map_memcpy_to(xe, &map, 0, wqi, wqi_size);
e->guc->wqi_tail += wqi_size;
- XE_BUG_ON(e->guc->wqi_tail > WQ_SIZE);
+ XE_WARN_ON(e->guc->wqi_tail > WQ_SIZE);
xe_device_wmb(xe);
int len = 0;
bool extra_submit = false;
- XE_BUG_ON(!engine_registered(e));
+ XE_WARN_ON(!engine_registered(e));
if (xe_engine_is_parallel(e))
wq_item_append(e);
struct xe_engine *e = job->engine;
bool lr = xe_engine_is_lr(e);
- XE_BUG_ON((engine_destroyed(e) || engine_pending_disable(e)) &&
- !engine_banned(e) && !engine_suspended(e));
+ XE_WARN_ON((engine_destroyed(e) || engine_pending_disable(e)) &&
+ !engine_banned(e) && !engine_suspended(e));
trace_xe_sched_job_run(job);
struct xe_engine *e = msg->private_data;
struct xe_guc *guc = engine_to_guc(e);
- XE_BUG_ON(e->flags & ENGINE_FLAG_KERNEL);
+ XE_WARN_ON(e->flags & ENGINE_FLAG_KERNEL);
trace_xe_engine_cleanup_entity(e);
if (engine_registered(e))
{
struct xe_guc *guc = engine_to_guc(e);
- XE_BUG_ON(!engine_suspended(e) && !engine_killed(e) &&
- !guc_read_stopped(guc));
- XE_BUG_ON(!e->guc->suspend_pending);
+ XE_WARN_ON(!engine_suspended(e) && !engine_killed(e) &&
+ !guc_read_stopped(guc));
+ XE_WARN_ON(!e->guc->suspend_pending);
e->guc->suspend_pending = false;
smp_wmb();
__guc_engine_process_msg_resume(msg);
break;
default:
- XE_BUG_ON("Unknown message type");
+ XE_WARN_ON("Unknown message type");
}
}
long timeout;
int err;
- XE_BUG_ON(!xe_device_guc_submission_enabled(guc_to_xe(guc)));
+ XE_WARN_ON(!xe_device_guc_submission_enabled(guc_to_xe(guc)));
ge = kzalloc(sizeof(*ge), GFP_KERNEL);
if (!ge)
{
struct xe_gpu_scheduler *sched = &e->guc->sched;
- XE_BUG_ON(engine_registered(e));
- XE_BUG_ON(engine_banned(e));
- XE_BUG_ON(engine_killed(e));
+ XE_WARN_ON(engine_registered(e));
+ XE_WARN_ON(engine_banned(e));
+ XE_WARN_ON(engine_killed(e));
sched->base.timeout = job_timeout_ms;
{
struct xe_sched_msg *msg = e->guc->static_msgs + STATIC_MSG_RESUME;
- XE_BUG_ON(e->guc->suspend_pending);
+ XE_WARN_ON(e->guc->suspend_pending);
guc_engine_add_msg(e, msg, RESUME);
}
struct xe_engine *e;
unsigned long index;
- XE_BUG_ON(guc_read_stopped(guc) != 1);
+ XE_WARN_ON(guc_read_stopped(guc) != 1);
mutex_lock(&guc->submission_state.lock);
struct xe_engine *e;
unsigned long index;
- XE_BUG_ON(guc_read_stopped(guc) != 1);
+ XE_WARN_ON(guc_read_stopped(guc) != 1);
mutex_lock(&guc->submission_state.lock);
atomic_dec(&guc->submission_state.stopped);
return NULL;
}
- XE_BUG_ON(e->guc->id != guc_id);
+ XE_WARN_ON(e->guc->id != guc_id);
return e;
}
if (xe_uc_fw_is_disabled(&huc->fw))
return 0;
- XE_BUG_ON(xe_uc_fw_is_running(&huc->fw));
+ XE_WARN_ON(xe_uc_fw_is_running(&huc->fw));
if (!xe_uc_fw_is_loaded(&huc->fw))
return -ENOEXEC;
static void hw_engine_mmio_write32(struct xe_hw_engine *hwe, struct xe_reg reg,
u32 val)
{
- XE_BUG_ON(reg.addr & hwe->mmio_base);
+ XE_WARN_ON(reg.addr & hwe->mmio_base);
xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);
reg.addr += hwe->mmio_base;
static u32 hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg)
{
- XE_BUG_ON(reg.addr & hwe->mmio_base);
+ XE_WARN_ON(reg.addr & hwe->mmio_base);
xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);
reg.addr += hwe->mmio_base;
info = &engine_infos[id];
- XE_BUG_ON(hwe->gt);
+ XE_WARN_ON(hwe->gt);
hwe->gt = gt;
hwe->class = info->class;
struct xe_tile *tile = gt_to_tile(gt);
int err;
- XE_BUG_ON(id >= ARRAY_SIZE(engine_infos) || !engine_infos[id].name);
- XE_BUG_ON(!(gt->info.engine_mask & BIT(id)));
+ XE_WARN_ON(id >= ARRAY_SIZE(engine_infos) || !engine_infos[id].name);
+ XE_WARN_ON(!(gt->info.engine_mask & BIT(id)));
xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
xe_reg_sr_apply_whitelist(hwe);
struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);
trace_xe_hw_fence_free(fence);
- XE_BUG_ON(!list_empty(&fence->irq_link));
+ XE_WARN_ON(!list_empty(&fence->irq_link));
call_rcu(&dma_fence->rcu, fence_free);
}
*regs |= MI_LRI_LRM_CS_MMIO;
regs++;
- XE_BUG_ON(!count);
+ XE_WARN_ON(!count);
do {
u32 offset = 0;
u8 v;
{ \
struct iosys_map map = lrc->bo->vmap; \
\
- XE_BUG_ON(iosys_map_is_null(&map)); \
+ XE_WARN_ON(iosys_map_is_null(&map)); \
iosys_map_incr(&map, __xe_lrc_##elem##_offset(lrc)); \
return map; \
} \
u32 rhs;
size_t aligned_size;
- XE_BUG_ON(!IS_ALIGNED(size, 4));
+ XE_WARN_ON(!IS_ALIGNED(size, 4));
aligned_size = ALIGN(size, 8);
ring = __xe_lrc_ring_map(lrc);
- XE_BUG_ON(lrc->ring.tail >= lrc->ring.size);
+ XE_WARN_ON(lrc->ring.tail >= lrc->ring.size);
rhs = lrc->ring.size - lrc->ring.tail;
if (size > rhs) {
__xe_lrc_write_ring(lrc, ring, data, rhs);
#include <linux/bug.h>
#define XE_WARN_ON WARN_ON
-#define XE_BUG_ON BUG_ON
#define XE_IOCTL_DBG(xe, cond) \
((cond) && (drm_dbg(&(xe)->drm, \
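(Aside, on the semantics of this swap: BUG_ON() stops the calling
thread via an oops and, depending on configuration, panics the
machine, while WARN_ON() prints a warning with a backtrace and
evaluates to its condition, so execution continues past the check. A
minimal sketch of the fall-through idiom the conversion relies on; the
helper below is hypothetical, not from this series:)

/* Hypothetical call site: WARN_ON() returns the condition, so the
 * caller can report the bug and still fail gracefully where BUG_ON()
 * would have crashed. */
static int xe_check_size(u64 size, u64 limit)
{
	if (XE_WARN_ON(size > limit))
		return -EINVAL;
	return 0;
}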
static u64 xe_migrate_vm_addr(u64 slot, u32 level)
{
- XE_BUG_ON(slot >= NUM_PT_SLOTS);
+ XE_WARN_ON(slot >= NUM_PT_SLOTS);
/* First slot is reserved for mapping of PT bo and bb, start from 1 */
return (slot + 1ULL) << xe_pt_shift(level + 1);
BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1));
/* Need to be sure everything fits in the first PT, or create more */
- XE_BUG_ON(m->batch_base_ofs + batch->size >= SZ_2M);
+ XE_WARN_ON(m->batch_base_ofs + batch->size >= SZ_2M);
bo = xe_bo_create_pin_map(vm->xe, tile, vm,
num_entries * XE_PAGE_SIZE,
}
if (!IS_DGFX(xe)) {
- XE_BUG_ON(xe->info.supports_usm);
+ XE_WARN_ON(xe->info.supports_usm);
/* Write out batch too */
m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
num_ccs_blks = DIV_ROUND_UP(xe_device_ccs_bytes(gt_to_xe(gt), size),
NUM_CCS_BYTES_PER_BLOCK);
- XE_BUG_ON(num_ccs_blks > NUM_CCS_BLKS_PER_XFER);
+ XE_WARN_ON(num_ccs_blks > NUM_CCS_BLKS_PER_XFER);
*cs++ = XY_CTRL_SURF_COPY_BLT |
(src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT |
(dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT |
u64 src_ofs, u64 dst_ofs, unsigned int size,
unsigned int pitch)
{
- XE_BUG_ON(size / pitch > S16_MAX);
- XE_BUG_ON(pitch / 4 > S16_MAX);
- XE_BUG_ON(pitch > U16_MAX);
+ XE_WARN_ON(size / pitch > S16_MAX);
+ XE_WARN_ON(pitch / 4 > S16_MAX);
+ XE_WARN_ON(pitch > U16_MAX);
bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2);
bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch;
* At the moment, we don't support copying CCS metadata from
* system to system.
*/
- XE_BUG_ON(!src_is_vram && !dst_is_vram);
+ XE_WARN_ON(!src_is_vram && !dst_is_vram);
emit_copy_ccs(gt, bb, dst_ofs, dst_is_vram, src_ofs,
src_is_vram, dst_size);
*cs++ = upper_32_bits(src_ofs);
*cs++ = FIELD_PREP(PVC_MS_MOCS_INDEX_MASK, mocs);
- XE_BUG_ON(cs - bb->cs != len + bb->len);
+ XE_WARN_ON(cs - bb->cs != len + bb->len);
bb->len += len;
}
*cs++ = 0;
}
- XE_BUG_ON(cs - bb->cs != len + bb->len);
+ XE_WARN_ON(cs - bb->cs != len + bb->len);
bb->len += len;
}
* PDE. This requires a BO that is almost vm->size big.
*
* This shouldn't be possible in practice.. might change when 16K
- * pages are used. Hence the BUG_ON.
+ * pages are used. Hence the XE_WARN_ON.
*/
- XE_BUG_ON(update->qwords > 0x1ff);
+ XE_WARN_ON(update->qwords > 0x1ff);
if (!ppgtt_ofs) {
ppgtt_ofs = xe_migrate_vram_ofs(xe_bo_addr(update->pt_bo, 0,
XE_PAGE_SIZE));
* Worst case: Sum(2 * (each lower level page size) + (top level page size))
* Should be reasonably bound..
*/
- XE_BUG_ON(batch_size >= SZ_128K);
+ XE_WARN_ON(batch_size >= SZ_128K);
bb = xe_bb_new(gt, batch_size, !eng && xe->info.supports_usm);
if (IS_ERR(bb))
if (!IS_DGFX(xe)) {
ppgtt_ofs = NUM_KERNEL_PDE - 1;
if (eng) {
- XE_BUG_ON(num_updates > NUM_VMUSA_WRITES_PER_UNIT);
+ XE_WARN_ON(num_updates > NUM_VMUSA_WRITES_PER_UNIT);
sa_bo = drm_suballoc_new(&m->vm_update_sa, 1,
GFP_KERNEL, true, 0);
for (i = 0; i < num_updates; i++) {
struct xe_bo *pt_bo = updates[i].pt_bo;
- BUG_ON(pt_bo->size != SZ_4K);
+ XE_WARN_ON(pt_bo->size != SZ_4K);
addr = xe_pte_encode(pt_bo, 0, XE_CACHE_WB, 0);
bb->cs[bb->len++] = lower_32_bits(addr);
pte |= XE_PDPE_PS_1G;
/* XXX: Does hw support 1 GiB pages? */
- XE_BUG_ON(pt_level > 2);
+ XE_WARN_ON(pt_level > 2);
return pte;
}
pt->level = level;
pt->base.dir = level ? &as_xe_pt_dir(pt)->dir : NULL;
- XE_BUG_ON(level > XE_VM_MAX_LEVEL);
+ XE_WARN_ON(level > XE_VM_MAX_LEVEL);
return pt;
if (!pt)
return;
- XE_BUG_ON(!list_empty(&pt->bo->vmas));
+ XE_WARN_ON(!list_empty(&pt->bo->vmas));
xe_bo_unpin(pt->bo);
xe_bo_put_deferred(pt->bo, deferred);
struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
pgoff_t end_offset;
- XE_BUG_ON(!*child);
- XE_BUG_ON(!level && xe_child->is_compact);
+ XE_WARN_ON(!*child);
+ XE_WARN_ON(!level && xe_child->is_compact);
/*
* Note that we're called from an entry callback, and we're dealing
*num_entries = 0;
err = xe_pt_stage_bind(tile, vma, entries, num_entries);
if (!err)
- BUG_ON(!*num_entries);
+ XE_WARN_ON(!*num_entries);
else /* abort! */
xe_pt_abort_bind(vma, entries, *num_entries);
u64 end;
u64 start;
- XE_BUG_ON(entry->pt->is_compact);
+ XE_WARN_ON(entry->pt->is_compact);
start = entry->ofs * page_size;
end = start + page_size * entry->qwords;
vm_dbg(&xe->drm,
err = xe_pt_prepare_bind(tile, vma, entries, &num_entries, rebind);
if (err)
goto err;
- XE_BUG_ON(num_entries > ARRAY_SIZE(entries));
+ XE_WARN_ON(num_entries > ARRAY_SIZE(entries));
xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
xe_pt_calc_rfence_interval(vma, &bind_pt_update, entries,
{
struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
- XE_BUG_ON(!*child);
- XE_BUG_ON(!level && xe_child->is_compact);
+ XE_WARN_ON(!*child);
+ XE_WARN_ON(!level && xe_child->is_compact);
xe_pt_check_kill(addr, next, level - 1, xe_child, action, walk);
xe_vma_start(vma), xe_vma_end(vma) - 1, e);
num_entries = xe_pt_stage_unbind(tile, vma, entries);
- XE_BUG_ON(num_entries > ARRAY_SIZE(entries));
+ XE_WARN_ON(num_entries > ARRAY_SIZE(entries));
xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
xe_pt_calc_rfence_interval(vma, &unbind_pt_update, entries,
if (!res)
goto fallback;
- XE_BUG_ON(start + size > res->size);
+ XE_WARN_ON(start + size > res->size);
cur->mem_type = res->mem_type;
while (start >= sg_dma_len(sgl)) {
start -= sg_dma_len(sgl);
sgl = sg_next(sgl);
- XE_BUG_ON(!sgl);
+ XE_WARN_ON(!sgl);
}
cur->start = start;
u64 start, u64 size,
struct xe_res_cursor *cur)
{
- XE_BUG_ON(!sg);
- XE_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE) ||
- !IS_ALIGNED(size, PAGE_SIZE));
+ XE_WARN_ON(!sg);
+ XE_WARN_ON(!IS_ALIGNED(start, PAGE_SIZE) ||
+ !IS_ALIGNED(size, PAGE_SIZE));
cur->node = NULL;
cur->start = start;
cur->remaining = size;
struct list_head *next;
u64 start;
- XE_BUG_ON(size > cur->remaining);
+ XE_WARN_ON(size > cur->remaining);
cur->remaining -= size;
if (!cur->remaining)
i = emit_user_interrupt(dw, i);
- XE_BUG_ON(i > MAX_JOB_SIZE_DW);
+ XE_WARN_ON(i > MAX_JOB_SIZE_DW);
xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
}
i = emit_user_interrupt(dw, i);
- XE_BUG_ON(i > MAX_JOB_SIZE_DW);
+ XE_WARN_ON(i > MAX_JOB_SIZE_DW);
xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
}
i = emit_user_interrupt(dw, i);
- XE_BUG_ON(i > MAX_JOB_SIZE_DW);
+ XE_WARN_ON(i > MAX_JOB_SIZE_DW);
xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
}
i = emit_user_interrupt(dw, i);
- XE_BUG_ON(i > MAX_JOB_SIZE_DW);
+ XE_WARN_ON(i > MAX_JOB_SIZE_DW);
xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
}
/* Sanity check */
for (j = 0; j < e->width; ++j)
- XE_BUG_ON(cf->base.seqno != fences[j]->seqno);
+ XE_WARN_ON(cf->base.seqno != fences[j]->seqno);
job->fence = &cf->base;
}
struct xe_ttm_stolen_mgr *mgr = to_stolen_mgr(ttm_mgr);
struct xe_res_cursor cur;
- XE_BUG_ON(!mgr->io_base);
+ XE_WARN_ON(!mgr->io_base);
if (xe_ttm_stolen_cpu_access_needs_ggtt(xe))
return mgr->io_base + xe_bo_ggtt_addr(bo) + offset;
#ifdef CONFIG_X86
struct xe_bo *bo = ttm_to_xe_bo(mem->bo);
- XE_BUG_ON(IS_DGFX(xe));
+ XE_WARN_ON(IS_DGFX(xe));
/* XXX: Require BO to be mapped to GGTT? */
if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_CREATE_GGTT_BIT)))
if (type == XE_UC_FW_TYPE_GUC)
return container_of(uc_fw, struct xe_gt, uc.guc.fw);
- XE_BUG_ON(type != XE_UC_FW_TYPE_HUC);
+ XE_WARN_ON(type != XE_UC_FW_TYPE_HUC);
return container_of(uc_fw, struct xe_gt, uc.huc.fw);
}
u32 count;
int i;
- XE_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
+ XE_WARN_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
entries = blobs_all[uc_fw->type].entries;
count = blobs_all[uc_fw->type].count;
struct xe_device *xe = uc_fw_to_xe(uc_fw);
u32 size = min_t(u32, uc_fw->rsa_size, max_len);
- XE_BUG_ON(size % 4);
- XE_BUG_ON(!xe_uc_fw_is_available(uc_fw));
+ XE_WARN_ON(size % 4);
+ XE_WARN_ON(!xe_uc_fw_is_available(uc_fw));
xe_map_memcpy_from(xe, dst, &uc_fw->bo->vmap,
xe_uc_fw_rsa_offset(uc_fw), size);
struct xe_gt *gt = uc_fw_to_gt(uc_fw);
struct xe_guc *guc = &gt->uc.guc;
- XE_BUG_ON(uc_fw->type != XE_UC_FW_TYPE_GUC);
+ XE_WARN_ON(uc_fw->type != XE_UC_FW_TYPE_GUC);
XE_WARN_ON(uc_fw->major_ver_found < 70);
if (uc_fw->major_ver_found > 70 || uc_fw->minor_ver_found >= 6) {
* before we're looked at the HW caps to see if we have uc support
*/
BUILD_BUG_ON(XE_UC_FIRMWARE_UNINITIALIZED);
- XE_BUG_ON(uc_fw->status);
- XE_BUG_ON(uc_fw->path);
+ XE_WARN_ON(uc_fw->status);
+ XE_WARN_ON(uc_fw->path);
uc_fw_auto_select(xe, uc_fw);
xe_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
int err;
/* make sure the status was cleared the last time we reset the uc */
- XE_BUG_ON(xe_uc_fw_is_loaded(uc_fw));
+ XE_WARN_ON(xe_uc_fw_is_loaded(uc_fw));
if (!xe_uc_fw_is_loadable(uc_fw))
return -ENOEXEC;
__xe_uc_fw_status(struct xe_uc_fw *uc_fw)
{
/* shouldn't call this before checking hw/blob availability */
- XE_BUG_ON(uc_fw->status == XE_UC_FIRMWARE_UNINITIALIZED);
+ XE_WARN_ON(uc_fw->status == XE_UC_FIRMWARE_UNINITIALIZED);
return uc_fw->status;
}
bool read_only = xe_vma_read_only(vma);
lockdep_assert_held(&vm->lock);
- XE_BUG_ON(!xe_vma_is_userptr(vma));
+ XE_WARN_ON(!xe_vma_is_userptr(vma));
retry:
if (vma->gpuva.flags & XE_VMA_DESTROYED)
return 0;
struct dma_fence *fence;
link = list->next;
- XE_BUG_ON(link == list);
+ XE_WARN_ON(link == list);
fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
e, e->compute.context,
int err;
bool wait;
- XE_BUG_ON(!xe_vm_in_compute_mode(vm));
+ XE_WARN_ON(!xe_vm_in_compute_mode(vm));
down_write(&vm->lock);
long wait;
int __maybe_unused tries = 0;
- XE_BUG_ON(!xe_vm_in_compute_mode(vm));
+ XE_WARN_ON(!xe_vm_in_compute_mode(vm));
trace_xe_vm_rebind_worker_enter(vm);
down_write(&vm->lock);
struct dma_fence *fence;
long err;
- XE_BUG_ON(!xe_vma_is_userptr(vma));
+ XE_WARN_ON(!xe_vma_is_userptr(vma));
trace_xe_vma_userptr_invalidate(vma);
if (!mmu_notifier_range_blockable(range))
struct xe_tile *tile;
u8 id;
- XE_BUG_ON(start >= end);
- XE_BUG_ON(end >= vm->size);
+ XE_WARN_ON(start >= end);
+ XE_WARN_ON(end >= vm->size);
if (!bo && !is_null) /* userptr */
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
struct xe_vm *vm = xe_vma_vm(vma);
lockdep_assert_held_write(&vm->lock);
- XE_BUG_ON(!list_empty(&vma->combined_links.destroy));
+ XE_WARN_ON(!list_empty(&vma->combined_links.destroy));
if (xe_vma_is_userptr(vma)) {
XE_WARN_ON(!(vma->gpuva.flags & XE_VMA_DESTROYED));
if (xe_vm_is_closed_or_banned(vm))
return NULL;
- XE_BUG_ON(start + range > vm->size);
+ XE_WARN_ON(start + range > vm->size);
gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
{
int err;
- XE_BUG_ON(xe_vma_vm(vma) != vm);
+ XE_WARN_ON(xe_vma_vm(vma) != vm);
lockdep_assert_held(&vm->lock);
err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
{
- XE_BUG_ON(xe_vma_vm(vma) != vm);
+ XE_WARN_ON(xe_vma_vm(vma) != vm);
lockdep_assert_held(&vm->lock);
drm_gpuva_remove(&vma->gpuva);
struct drm_gpuva *gpuva, *next;
u8 id;
- XE_BUG_ON(vm->preempt.num_engines);
+ XE_WARN_ON(vm->preempt.num_engines);
xe_vm_close(vm);
flush_async_ops(vm);
struct async_op_fence *afence =
container_of(fence, struct async_op_fence, fence);
- XE_BUG_ON(xe_vm_no_dma_fences(afence->vm));
+ XE_WARN_ON(xe_vm_no_dma_fences(afence->vm));
smp_rmb();
return wait_event_interruptible(afence->wq, afence->started);
} else {
int i;
- XE_BUG_ON(!xe_vm_in_fault_mode(vm));
+ XE_WARN_ON(!xe_vm_in_fault_mode(vm));
fence = dma_fence_get_stub();
if (last_op) {
{
int err;
- XE_BUG_ON(region > ARRAY_SIZE(region_to_mem_type));
+ XE_WARN_ON(region > ARRAY_SIZE(region_to_mem_type));
if (!xe_vma_has_no_bo(vma)) {
err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
return -ENODATA;
break;
default:
- XE_BUG_ON("NOT POSSIBLE");
+ XE_WARN_ON("NOT POSSIBLE");
return -EINVAL;
}
(ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
break;
default:
- XE_BUG_ON("NOT POSSIBLE");
+ XE_WARN_ON("NOT POSSIBLE");
}
}
#else
}
break;
case XE_VM_BIND_OP_UNMAP_ALL:
- XE_BUG_ON(!bo);
+ XE_WARN_ON(!bo);
err = xe_bo_lock(bo, &ww, 0, true);
if (err)
}
break;
default:
- XE_BUG_ON("NOT POSSIBLE");
+ XE_WARN_ON("NOT POSSIBLE");
ops = ERR_PTR(-EINVAL);
}
int err, i;
lockdep_assert_held_write(&vm->lock);
- XE_BUG_ON(num_ops_list > 1 && !async);
+ XE_WARN_ON(num_ops_list > 1 && !async);
if (num_syncs && async) {
u64 seqno;
struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
bool first = !async_list;
- XE_BUG_ON(!first && !async);
+ XE_WARN_ON(!first && !async);
INIT_LIST_HEAD(&op->link);
if (first)
/* Nothing to do */
break;
default:
- XE_BUG_ON("NOT POSSIBLE");
+ XE_WARN_ON("NOT POSSIBLE");
}
last_op = op;
/* Nothing to do */
break;
default:
- XE_BUG_ON("NOT POSSIBLE");
+ XE_WARN_ON("NOT POSSIBLE");
}
op->flags |= XE_VMA_OP_COMMITTED;
op->flags & XE_VMA_OP_LAST);
break;
default:
- XE_BUG_ON("NOT POSSIBLE");
+ XE_WARN_ON("NOT POSSIBLE");
}
ttm_eu_backoff_reservation(&ww, &objs);
op);
break;
default:
- XE_BUG_ON("NOT POSSIBLE");
+ XE_WARN_ON("NOT POSSIBLE");
}
return ret;
/* Nothing to do */
break;
default:
- XE_BUG_ON("NOT POSSIBLE");
+ XE_WARN_ON("NOT POSSIBLE");
}
}
LIST_HEAD(objs);
LIST_HEAD(dups);
- XE_BUG_ON(!ww);
+ XE_WARN_ON(!ww);
tv_vm.num_shared = num_resv;
tv_vm.bo = xe_vm_ttm_bo(vm);
u8 id;
int ret;
- XE_BUG_ON(!xe_vm_in_fault_mode(xe_vma_vm(vma)));
+ XE_WARN_ON(!xe_vm_in_fault_mode(xe_vma_vm(vma)));
XE_WARN_ON(xe_vma_is_null(vma));
trace_xe_vma_usm_invalidate(vma);
passed = (rvalue & mask) <= (value & mask);
break;
default:
- XE_BUG_ON("Not possible");
+ XE_WARN_ON("Not possible");
}
return passed ? 0 : 1;
u32 mask;
int err;
- XE_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK));
- XE_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK);
- XE_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK));
- XE_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);
+ XE_WARN_ON(!(base & GUC_WOPCM_OFFSET_MASK));
+ XE_WARN_ON(base & ~GUC_WOPCM_OFFSET_MASK);
+ XE_WARN_ON(!(size & GUC_WOPCM_SIZE_MASK));
+ XE_WARN_ON(size & ~GUC_WOPCM_SIZE_MASK);
mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
err = xe_mmio_write32_and_verify(gt, GUC_WOPCM_SIZE, size, mask,
drm_dbg(&xe->drm, "WOPCM: %uK\n", wopcm->size / SZ_1K);
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
- XE_BUG_ON(guc_fw_size >= wopcm->size);
- XE_BUG_ON(huc_fw_size >= wopcm->size);
- XE_BUG_ON(ctx_rsvd + WOPCM_RESERVED_SIZE >= wopcm->size);
+ XE_WARN_ON(guc_fw_size >= wopcm->size);
+ XE_WARN_ON(huc_fw_size >= wopcm->size);
+ XE_WARN_ON(ctx_rsvd + WOPCM_RESERVED_SIZE >= wopcm->size);
locked = __wopcm_regs_locked(gt, &guc_wopcm_base, &guc_wopcm_size);
if (locked) {
guc_fw_size, huc_fw_size)) {
wopcm->guc.base = guc_wopcm_base;
wopcm->guc.size = guc_wopcm_size;
- XE_BUG_ON(!wopcm->guc.base);
- XE_BUG_ON(!wopcm->guc.size);
+ XE_WARN_ON(!wopcm->guc.base);
+ XE_WARN_ON(!wopcm->guc.size);
} else {
drm_notice(&xe->drm, "Unsuccessful WOPCM partitioning\n");
return -E2BIG;