*
* However, if we cannot reset an engine by itself, we cannot
* clean up a hanging persistent context without causing
- * colateral damage, and we should not pretend we can by
+ * collateral damage, and we should not pretend we can by
* exposing the interface.
*/
if (!intel_has_reset_engine(to_gt(i915)))
*
* However, if we cannot reset an engine by itself, we cannot
* clean up a hanging persistent context without causing
- * colateral damage, and we should not pretend we can by
+ * collateral damage, and we should not pretend we can by
* exposing the interface.
*/
if (!intel_has_reset_engine(to_gt(ctx->i915)))
/*
* One for the xarray and one for the caller. We need to grab
- * the reference *prior* to making the ctx visble to userspace
+ * the reference *prior* to making the ctx visible to userspace
* in gem_context_register(), as at any point after that
* userspace can try to race us with another thread destroying
* the context under our feet.
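As an illustration of the reference discipline described above, a minimal sketch (the context_xa name and the registration flow are assumptions, not the driver's exact code):

	/*
	 * ctx starts with one reference from creation, which the xarray
	 * owns once the store publishes it. Take the caller's reference
	 * first, so a racing thread that finds and destroys the entry
	 * cannot drop the final reference under our feet.
	 */
	kref_get(&ctx->ref);	/* one for the caller */
	old = xa_store(&file_priv->context_xa, id, ctx, GFP_KERNEL);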
* Execbuf uses the I915_EXEC_RING_MASK as an index into this
* array to select which HW context + engine to execute on. For
* the default array, the user_ring_map[] is used to translate
- * the legacy uABI onto the approprate index (e.g. both
+ * the legacy uABI onto the appropriate index (e.g. both
* I915_EXEC_DEFAULT and I915_EXEC_RENDER select the same
- * context, and I915_EXEC_BSD is weird). For a use defined
+ * context, and I915_EXEC_BSD is weird). For a user defined
* array, execbuf uses I915_EXEC_RING_MASK as a plain index.
*
* User defined by I915_CONTEXT_PARAM_ENGINE (when the
* @vm: unique address space (GTT)
*
* In full-ppgtt mode, each context has its own address space ensuring
- * complete seperation of one client from all others.
+ * complete separation of one client from all others.
*
* In other modes, this is a NULL pointer with the expectation that
* the caller uses the shared global GTT.
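A hedged sketch of what the NULL case means for a caller (the fallback expression is an assumption about the surrounding code, not quoted from it):

	struct i915_address_space *vm;

	/* use the per-context ppgtt when present, else the shared GGTT */
	vm = ctx->vm ?: &to_gt(ctx->i915)->ggtt->vm;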
* For objects created by userspace through GEM_CREATE with pat_index
* set by set_pat extension, simply return 0 here without touching
* the cache setting, because such objects should have an immutable
- * cache setting by desgin and always managed by userspace.
+ * cache setting by design and are always managed by userspace.
*/
if (i915_gem_object_has_cache_level(obj, cache_level))
return 0;
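For contrast, a sketch of the early return the comment describes for set_pat objects; the pat_set_by_user flag name is an assumption here:

	/* immutable, userspace-managed cache setting: leave it alone */
	if (obj->pat_set_by_user)
		return 0;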
struct intel_gt_buffer_pool_node *batch_pool; /** pool node for batch buffer */
/**
- * Indicate either the size of the hastable used to resolve
+ * Indicates either the size of the hashtable used to resolve
* relocation handles, or if negative that we are using a direct
* index into the execobj[].
*/
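A sketch of how such a signed dual-purpose field is typically consumed (names hypothetical, not the actual execbuf lookup):

	if (eb->lut_size < 0)			/* direct index into execobj[] */
		ev = &eb->vma[handle];
	else					/* hashtable keyed by handle */
		ev = eb_lookup_vma_hashed(eb, handle);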
/*
* Error path, cannot use intel_context_timeline_lock as
- * that is user interruptable and this clean up step
+ * that is user interruptible and this cleanup step
* must be done.
*/
mutex_lock(&ce->timeline->mutex);
/*
* Anything smaller than the min_page_size can't be freely inserted into
- * the GTT, due to alignemnt restrictions. For such special objects,
+ * the GTT, due to alignment restrictions. For such special objects,
* make sure we force memcpy based suspend-resume. In the future we can
* revisit this, either by allowing special mis-aligned objects in the
* migration path, or by mapping all of LMEM upfront using cheap 1G
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
- /* Consider only shrinkable ojects. */
+ /* Consider only shrinkable objects. */
if (!i915_gem_object_is_shrinkable(obj))
return false;
* i915_gem_shrink_all - Shrink buffer object caches completely
* @i915: i915 device
*
- * This is a simple wraper around i915_gem_shrink() to aggressively shrink all
+ * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
* caches completely. It also first waits for and retires all outstanding
* requests to be able to release backing storage for active objects.
*
* Since none of this applies to new tiling layouts on modern platforms like
* W, Ys and Yf tiling, GEM only allows object tiling to be set to X or Y tiled.
* Anything else can be handled in userspace entirely without the kernel's
- * invovlement.
+ * involvement.
*/
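From userspace, that restriction surfaces through the set-tiling ioctl; a hedged usage sketch (error handling elided, fd is an open i915 device):

	struct drm_i915_gem_set_tiling arg = {
		.handle      = handle,
		.tiling_mode = I915_TILING_Y,	/* only NONE, X or Y accepted */
		.stride      = stride,
	};

	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg);	/* retries on EINTR */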
/**
* If we need to place an LMEM resource which doesn't need CPU
* access then we should try not to victimize mappable objects
* first, since we likely end up stealing more of the mappable
- * portion. And likewise when we try to find space for a mappble
+ * portion. And likewise when we try to find space for a mappable
* object, we know not to ever victimize objects that don't
* occupy any mappable pages.
*/
* sequence, where at the end we can do the move for real.
*
* The special case here is when the dst_mem is TTM_PL_SYSTEM,
- * which doens't require any kind of move, so it should be safe
+ * which doesn't require any kind of move, so it should be safe
* to skip all the below and call ttm_bo_move_null() here, where
* the caller in __i915_ttm_get_pages() will take care of the
* rest, since we should have a valid ttm_tt.
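A sketch of that early-out, using the names from the comment (assumed, not quoted from the move path):

	if (dst_mem->mem_type == TTM_PL_SYSTEM) {
		/* no data movement required; let the caller finish up */
		ttm_bo_move_null(bo, dst_mem);
		return 0;
	}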
/*
* Make sure that we don't burst into a ball of flames upon falling back
- * to tmpfs, which we rely on if on the off-chance we encouter a failure
+ * to tmpfs, which we rely on if, on the off-chance, we encounter a failure
* when setting up gemfs.
*/
}
/*
- * Allocate in the mappable portion, should be no suprises here.
+ * Allocate in the mappable portion; there should be no surprises here.
*/
err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), mr, 0);
if (err)