--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ ... @@ __vm_create_scratch_for_read()
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
-	int err;

	obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
	if (IS_ERR(obj))
@@ ... @@ __vm_create_scratch_for_read()
		return vma;
	}

+ return vma;
+}
+
+struct i915_vma *
+__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)
+{
+ struct i915_vma *vma;
+ int err;
+
+ vma = __vm_create_scratch_for_read(vm, size);
+ if (IS_ERR(vma))
+ return vma;
+
err = i915_vma_pin(vma, 0, 0,
i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
if (err) {
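
The two intel_gtt.c hunks above split creation from pinning: __vm_create_scratch_for_read() now stops once the vma exists, and the pin moves into the new helper. For reference, the new helper as it reads with the hunks applied; the error path inside "if (err) {" is truncated above, so the cleanup shown there (dropping the vma reference and propagating the pin error) is an assumption, not quoted from the patch:

	struct i915_vma *
	__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)
	{
		struct i915_vma *vma;
		int err;

		vma = __vm_create_scratch_for_read(vm, size);
		if (IS_ERR(vma))
			return vma;

		err = i915_vma_pin(vma, 0, 0,
				   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
		if (err) {
			/* assumed: drop the reference taken on creation */
			i915_vma_put(vma);
			return ERR_PTR(err);
		}

		return vma;
	}
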
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ ... @@
struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);
+struct i915_vma *
+__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size);
+
static inline struct sgt_dma {
struct scatterlist *sg;
dma_addr_t dma, max;
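
With both prototypes exported, the contract (as this patch uses it; my reading of the diff, not kerneldoc quoted from the header) is:

	/*
	 * __vm_create_scratch_for_read() - creates the internal object and
	 *     its vma only; the caller pins, e.g. with i915_vma_pin_ww()
	 *     under a ww context it already holds, as engine_wa_list_verify()
	 *     does below.
	 * __vm_create_scratch_for_read_pinned() - also pins the vma
	 *     (PIN_GLOBAL for a GGTT address space, PIN_USER otherwise); the
	 *     caller must unpin it again, e.g. via
	 *     i915_vma_unpin_and_release().
	 */
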
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ ... @@ engine_wa_list_verify()
	if (err)
		goto err_pm;

+ err = i915_vma_pin_ww(vma, &ww, 0, 0,
+ i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
+ if (err)
+ goto err_unpin;
+
rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto err_unpin;
+ goto err_vma;
}
err = i915_request_await_object(rq, vma->obj, true);
@@ ... @@ engine_wa_list_verify()
err_rq:
i915_request_put(rq);
+err_vma:
+ i915_vma_unpin(vma);
err_unpin:
intel_context_unpin(ce);
err_pm:
@@ ... @@ engine_wa_list_verify()
	}

i915_gem_ww_ctx_fini(&ww);
intel_engine_pm_put(ce->engine);
- i915_vma_unpin(vma);
i915_vma_put(vma);
return err;
}
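
The goto targets above only make sense inside i915's usual ww transaction. A condensed sketch of engine_wa_list_verify() with these hunks applied, assuming the standard i915_gem_ww_ctx_init()/backoff()/fini() pattern around the visible labels; the request construction and register readback between pinning and unwinding are elided:

	intel_engine_pm_get(ce->engine);
	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_gem_object_lock(vma->obj, &ww);
	if (err == 0)
		err = intel_context_pin_ww(ce, &ww);
	if (err)
		goto err_pm;

	err = i915_vma_pin_ww(vma, &ww, 0, 0,
			      i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (err)
		goto err_unpin;

	/* ... build the request, emit the SRM reads, verify the values ... */

	/* success falls through the labels, unwinding in reverse order */
err_vma:
	i915_vma_unpin(vma);
err_unpin:
	intel_context_unpin(ce);
err_pm:
	if (err == -EDEADLK) {
		/* contention with another ww transaction: unwind and retry */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_engine_pm_put(ce->engine);
	i915_vma_put(vma);
	return err;
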
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ ... @@ preserved_virtual_engine()
	int err = 0;
	u32 *cs;

- scratch = __vm_create_scratch_for_read(&siblings[0]->gt->ggtt->vm,
- PAGE_SIZE);
+ scratch =
+ __vm_create_scratch_for_read_pinned(&siblings[0]->gt->ggtt->vm,
+ PAGE_SIZE);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ ... @@
static struct i915_vma *create_scratch(struct intel_gt *gt)
{
-	return __vm_create_scratch_for_read(&gt->ggtt->vm, PAGE_SIZE);
+	return __vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
}
static bool is_active(struct i915_request *rq)
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ ... @@ live_mocs_init()
	if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
		arg->mocs = table;

-	arg->scratch = __vm_create_scratch_for_read(&gt->ggtt->vm, PAGE_SIZE);
+	arg->scratch =
+		__vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
if (IS_ERR(arg->scratch))
return PTR_ERR(arg->scratch);
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ ... @@ check_dirty_whitelist()
	u32 *cs, *results;

	sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
-	scratch = __vm_create_scratch_for_read(ce->vm, sz);
+	scratch = __vm_create_scratch_for_read_pinned(ce->vm, sz);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
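
check_dirty_whitelist() consumes the scratch by reading it back on the CPU; since the vma now arrives pinned, the readback needs no further pinning of the vma itself. A sketch of the usual readback idiom, assuming the common i915_gem_object_pin_map() path (illustrative, not quoted from this function):

	u32 *results;

	results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
	if (IS_ERR(results))
		return PTR_ERR(results);

	/* ... compare results[] against the values written by the GPU ... */

	i915_gem_object_unpin_map(scratch->obj);
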
@@ ... @@ live_isolated_whitelist()
	for (i = 0; i < ARRAY_SIZE(client); i++) {
client[i].scratch[0] =
- __vm_create_scratch_for_read(gt->vm, 4096);
+ __vm_create_scratch_for_read_pinned(gt->vm, 4096);
if (IS_ERR(client[i].scratch[0])) {
err = PTR_ERR(client[i].scratch[0]);
goto err;
}
client[i].scratch[1] =
- __vm_create_scratch_for_read(gt->vm, 4096);
+ __vm_create_scratch_for_read_pinned(gt->vm, 4096);
if (IS_ERR(client[i].scratch[1])) {
err = PTR_ERR(client[i].scratch[1]);
i915_vma_unpin_and_release(&client[i].scratch[0], 0);
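
Since both per-client scratches come back pinned, the teardown mirrors the error path visible above: each is dropped with i915_vma_unpin_and_release(), which unpins the vma before releasing the object reference. A sketch of the full unwind (assumed, not quoted from the patch):

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
	}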