// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020-2022 Intel Corporation
 */

#include <kunit/test.h>

#include "xe_pci.h"

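/*
 * Returns true if the sanity check failed: @fence is an error pointer, NULL,
 * or did not signal within five seconds. Error and timeout also flag a
 * KUnit failure.
 */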
static bool sanity_fence_failed(struct xe_device *xe, struct dma_fence *fence,
				const char *str, struct kunit *test)
{
	long ret;

	if (IS_ERR(fence)) {
		KUNIT_FAIL(test, "Failed to create fence for %s: %li\n", str,
			   PTR_ERR(fence));
		return true;
	}
	if (!fence)
		return true;

	ret = dma_fence_wait_timeout(fence, false, 5 * HZ);
	if (ret <= 0) {
		KUNIT_FAIL(test, "Fence wait failed or timed out for %s: %li\n",
			   str, ret);
		return true;
	}

	return false;
}

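/*
 * Turn @bb into a migration job on the migrate engine, submit it and wait
 * for it to complete. Returns 0 on success, a negative error code otherwise.
 */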
static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe,
			  struct xe_bb *bb, u32 second_idx, const char *str,
			  struct kunit *test)
{
	struct xe_sched_job *job = xe_bb_create_migration_job(m->eng, bb,
							      m->batch_base_ofs,
							      second_idx);
	struct dma_fence *fence;

	if (IS_ERR(job)) {
		KUNIT_FAIL(test, "Failed to create sanity job for %s: %li\n",
			   str, PTR_ERR(job));
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	if (sanity_fence_failed(xe, fence, str, test)) {
		dma_fence_put(fence);
		return -ETIMEDOUT;
	}

	dma_fence_put(fence);
	kunit_info(test, "%s: Job completed\n", str);
	return 0;
}

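/*
 * Page-table populate callback used by the update test: fill each qword with
 * a pattern derived from its index relative to update->ofs, so that
 * test_pt_update() can verify exactly which entries were written.
 */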
static void
sanity_populate_cb(struct xe_migrate_pt_update *pt_update,
		   struct xe_gt *gt, struct iosys_map *map, void *dst,
		   u32 qword_ofs, u32 num_qwords,
		   const struct xe_vm_pgtable_update *update)
{
	int i;
	u64 *ptr = dst;

	for (i = 0; i < num_qwords; i++)
		ptr[i] = (qword_ofs + i - update->ofs) * 0x1111111111111111ULL;
}

static const struct xe_migrate_pt_update_ops sanity_ops = {
	.populate = sanity_populate_cb,
};

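/* Fail the test with both values printed if @_retval differs from @_expected. */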
#define check(_retval, _expected, str, _test) \
	do { if ((_retval) != (_expected)) { \
		KUNIT_FAIL(_test, "Sanity check failed: " str \
			   " expected %llx, got %llx\n", \
			   (u64)(_expected), (u64)(_retval)); \
	} } while (0)

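/*
 * Copy subtest: allocate a system-memory bo the same size as @bo, clear it
 * via xe_migrate_clear(), then copy sysmem -> vram and vram -> sysmem,
 * checking the first and last qwords of the destination each time.
 */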
static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
		      struct kunit *test)
{
	struct xe_device *xe = gt_to_xe(m->gt);
	u64 retval, expected = 0xc0c0c0c0c0c0c0c0ULL;
	bool big = bo->size >= SZ_2M;
	struct dma_fence *fence;
	const char *str = big ? "Copying big bo" : "Copying small bo";
	int err;

	struct xe_bo *sysmem = xe_bo_create_locked(xe, m->gt, NULL,
						   bo->size,
						   ttm_bo_type_kernel,
						   XE_BO_CREATE_SYSTEM_BIT);
	if (IS_ERR(sysmem)) {
		KUNIT_FAIL(test, "Failed to allocate sysmem bo for %s: %li\n",
			   str, PTR_ERR(sysmem));
		return;
	}

	err = xe_bo_validate(sysmem, NULL, false);
	if (err) {
		KUNIT_FAIL(test, "Failed to validate system bo for %s: %i\n",
			   str, err);
		goto out_unlock;
	}

	err = xe_bo_vmap(sysmem);
	if (err) {
		KUNIT_FAIL(test, "Failed to vmap system bo for %s: %i\n",
			   str, err);
		goto out_unlock;
	}

	xe_map_memset(xe, &sysmem->vmap, 0, 0xd0, sysmem->size);
	fence = xe_migrate_clear(m, sysmem, sysmem->ttm.resource, 0xc0c0c0c0);
	if (!sanity_fence_failed(xe, fence, big ? "Clearing sysmem big bo" :
				 "Clearing sysmem small bo", test)) {
		retval = xe_map_rd(xe, &sysmem->vmap, 0, u64);
		check(retval, expected, "sysmem first offset should be cleared",
		      test);
		retval = xe_map_rd(xe, &sysmem->vmap, sysmem->size - 8, u64);
		check(retval, expected, "sysmem last offset should be cleared",
		      test);
	}
	if (!IS_ERR(fence))
		dma_fence_put(fence);

	/* Try to copy 0xc0 from sysmem to lmem with 2MiB or 64KiB/4KiB pages */
	xe_map_memset(xe, &sysmem->vmap, 0, 0xc0, sysmem->size);
	xe_map_memset(xe, &bo->vmap, 0, 0xd0, bo->size);

	fence = xe_migrate_copy(m, sysmem, sysmem->ttm.resource,
				bo->ttm.resource);
	if (!sanity_fence_failed(xe, fence, big ? "Copying big bo sysmem -> vram" :
				 "Copying small bo sysmem -> vram", test)) {
		retval = xe_map_rd(xe, &bo->vmap, 0, u64);
		check(retval, expected,
		      "sysmem -> vram bo first offset should be copied", test);
		retval = xe_map_rd(xe, &bo->vmap, bo->size - 8, u64);
		check(retval, expected,
		      "sysmem -> vram bo last offset should be copied", test);
	}
	if (!IS_ERR(fence))
		dma_fence_put(fence);

	/* And the other way around... slightly hacky. */
	xe_map_memset(xe, &sysmem->vmap, 0, 0xd0, sysmem->size);
	xe_map_memset(xe, &bo->vmap, 0, 0xc0, bo->size);

	fence = xe_migrate_copy(m, sysmem, bo->ttm.resource,
				sysmem->ttm.resource);
	if (!sanity_fence_failed(xe, fence, big ? "Copying big bo vram -> sysmem" :
				 "Copying small bo vram -> sysmem", test)) {
		retval = xe_map_rd(xe, &sysmem->vmap, 0, u64);
		check(retval, expected,
		      "vram -> sysmem bo first offset should be copied", test);
		retval = xe_map_rd(xe, &sysmem->vmap, sysmem->size - 8, u64);
		check(retval, expected,
		      "vram -> sysmem bo last offset should be copied", test);
	}
	if (!IS_ERR(fence))
		dma_fence_put(fence);

	xe_bo_vunmap(sysmem);
out_unlock:
	xe_bo_unlock_no_vm(sysmem);
	xe_bo_put(sysmem);
}

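/*
 * Page-table update subtest: poison @pt, ask xe_migrate_update_pgtables() to
 * rewrite update.qwords entries at update.ofs through sanity_populate_cb(),
 * then verify the written range and that the entries on either side of it
 * were left untouched.
 */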
static void test_pt_update(struct xe_migrate *m, struct xe_bo *pt,
			   struct kunit *test)
{
	struct xe_device *xe = gt_to_xe(m->gt);
	struct dma_fence *fence;
	u64 retval, expected;
	int i;

	struct xe_vm_pgtable_update update = {
		.ofs = 1,
		.qwords = 0x10,
		.pt_bo = pt,
	};
	struct xe_migrate_pt_update pt_update = {
		.ops = &sanity_ops,
	};

	/* Test xe_migrate_update_pgtables() updates the pagetable as expected */
	expected = 0xf0f0f0f0f0f0f0f0ULL;
	xe_map_memset(xe, &pt->vmap, 0, (u8)expected, pt->size);

	fence = xe_migrate_update_pgtables(m, NULL, NULL, m->eng, &update, 1,
					   NULL, 0, &pt_update);
	if (sanity_fence_failed(xe, fence, "Migration pagetable update", test)) {
		/* sanity_fence_failed() also returns true for ERR_PTR/NULL */
		if (!IS_ERR_OR_NULL(fence))
			dma_fence_put(fence);
		return;
	}

	dma_fence_put(fence);
	retval = xe_map_rd(xe, &pt->vmap, 0, u64);
	check(retval, expected, "PTE[0] must stay untouched", test);

	for (i = 0; i < update.qwords; i++) {
		retval = xe_map_rd(xe, &pt->vmap, (update.ofs + i) * 8, u64);
		check(retval, i * 0x1111111111111111ULL, "PTE update", test);
	}

	retval = xe_map_rd(xe, &pt->vmap, 8 * (update.ofs + update.qwords),
			   u64);
	check(retval, expected, "PTE[0x11] must stay untouched", test);
}

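/*
 * Main sanity test for one GT: map the migrate VM's pagetables, allocate a
 * big bo, a fake pagetable and a tiny bo, hand-roll a PTE write plus a clear
 * through run_sanity_job(), then exercise the full clear, copy and
 * pagetable-update paths.
 */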
static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
{
	struct xe_gt *gt = m->gt;
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *pt, *bo = m->pt_bo, *big, *tiny;
	struct xe_res_cursor src_it;
	struct dma_fence *fence;
	u64 retval, expected;
	struct xe_bb *bb;
	int err;
	u8 id = gt->info.id;

	err = xe_bo_vmap(bo);
	if (err) {
		KUNIT_FAIL(test, "Failed to vmap our pagetables: %i\n", err);
		return;
	}

	big = xe_bo_create_pin_map(xe, m->gt, m->eng->vm, SZ_4M,
				   ttm_bo_type_kernel,
				   XE_BO_CREATE_VRAM_IF_DGFX(m->gt) |
				   XE_BO_CREATE_PINNED_BIT);
	if (IS_ERR(big)) {
		KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
		goto vunmap;
	}

	pt = xe_bo_create_pin_map(xe, m->gt, m->eng->vm, GEN8_PAGE_SIZE,
				  ttm_bo_type_kernel,
				  XE_BO_CREATE_VRAM_IF_DGFX(m->gt) |
				  XE_BO_CREATE_PINNED_BIT);
	if (IS_ERR(pt)) {
		KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
			   PTR_ERR(pt));
		goto free_big;
	}

	tiny = xe_bo_create_pin_map(xe, m->gt, m->eng->vm,
				    2 * SZ_4K,
				    ttm_bo_type_kernel,
				    XE_BO_CREATE_VRAM_IF_DGFX(m->gt) |
				    XE_BO_CREATE_PINNED_BIT);
	if (IS_ERR(tiny)) {
		KUNIT_FAIL(test, "Failed to allocate tiny bo: %li\n",
			   PTR_ERR(tiny));
		goto free_pt;
	}

	bb = xe_bb_new(m->gt, 32, xe->info.supports_usm);
	if (IS_ERR(bb)) {
		KUNIT_FAIL(test, "Failed to create batchbuffer: %li\n",
			   PTR_ERR(bb));
		goto free_tiny;
	}
	kunit_info(test, "Starting tests, top level PT addr: %lx, special pagetable base addr: %lx\n",
		   (unsigned long)xe_bo_main_addr(m->eng->vm->pt_root[id]->bo, GEN8_PAGE_SIZE),
		   (unsigned long)xe_bo_main_addr(m->pt_bo, GEN8_PAGE_SIZE));

	/* First part of the test, are we updating our pagetable bo with a new entry? */
	xe_map_wr(xe, &bo->vmap, GEN8_PAGE_SIZE * (NUM_KERNEL_PDE - 1), u64,
		  0xdeaddeadbeefbeef);
	expected = gen8_pte_encode(NULL, pt, 0, XE_CACHE_WB, 0, 0);
	if (m->eng->vm->flags & XE_VM_FLAGS_64K)
		expected |= GEN12_PTE_PS64;
	xe_res_first(pt->ttm.resource, 0, pt->size, &src_it);
	emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt),
		 &src_it, GEN8_PAGE_SIZE, pt);
	run_sanity_job(m, xe, bb, bb->len, "Writing PTE for our fake PT", test);

	retval = xe_map_rd(xe, &bo->vmap, GEN8_PAGE_SIZE * (NUM_KERNEL_PDE - 1),
			   u64);
	check(retval, expected, "PTE entry write", test);

	/* Now try to write data to our newly mapped 'pagetable', see if it succeeds */
	bb->len = 0;
	bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
	xe_map_wr(xe, &pt->vmap, 0, u32, 0xdeaddead);
	expected = 0x12345678U;

	emit_clear(m->gt, bb, xe_migrate_vm_addr(NUM_KERNEL_PDE - 1, 0), 4, 4,
		   expected, IS_DGFX(xe));
	run_sanity_job(m, xe, bb, 1, "Writing to our newly mapped pagetable",
		       test);

	retval = xe_map_rd(xe, &pt->vmap, 0, u32);
	check(retval, expected, "Write to PT after adding PTE", test);

	/* Sanity checks passed, try the full ones! */

	/* Clear a small bo */
	kunit_info(test, "Clearing small buffer object\n");
	xe_map_memset(xe, &tiny->vmap, 0, 0x22, tiny->size);
	expected = 0x224488ff;
	fence = xe_migrate_clear(m, tiny, tiny->ttm.resource, expected);
	if (sanity_fence_failed(xe, fence, "Clearing small bo", test)) {
		if (!IS_ERR_OR_NULL(fence))
			dma_fence_put(fence);
		goto out;
	}

	dma_fence_put(fence);
	retval = xe_map_rd(xe, &tiny->vmap, 0, u32);
	check(retval, expected, "Command clear small first value", test);
	retval = xe_map_rd(xe, &tiny->vmap, tiny->size - 4, u32);
	check(retval, expected, "Command clear small last value", test);

	if (IS_DGFX(xe)) {
		kunit_info(test, "Copying small buffer object to system\n");
		test_copy(m, tiny, test);
	}

	/* Clear a big bo with a fixed value */
	kunit_info(test, "Clearing big buffer object\n");
	xe_map_memset(xe, &big->vmap, 0, 0x11, big->size);
	expected = 0x11223344U;
	fence = xe_migrate_clear(m, big, big->ttm.resource, expected);
	if (sanity_fence_failed(xe, fence, "Clearing big bo", test)) {
		if (!IS_ERR_OR_NULL(fence))
			dma_fence_put(fence);
		goto out;
	}

	dma_fence_put(fence);
	retval = xe_map_rd(xe, &big->vmap, 0, u32);
	check(retval, expected, "Command clear big first value", test);
	retval = xe_map_rd(xe, &big->vmap, big->size - 4, u32);
	check(retval, expected, "Command clear big last value", test);

	if (IS_DGFX(xe)) {
		kunit_info(test, "Copying big buffer object to system\n");
		test_copy(m, big, test);
	}

	test_pt_update(m, pt, test);

out:
	xe_bb_free(bb, NULL);
free_tiny:
	xe_bo_unpin(tiny);
	xe_bo_put(tiny);
free_pt:
	xe_bo_unpin(pt);
	xe_bo_put(pt);
free_big:
	xe_bo_unpin(big);
	xe_bo_put(big);
vunmap:
	xe_bo_vunmap(m->pt_bo);
}

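/*
 * Run the sanity test on every GT of @xe, holding the migrate engine's VM
 * lock around each run.
 */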
static int migrate_test_run_device(struct xe_device *xe)
{
	struct kunit *test = xe_cur_kunit();
	struct xe_gt *gt;
	int id;

	for_each_gt(gt, xe, id) {
		struct xe_migrate *m = gt->migrate;
		struct ww_acquire_ctx ww;

		kunit_info(test, "Testing gt id %d.\n", id);
		xe_vm_lock(m->eng->vm, &ww, 0, true);
		xe_migrate_sanity_test(m, test);
		xe_vm_unlock(m->eng->vm, &ww);
	}

	return 0;
}

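/* KUnit entry point: run the migrate sanity test on every probed device. */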
void xe_migrate_sanity_kunit(struct kunit *test)
{
	xe_call_for_each_device(migrate_test_run_device);
}
EXPORT_SYMBOL(xe_migrate_sanity_kunit);