// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020-2022 Intel Corporation
 */

#include <kunit/test.h>
#include <kunit/visibility.h>

#include "tests/xe_migrate_test.h"
#include "tests/xe_pci_test.h"

#include "xe_pci.h"

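/*
 * Wait up to five seconds for @fence to signal. Returns true (logging a
 * KUnit failure where appropriate) if the fence is an error pointer, is
 * missing, or fails to signal in time, so callers can bail out early.
 */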
static bool sanity_fence_failed(struct xe_device *xe, struct dma_fence *fence,
				const char *str, struct kunit *test)
{
	long ret;

	if (IS_ERR(fence)) {
		KUNIT_FAIL(test, "Failed to create fence for %s: %li\n", str,
			   PTR_ERR(fence));
		return true;
	}
	if (!fence)
		return true;

	ret = dma_fence_wait_timeout(fence, false, 5 * HZ);
	if (ret <= 0) {
		KUNIT_FAIL(test, "Fence timed out for %s: %li\n", str, ret);
		return true;
	}

	return false;
}

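/*
 * Turn @bb into a migration job on the migrate exec queue, submit it and
 * wait for completion. Returns 0 on success, or a negative error code if
 * job creation fails or the job does not finish in time.
 */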
static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe,
			  struct xe_bb *bb, u32 second_idx, const char *str,
			  struct kunit *test)
{
	u64 batch_base = xe_migrate_batch_base(m, xe->info.has_usm);
	struct xe_sched_job *job = xe_bb_create_migration_job(m->q, bb,
							      batch_base,
							      second_idx);
	struct dma_fence *fence;

	if (IS_ERR(job)) {
		KUNIT_FAIL(test, "Failed to create sanity job for %s: %li\n",
			   str, PTR_ERR(job));
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	if (sanity_fence_failed(xe, fence, str, test)) {
		/* The finished fence is always valid here; drop our reference. */
		dma_fence_put(fence);
		return -ETIMEDOUT;
	}

	dma_fence_put(fence);
	kunit_info(test, "%s: Job completed\n", str);
	return 0;
}

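/*
 * PT-update populate callback used by the sanity test: fill each qword
 * with a recognizable pattern, via the CPU map when one is provided or
 * through the raw pointer otherwise, logging which path was taken and
 * failing the test if the CPU path was used while @force_gpu was set.
 */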
static void
sanity_populate_cb(struct xe_migrate_pt_update *pt_update,
		   struct xe_tile *tile, struct iosys_map *map, void *dst,
		   u32 qword_ofs, u32 num_qwords,
		   const struct xe_vm_pgtable_update *update)
{
	struct migrate_test_params *p =
		to_migrate_test_params(xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));
	int i;
	u64 *ptr = dst;
	u64 value;

	for (i = 0; i < num_qwords; i++) {
		value = (qword_ofs + i - update->ofs) * 0x1111111111111111ULL;
		if (map)
			xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) *
				  sizeof(u64), u64, value);
		else
			ptr[i] = value;
	}

	kunit_info(xe_cur_kunit(), "Used %s.\n", map ? "CPU" : "GPU");
	if (p->force_gpu && map)
		KUNIT_FAIL(xe_cur_kunit(), "GPU pagetable update used CPU.\n");
}

static const struct xe_migrate_pt_update_ops sanity_ops = {
	.populate = sanity_populate_cb,
};

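/* Fail @_test with a descriptive message if @_retval does not match @_expected. */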
#define check(_retval, _expected, str, _test) \
	do { if ((_retval) != (_expected)) { \
		KUNIT_FAIL(_test, "Sanity check failed: " str \
			   " expected %llx, got %llx\n", \
			   (u64)(_expected), (u64)(_retval)); \
	} } while (0)

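/*
 * Allocate a CPU-accessible "remote" bo in @region, then exercise
 * xe_migrate_clear() plus xe_migrate_copy() in both directions between
 * the remote bo and @bo, verifying the first and last qwords after each
 * operation.
 */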
static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
		      struct kunit *test, u32 region)
{
	struct xe_device *xe = tile_to_xe(m->tile);
	u64 retval, expected = 0;
	bool big = bo->size >= SZ_2M;
	struct dma_fence *fence;
	const char *str = big ? "Copying big bo" : "Copying small bo";
	int err;

	struct xe_bo *remote = xe_bo_create_locked(xe, m->tile, NULL,
						   bo->size,
						   ttm_bo_type_kernel,
						   region |
						   XE_BO_NEEDS_CPU_ACCESS);
	if (IS_ERR(remote)) {
		KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %pe\n",
			   str, remote);
		return;
	}

	err = xe_bo_validate(remote, NULL, false);
	if (err) {
		KUNIT_FAIL(test, "Failed to validate remote bo for %s: %i\n",
			   str, err);
		goto out_unlock;
	}

	err = xe_bo_vmap(remote);
	if (err) {
		KUNIT_FAIL(test, "Failed to vmap remote bo for %s: %i\n",
			   str, err);
		goto out_unlock;
	}

	xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size);
	fence = xe_migrate_clear(m, remote, remote->ttm.resource);
	if (!sanity_fence_failed(xe, fence, big ? "Clearing remote big bo" :
				 "Clearing remote small bo", test)) {
		retval = xe_map_rd(xe, &remote->vmap, 0, u64);
		check(retval, expected, "remote first offset should be cleared",
		      test);
		retval = xe_map_rd(xe, &remote->vmap, remote->size - 8, u64);
		check(retval, expected, "remote last offset should be cleared",
		      test);
	}
	dma_fence_put(fence);

	/* Try to copy 0xc0 from remote to vram with 2MiB or 64KiB/4KiB pages */
	xe_map_memset(xe, &remote->vmap, 0, 0xc0, remote->size);
	xe_map_memset(xe, &bo->vmap, 0, 0xd0, bo->size);

	expected = 0xc0c0c0c0c0c0c0c0;
	fence = xe_migrate_copy(m, remote, bo, remote->ttm.resource,
				bo->ttm.resource, false);
	if (!sanity_fence_failed(xe, fence, big ? "Copying big bo remote -> vram" :
				 "Copying small bo remote -> vram", test)) {
		retval = xe_map_rd(xe, &bo->vmap, 0, u64);
		check(retval, expected,
		      "remote -> vram bo first offset should be copied", test);
		retval = xe_map_rd(xe, &bo->vmap, bo->size - 8, u64);
		check(retval, expected,
		      "remote -> vram bo last offset should be copied", test);
	}
	dma_fence_put(fence);

	/* And the other way around.. slightly hacky.. */
	xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size);
	xe_map_memset(xe, &bo->vmap, 0, 0xc0, bo->size);

	fence = xe_migrate_copy(m, bo, remote, bo->ttm.resource,
				remote->ttm.resource, false);
	if (!sanity_fence_failed(xe, fence, big ? "Copying big bo vram -> remote" :
				 "Copying small bo vram -> remote", test)) {
		retval = xe_map_rd(xe, &remote->vmap, 0, u64);
		check(retval, expected,
		      "vram -> remote bo first offset should be copied", test);
		retval = xe_map_rd(xe, &remote->vmap, remote->size - 8, u64);
		check(retval, expected,
		      "vram -> remote bo last offset should be copied", test);
	}
	dma_fence_put(fence);

	xe_bo_vunmap(remote);
out_unlock:
	xe_bo_unlock(remote);
	xe_bo_put(remote);
}

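/*
 * Copy-direction helpers: run test_copy() against a system-memory remote
 * bo, and against the other VRAM region when @bo itself lives in VRAM.
 */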
static void test_copy_sysmem(struct xe_migrate *m, struct xe_bo *bo,
			     struct kunit *test)
{
	test_copy(m, bo, test, XE_BO_CREATE_SYSTEM_BIT);
}

static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
			   struct kunit *test)
{
	u32 region;

	if (bo->ttm.resource->mem_type == XE_PL_SYSTEM)
		return;

	if (bo->ttm.resource->mem_type == XE_PL_VRAM0)
		region = XE_BO_CREATE_VRAM1_BIT;
	else
		region = XE_BO_CREATE_VRAM0_BIT;
	test_copy(m, bo, test, region);
}

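/*
 * Drive xe_migrate_update_pgtables() against the fake pagetable bo @pt and
 * verify that exactly the requested qword range was rewritten, with the
 * surrounding entries left untouched. @force_gpu makes the populate
 * callback fail the test if the CPU path was used for the update.
 */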
static void test_pt_update(struct xe_migrate *m, struct xe_bo *pt,
			   struct kunit *test, bool force_gpu)
{
	struct xe_device *xe = tile_to_xe(m->tile);
	struct dma_fence *fence;
	u64 retval, expected;
	ktime_t then, now;
	int i;

	struct xe_vm_pgtable_update update = {
		.ofs = 1,
		.qwords = 0x10,
		.pt_bo = pt,
	};
	struct xe_migrate_pt_update pt_update = {
		.ops = &sanity_ops,
	};
	struct migrate_test_params p = {
		.base.id = XE_TEST_LIVE_MIGRATE,
		.force_gpu = force_gpu,
	};

	test->priv = &p;
	/* Test xe_migrate_update_pgtables() updates the pagetable as expected */
	expected = 0xf0f0f0f0f0f0f0f0ULL;
	xe_map_memset(xe, &pt->vmap, 0, (u8)expected, pt->size);

	then = ktime_get();
	fence = xe_migrate_update_pgtables(m, m->q->vm, NULL, m->q, &update, 1,
					   NULL, 0, &pt_update);
	now = ktime_get();
	if (sanity_fence_failed(xe, fence, "Migration pagetable update", test))
		return;

	kunit_info(test, "Updating without syncing took %llu us.\n",
		   (unsigned long long)ktime_to_us(ktime_sub(now, then)));

	dma_fence_put(fence);
	retval = xe_map_rd(xe, &pt->vmap, 0, u64);
	check(retval, expected, "PTE[0] must stay untouched", test);

	for (i = 0; i < update.qwords; i++) {
		retval = xe_map_rd(xe, &pt->vmap, (update.ofs + i) * 8, u64);
		check(retval, i * 0x1111111111111111ULL, "PTE update", test);
	}

	retval = xe_map_rd(xe, &pt->vmap, 8 * (update.ofs + update.qwords),
			   u64);
	check(retval, expected, "PTE[0x11] must stay untouched", test);
}

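/*
 * Core of the live test: map the migrate VM's pagetable bo, allocate a
 * big (4 MiB) bo, a fake pagetable bo and a tiny (8 KiB) bo, hand-roll a
 * PTE pointing at the fake pagetable, then run the clear, copy and
 * pagetable-update subtests on top of that setup.
 */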
static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
{
	struct xe_tile *tile = m->tile;
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_bo *pt, *bo = m->pt_bo, *big, *tiny;
	struct xe_res_cursor src_it;
	struct dma_fence *fence;
	u64 retval, expected;
	struct xe_bb *bb;
	int err;
	u8 id = tile->id;

	err = xe_bo_vmap(bo);
	if (err) {
		KUNIT_FAIL(test, "Failed to vmap our pagetables: %i\n", err);
		return;
	}

	big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
				   ttm_bo_type_kernel,
				   XE_BO_CREATE_VRAM_IF_DGFX(tile) |
				   XE_BO_CREATE_PINNED_BIT);
	if (IS_ERR(big)) {
		KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
		goto vunmap;
	}

	pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
				  ttm_bo_type_kernel,
				  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
				  XE_BO_CREATE_PINNED_BIT);
	if (IS_ERR(pt)) {
		KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
			   PTR_ERR(pt));
		goto free_big;
	}

	tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
				    2 * SZ_4K,
				    ttm_bo_type_kernel,
				    XE_BO_CREATE_VRAM_IF_DGFX(tile) |
				    XE_BO_CREATE_PINNED_BIT);
	if (IS_ERR(tiny)) {
		KUNIT_FAIL(test, "Failed to allocate tiny bo: %li\n",
			   PTR_ERR(tiny));
		goto free_pt;
	}

	bb = xe_bb_new(tile->primary_gt, 32, xe->info.has_usm);
	if (IS_ERR(bb)) {
		KUNIT_FAIL(test, "Failed to create batchbuffer: %li\n",
			   PTR_ERR(bb));
		goto free_tiny;
	}

	kunit_info(test, "Starting tests, top level PT addr: %lx, special pagetable base addr: %lx\n",
		   (unsigned long)xe_bo_main_addr(m->q->vm->pt_root[id]->bo, XE_PAGE_SIZE),
		   (unsigned long)xe_bo_main_addr(m->pt_bo, XE_PAGE_SIZE));

	/* First part of the test, are we updating our pagetable bo with a new entry? */
	xe_map_wr(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1), u64,
		  0xdeaddeadbeefbeef);
	expected = m->q->vm->pt_ops->pte_encode_bo(pt, 0, xe->pat.idx[XE_CACHE_WB], 0);
	if (m->q->vm->flags & XE_VM_FLAG_64K)
		expected |= XE_PTE_PS64;
	if (xe_bo_is_vram(pt))
		xe_res_first(pt->ttm.resource, 0, pt->size, &src_it);
	else
		xe_res_first_sg(xe_bo_sg(pt), 0, pt->size, &src_it);

	emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt), false,
		 &src_it, XE_PAGE_SIZE, pt->ttm.resource);

	run_sanity_job(m, xe, bb, bb->len, "Writing PTE for our fake PT", test);

	retval = xe_map_rd(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1),
			   u64);
	check(retval, expected, "PTE entry write", test);

	/* Now try to write data to our newly mapped 'pagetable', see if it succeeds */
	bb->len = 0;
	bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
	xe_map_wr(xe, &pt->vmap, 0, u32, 0xdeaddead);
	expected = 0;

	emit_clear(tile->primary_gt, bb, xe_migrate_vm_addr(NUM_KERNEL_PDE - 1, 0), 4, 4,
		   IS_DGFX(xe));
	run_sanity_job(m, xe, bb, 1, "Writing to our newly mapped pagetable",
		       test);

	retval = xe_map_rd(xe, &pt->vmap, 0, u32);
	check(retval, expected, "Write to PT after adding PTE", test);

	/* Sanity checks passed, try the full ones! */

	/* Clear a small bo */
	kunit_info(test, "Clearing small buffer object\n");
	xe_map_memset(xe, &tiny->vmap, 0, 0x22, tiny->size);
	expected = 0;
	fence = xe_migrate_clear(m, tiny, tiny->ttm.resource);
	if (sanity_fence_failed(xe, fence, "Clearing small bo", test))
		goto out;

	dma_fence_put(fence);
	retval = xe_map_rd(xe, &tiny->vmap, 0, u32);
	check(retval, expected, "Command clear small first value", test);
	retval = xe_map_rd(xe, &tiny->vmap, tiny->size - 4, u32);
	check(retval, expected, "Command clear small last value", test);

	kunit_info(test, "Copying small buffer object to system\n");
	test_copy_sysmem(m, tiny, test);
	if (xe->info.tile_count > 1) {
		kunit_info(test, "Copying small buffer object to other vram\n");
		test_copy_vram(m, tiny, test);
	}

	/* Clear a big bo */
	kunit_info(test, "Clearing big buffer object\n");
	xe_map_memset(xe, &big->vmap, 0, 0x11, big->size);
	expected = 0;
	fence = xe_migrate_clear(m, big, big->ttm.resource);
	if (sanity_fence_failed(xe, fence, "Clearing big bo", test))
		goto out;

	dma_fence_put(fence);
	retval = xe_map_rd(xe, &big->vmap, 0, u32);
	check(retval, expected, "Command clear big first value", test);
	retval = xe_map_rd(xe, &big->vmap, big->size - 4, u32);
	check(retval, expected, "Command clear big last value", test);

	kunit_info(test, "Copying big buffer object to system\n");
	test_copy_sysmem(m, big, test);
	if (xe->info.tile_count > 1) {
		kunit_info(test, "Copying big buffer object to other vram\n");
		test_copy_vram(m, big, test);
	}

	kunit_info(test, "Testing page table update using CPU if GPU idle.\n");
	test_pt_update(m, pt, test, false);
	kunit_info(test, "Testing page table update using GPU\n");
	test_pt_update(m, pt, test, true);

out:
	xe_bb_free(bb, NULL);
free_tiny:
	xe_bo_unpin(tiny);
	xe_bo_put(tiny);
free_pt:
	xe_bo_unpin(pt);
	xe_bo_put(pt);
free_big:
	xe_bo_unpin(big);
	xe_bo_put(big);
vunmap:
	xe_bo_vunmap(m->pt_bo);
}

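/*
 * Iterate over all tiles of @xe and run the sanity test on each tile's
 * migrate context, holding the VM lock and a mem_access reference for
 * the duration of each run.
 */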
static int migrate_test_run_device(struct xe_device *xe)
{
	struct kunit *test = xe_cur_kunit();
	struct xe_tile *tile;
	int id;

	for_each_tile(tile, xe, id) {
		struct xe_migrate *m = tile->migrate;

		kunit_info(test, "Testing tile id %d.\n", id);
		xe_vm_lock(m->q->vm, true);
		xe_device_mem_access_get(xe);
		xe_migrate_sanity_test(m, test);
		xe_device_mem_access_put(xe);
		xe_vm_unlock(m->q->vm);
	}

	return 0;
}

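/* KUnit entry point: run the migrate sanity test on every bound device. */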
void xe_migrate_sanity_kunit(struct kunit *test)
{
	xe_call_for_each_device(migrate_test_run_device);
}
EXPORT_SYMBOL_IF_KUNIT(xe_migrate_sanity_kunit);