// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gt_tlb_invalidation.h"

#include "abi/guc_actions_abi.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_gt_stats.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_trace.h"
#include "regs/xe_guc_regs.h"

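/*
 * Flag bit on the dma-fence used to mark fences embedded on the stack;
 * signalling a stack fence skips the final dma_fence_put() since no extra
 * reference was taken at init time.
 */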
#define FENCE_STACK_BIT		DMA_FENCE_FLAG_USER_BITS

/*
 * TLB invalidation depends on the pending commands in the CT queue and then
 * the real invalidation time. Double the time to process the full CT queue
 * just to be on the safe side.
 */
static long tlb_timeout_jiffies(struct xe_gt *gt)
{
	/* this reflects what HW/GuC needs to process TLB inv request */
	const long hw_tlb_timeout = HZ / 4;

	/* this estimates actual delay caused by the CTB transport */
	long delay = xe_guc_ct_queue_proc_time_jiffies(&gt->uc.guc.ct);

	return hw_tlb_timeout + 2 * delay;
}

static void
__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
{
	bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);

	trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
	xe_gt_tlb_invalidation_fence_fini(fence);
	dma_fence_signal(&fence->base);
	if (!stack)
		dma_fence_put(&fence->base);
}

static void
invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
{
	list_del(&fence->link);
	__invalidation_fence_signal(xe, fence);
}

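/*
 * Pending fences are kept in submission (seqno) order, so the scan below can
 * stop at the first fence that has not yet exceeded the timeout.
 */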
static void xe_gt_tlb_fence_timeout(struct work_struct *work)
{
	struct xe_gt *gt = container_of(work, struct xe_gt,
					tlb_invalidation.fence_tdr.work);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gt_tlb_invalidation_fence *fence, *next;

	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link) {
		s64 since_inval_ms = ktime_ms_delta(ktime_get(),
						    fence->invalidation_time);

		if (msecs_to_jiffies(since_inval_ms) < tlb_timeout_jiffies(gt))
			break;

		trace_xe_gt_tlb_invalidation_fence_timeout(xe, fence);
		xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d",
			  fence->seqno, gt->tlb_invalidation.seqno_recv);

		fence->base.error = -ETIME;
		invalidation_fence_signal(xe, fence);
	}
	if (!list_empty(&gt->tlb_invalidation.pending_fences))
		queue_delayed_work(system_wq,
				   &gt->tlb_invalidation.fence_tdr,
				   tlb_timeout_jiffies(gt));
	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
}

/**
 * xe_gt_tlb_invalidation_init - Initialize GT TLB invalidation state
 * @gt: graphics tile
 *
 * Initialize GT TLB invalidation state. This is purely software
 * initialization and should be called once during driver load.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
{
	gt->tlb_invalidation.seqno = 1;
	INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
	spin_lock_init(&gt->tlb_invalidation.pending_lock);
	spin_lock_init(&gt->tlb_invalidation.lock);
	INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
			  xe_gt_tlb_fence_timeout);

	return 0;
}

/**
 * xe_gt_tlb_invalidation_reset - Reset GT TLB invalidation state
 * @gt: graphics tile
 *
 * Signal any pending invalidation fences; should be called during a GT reset.
 */
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
{
	struct xe_gt_tlb_invalidation_fence *fence, *next;
	int pending_seqno;

	/*
	 * CT channel is already disabled at this point. No new TLB requests
	 * can appear.
	 */

	mutex_lock(&gt->uc.guc.ct.lock);
	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
	cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
	/*
	 * We might have various kworkers waiting for TLB flushes to complete
	 * which are not tracked with an explicit TLB fence, however at this
	 * stage that will never happen since the CT is already disabled, so
	 * make sure we signal them here under the assumption that we have
	 * completed a full GT reset.
	 */
	if (gt->tlb_invalidation.seqno == 1)
		pending_seqno = TLB_INVALIDATION_SEQNO_MAX - 1;
	else
		pending_seqno = gt->tlb_invalidation.seqno - 1;
	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, pending_seqno);

	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link)
		invalidation_fence_signal(gt_to_xe(gt), fence);
	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
	mutex_unlock(&gt->uc.guc.ct.lock);
}

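/*
 * Wraparound-safe check of whether @seqno has already been signalled, i.e.
 * whether seqno_recv has caught up to it. Differences of more than half of
 * TLB_INVALIDATION_SEQNO_MAX are treated as seqno space wraparound.
 */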
static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
{
	int seqno_recv = READ_ONCE(gt->tlb_invalidation.seqno_recv);

	if (seqno - seqno_recv < -(TLB_INVALIDATION_SEQNO_MAX / 2))
		return false;

	if (seqno - seqno_recv > (TLB_INVALIDATION_SEQNO_MAX / 2))
		return true;

	return seqno_recv >= seqno;
}

static int send_tlb_invalidation(struct xe_guc *guc,
				 struct xe_gt_tlb_invalidation_fence *fence,
				 u32 *action, int len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	int seqno;
	int ret;

	xe_gt_assert(gt, fence);

	/*
	 * XXX: The seqno algorithm relies on TLB invalidations being processed
	 * in order (which they currently are); if that changes, the algorithm
	 * will need to be updated.
	 */

	mutex_lock(&guc->ct.lock);
	seqno = gt->tlb_invalidation.seqno;
	fence->seqno = seqno;
	trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
	action[1] = seqno;
	ret = xe_guc_ct_send_locked(&guc->ct, action, len,
				    G2H_LEN_DW_TLB_INVALIDATE, 1);
	if (!ret) {
		spin_lock_irq(&gt->tlb_invalidation.pending_lock);
		/*
		 * We haven't actually published the TLB fence as per
		 * pending_fences, but in theory our seqno could have already
		 * been written as we acquired the pending_lock. In such a case
		 * we can just go ahead and signal the fence here.
		 */
		if (tlb_invalidation_seqno_past(gt, seqno)) {
			__invalidation_fence_signal(xe, fence);
		} else {
			fence->invalidation_time = ktime_get();
			list_add_tail(&fence->link,
				      &gt->tlb_invalidation.pending_fences);

			if (list_is_singular(&gt->tlb_invalidation.pending_fences))
				queue_delayed_work(system_wq,
						   &gt->tlb_invalidation.fence_tdr,
						   tlb_timeout_jiffies(gt));
		}
		spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
	} else if (ret < 0) {
		__invalidation_fence_signal(xe, fence);
	}
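	/*
	 * Only advance the seqno on a successful send. Valid seqnos are
	 * 1..TLB_INVALIDATION_SEQNO_MAX - 1; 0 is never used, so wrap back
	 * to 1 instead of 0.
	 */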
	if (!ret) {
		gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
			TLB_INVALIDATION_SEQNO_MAX;
		if (!gt->tlb_invalidation.seqno)
			gt->tlb_invalidation.seqno = 1;
	}
	mutex_unlock(&guc->ct.lock);
	xe_gt_stats_incr(gt, XE_GT_STATS_ID_TLB_INVAL, 1);

	return ret;
}

#define MAKE_INVAL_OP(type)	((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
		XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
		XE_GUC_TLB_INVAL_FLUSH_CACHE)

/**
 * xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
 * @gt: graphics tile
 * @fence: invalidation fence which will be signaled on TLB invalidation
 * completion
 *
 * Issue a TLB invalidation for the GuC. Completion of the TLB invalidation is
 * asynchronous and the caller can use the invalidation fence to wait for
 * completion.
 *
 * Return: 0 on success, negative error code on error
 */
static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
				      struct xe_gt_tlb_invalidation_fence *fence)
{
	u32 action[] = {
		XE_GUC_ACTION_TLB_INVALIDATION,
		0, /* seqno, replaced in send_tlb_invalidation */
		MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
	};

	return send_tlb_invalidation(&gt->uc.guc, fence, action,
				     ARRAY_SIZE(action));
}

/**
 * xe_gt_tlb_invalidation_ggtt - Issue a TLB invalidation on this GT for the GGTT
 * @gt: graphics tile
 *
 * Issue a TLB invalidation for the GGTT. Completion of TLB invalidation is
 * synchronous.
 *
 * Return: 0 on success, negative error code on error
 */
int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
	    gt->uc.guc.submission_state.enabled) {
		struct xe_gt_tlb_invalidation_fence fence;
		int ret;

		xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
		ret = xe_gt_tlb_invalidation_guc(gt, &fence);
		if (ret < 0) {
			xe_gt_tlb_invalidation_fence_fini(&fence);
			return ret;
		}

		xe_gt_tlb_invalidation_fence_wait(&fence);
	} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
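		/*
		 * GuC CT is not usable here (e.g. during early driver load or
		 * across a GT reset), so poke the invalidation registers
		 * directly over MMIO; SR-IOV VFs skip this MMIO path entirely.
		 */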
		if (IS_SRIOV_VF(xe))
			return 0;

		xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
		if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
			xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC1,
					PVC_GUC_TLB_INV_DESC1_INVALIDATE);
			xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC0,
					PVC_GUC_TLB_INV_DESC0_VALID);
		} else {
			xe_mmio_write32(gt, GUC_TLB_INV_CR,
					GUC_TLB_INV_CR_INVALIDATE);
		}
		xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
	}

	return 0;
}

/**
 * xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
 * address range
 *
 * @gt: graphics tile
 * @fence: invalidation fence which will be signaled on TLB invalidation
 * completion
 * @start: start address
 * @end: end address
 * @asid: address space id
 *
 * Issue a range-based TLB invalidation if supported; if not, fall back to a
 * full TLB invalidation. Completion of the TLB invalidation is asynchronous
 * and the caller can use the invalidation fence to wait for completion.
 *
 * Return: Negative error code on error, 0 on success
 */
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
				 struct xe_gt_tlb_invalidation_fence *fence,
				 u64 start, u64 end, u32 asid)
{
	struct xe_device *xe = gt_to_xe(gt);
#define MAX_TLB_INVALIDATION_LEN 7
	u32 action[MAX_TLB_INVALIDATION_LEN];
	int len = 0;

	xe_gt_assert(gt, fence);

	/* Execlists not supported */
	if (gt_to_xe(gt)->info.force_execlist) {
		__invalidation_fence_signal(xe, fence);
		return 0;
	}

	action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
	action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
	if (!xe->info.has_range_tlb_invalidation) {
		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
	} else {
		u64 orig_start = start;
		u64 length = end - start;
		u64 align;

		if (length < SZ_4K)
			length = SZ_4K;

		/*
		 * We need to invalidate at a coarser granularity if the start
		 * address is not aligned to the length. In that case, find a
		 * length large enough to create an address mask covering the
		 * required range.
		 */
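		/*
		 * For instance, invalidating [0x7000, 0x9000) (8K straddling
		 * the 32K boundary at 0x8000) walks up to start = 0x0 and
		 * length = SZ_64K: the smallest naturally aligned
		 * power-of-two block covering the requested range.
		 */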
		align = roundup_pow_of_two(length);
		start = ALIGN_DOWN(start, align);
		end = ALIGN(end, align);
		length = align;
		while (start + length < end) {
			length <<= 1;
			start = ALIGN_DOWN(orig_start, length);
		}

		/*
		 * Minimum invalidation size for a 2MB page that the hardware
		 * expects is 16MB
		 */
		if (length >= SZ_2M) {
			length = max_t(u64, SZ_16M, length);
			start = ALIGN_DOWN(orig_start, length);
		}

		xe_gt_assert(gt, length >= SZ_4K);
		xe_gt_assert(gt, is_power_of_2(length));
		xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1,
						    ilog2(SZ_2M) + 1)));
		xe_gt_assert(gt, IS_ALIGNED(start, length));

		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
		action[len++] = asid;
		action[len++] = lower_32_bits(start);
		action[len++] = upper_32_bits(start);
		action[len++] = ilog2(length) - ilog2(SZ_4K);
	}

	xe_gt_assert(gt, len <= MAX_TLB_INVALIDATION_LEN);

	return send_tlb_invalidation(&gt->uc.guc, fence, action, len);
}

/**
 * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
 * @gt: graphics tile
 * @fence: invalidation fence which will be signaled on TLB invalidation
 * completion
 * @vma: VMA to invalidate
 *
 * Issue a range-based TLB invalidation if supported; if not, fall back to a
 * full TLB invalidation. Completion of the TLB invalidation is asynchronous
 * and the caller can use the invalidation fence to wait for completion.
 *
 * Return: Negative error code on error, 0 on success
 */
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
			       struct xe_gt_tlb_invalidation_fence *fence,
			       struct xe_vma *vma)
{
	xe_gt_assert(gt, vma);

	return xe_gt_tlb_invalidation_range(gt, fence, xe_vma_start(vma),
					    xe_vma_end(vma),
					    xe_vma_vm(vma)->usm.asid);
}

/**
 * xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
 * @guc: guc
 * @msg: message indicating TLB invalidation done
 * @len: length of message
 *
 * Parse the seqno of the TLB invalidation, wake any waiters for that seqno,
 * and signal any invalidation fences for it. The algorithm for this depends
 * on seqnos being received in-order and asserts this assumption.
 *
 * Return: 0 on success, -EPROTO for malformed messages.
 */
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gt_tlb_invalidation_fence *fence, *next;
	unsigned long flags;

	if (unlikely(len != 1))
		return -EPROTO;

	/*
	 * This can be run both directly from the IRQ handler and in
	 * process_g2h_msg(). Only one may process any individual CT message,
	 * however the order they are processed here could result in skipping a
	 * seqno. To handle that we just process all the seqnos from the last
	 * seqno_recv up to and including the one in msg[0]. The delta should
	 * be very small, so there shouldn't be many pending_fences we actually
	 * need to iterate over here.
	 *
	 * From the GuC POV we expect the seqnos to always appear in-order, so
	 * if we see something later in the timeline we can be sure that
	 * anything appearing earlier has already signalled; we just have yet
	 * to officially process the CT message, as if racing against
	 * process_g2h_msg().
	 */
	spin_lock_irqsave(&gt->tlb_invalidation.pending_lock, flags);
	if (tlb_invalidation_seqno_past(gt, msg[0])) {
		spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);
		return 0;
	}

	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, msg[0]);

	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link) {
		trace_xe_gt_tlb_invalidation_fence_recv(xe, fence);

		if (!tlb_invalidation_seqno_past(gt, fence->seqno))
			break;

		invalidation_fence_signal(xe, fence);
	}

	if (!list_empty(&gt->tlb_invalidation.pending_fences))
		mod_delayed_work(system_wq,
				 &gt->tlb_invalidation.fence_tdr,
				 tlb_timeout_jiffies(gt));
	else
		cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);

	spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);

	return 0;
}

static const char *
invalidation_fence_get_driver_name(struct dma_fence *dma_fence)
{
	return "xe";
}

static const char *
invalidation_fence_get_timeline_name(struct dma_fence *dma_fence)
{
	return "invalidation_fence";
}

static const struct dma_fence_ops invalidation_fence_ops = {
	.get_driver_name = invalidation_fence_get_driver_name,
	.get_timeline_name = invalidation_fence_get_timeline_name,
};

/**
 * xe_gt_tlb_invalidation_fence_init - Initialize TLB invalidation fence
 * @gt: GT
 * @fence: TLB invalidation fence to initialize
 * @stack: fence is a stack variable
 *
 * Initialize TLB invalidation fence for use. xe_gt_tlb_invalidation_fence_fini
 * must be called if the fence is not signaled.
 */
void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
				       struct xe_gt_tlb_invalidation_fence *fence,
				       bool stack)
{
	xe_pm_runtime_get_noresume(gt_to_xe(gt));

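	/*
	 * Each invalidation fence gets its own dma-fence context with a
	 * single seqno of 1; ordering between invalidations is tracked via
	 * the GT TLB invalidation seqno rather than the dma-fence seqno.
	 */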
	spin_lock_irq(&gt->tlb_invalidation.lock);
	dma_fence_init(&fence->base, &invalidation_fence_ops,
		       &gt->tlb_invalidation.lock,
		       dma_fence_context_alloc(1), 1);
	spin_unlock_irq(&gt->tlb_invalidation.lock);
	INIT_LIST_HEAD(&fence->link);
	if (stack)
		set_bit(FENCE_STACK_BIT, &fence->base.flags);
	else
		dma_fence_get(&fence->base);
	fence->gt = gt;
}

/**
 * xe_gt_tlb_invalidation_fence_fini - Finalize TLB invalidation fence
 * @fence: TLB invalidation fence to finalize
 *
 * Drop the PM reference which the fence took during init.
 */
void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
{
	xe_pm_runtime_put(gt_to_xe(fence->gt));
}