drm/xe/migrate: prevent infinite recursion
Author: Matthew Auld <matthew.auld@intel.com>
AuthorDate: Thu, 31 Jul 2025 09:38:09 +0000 (10:38 +0100)
Commit: Rodrigo Vivi <rodrigo.vivi@intel.com>
CommitDate: Tue, 12 Aug 2025 16:52:26 +0000 (12:52 -0400)
If buf + offset is not aligned to XE_CACHELINE_BYTES we fall back to
using a bounce buffer. However, the bounce buffer here is allocated on
the stack, where the only alignment guarantee is the natural alignment
of u8, not XE_CACHELINE_BYTES. If the bounce buffer is itself
misaligned we recurse back into the function, but the new bounce
buffer might also be misaligned, and might never be aligned as we keep
recursing, until we eventually blow through the stack.
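For illustration, a minimal sketch of the pre-fix recursion hazard
(simplified, with a hypothetical helper name; the real function is
xe_migrate_access_memory() in xe_migrate.c):

	static int access_unaligned(void *buf, unsigned long offset)
	{
		if (!IS_ALIGNED((unsigned long)buf + offset,
				XE_CACHELINE_BYTES)) {
			u8 bounce[XE_CACHELINE_BYTES]; /* only u8-aligned */

			/*
			 * Recurse with bounce as the new buffer. If the
			 * stack slot is itself misaligned we take this
			 * branch again with a fresh, possibly still
			 * misaligned buffer, until the stack overflows.
			 */
			return access_unaligned(bounce, 0);
		}

		return 0; /* aligned path elided */
	}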

Instead of using the stack, use kmalloc(), which should respect the
power-of-two alignment request here. This fixes a kernel panic when
triggering this path through eudebug.
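For reference, a minimal illustration (not part of the patch) of the
property relied on: kmalloc() is expected to return naturally aligned
memory for power-of-two sizes, so the heap bounce buffer should never
hit the misaligned fallback again.

	void *bounce;

	BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES));
	bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL);
	if (bounce)
		/* should always hold given the power-of-two size */
		WARN_ON(!IS_ALIGNED((unsigned long)bounce,
				    XE_CACHELINE_BYTES));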

v2 (Stuart):
 - Add build bug check for power-of-two restriction
 - s/EINVAL/ENOMEM/

Fixes: 270172f64b11 ("drm/xe: Update xe_ttm_access_memory to use GPU for non-visible access")
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Maciej Patelczyk <maciej.patelczyk@intel.com>
Cc: Stuart Summers <stuart.summers@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Stuart Summers <stuart.summers@intel.com>
Link: https://lore.kernel.org/r/20250731093807.207572-6-matthew.auld@intel.com
(cherry picked from commit 38b34e928a08ba594c4bbf7118aa3aadacd62fff)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/xe/xe_migrate.c

index ba1cff2e4cda3f6154707b25e5b8d0e0ff062822..6193e2ca3741ed5237175ac3609a69afa9b3d17d 100644
@@ -1820,15 +1820,19 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
        if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) ||
            !IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) {
                int buf_offset = 0;
+               void *bounce;
+               int err;
+
+               BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES));
+               bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL);
+               if (!bounce)
+                       return -ENOMEM;
 
                /*
                 * Less than ideal for large unaligned access but this should be
                 * fairly rare, can fixup if this becomes common.
                 */
                do {
-                       u8 bounce[XE_CACHELINE_BYTES];
-                       void *ptr = (void *)bounce;
-                       int err;
                        int copy_bytes = min_t(int, bytes_left,
                                               XE_CACHELINE_BYTES -
                                               (offset & XE_CACHELINE_MASK));
@@ -1837,22 +1841,22 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
                        err = xe_migrate_access_memory(m, bo,
                                                       offset &
                                                       ~XE_CACHELINE_MASK,
-                                                      (void *)ptr,
-                                                      sizeof(bounce), 0);
+                                                      bounce,
+                                                      XE_CACHELINE_BYTES, 0);
                        if (err)
-                               return err;
+                               break;
 
                        if (write) {
-                               memcpy(ptr + ptr_offset, buf + buf_offset, copy_bytes);
+                               memcpy(bounce + ptr_offset, buf + buf_offset, copy_bytes);
 
                                err = xe_migrate_access_memory(m, bo,
                                                               offset & ~XE_CACHELINE_MASK,
-                                                              (void *)ptr,
-                                                              sizeof(bounce), write);
+                                                              bounce,
+                                                              XE_CACHELINE_BYTES, write);
                                if (err)
-                                       return err;
+                                       break;
                        } else {
-                               memcpy(buf + buf_offset, ptr + ptr_offset,
+                               memcpy(buf + buf_offset, bounce + ptr_offset,
                                       copy_bytes);
                        }
 
@@ -1861,7 +1865,8 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
                        offset += copy_bytes;
                } while (bytes_left);
 
-               return 0;
+               kfree(bounce);
+               return err;
        }
 
        dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);