selftests/mm: fix FORCE_READ to read input value correctly
author	Zi Yan <ziy@nvidia.com>
	Tue, 5 Aug 2025 17:51:40 +0000 (13:51 -0400)
committer	Andrew Morton <akpm@linux-foundation.org>
	Thu, 28 Aug 2025 05:45:42 +0000 (22:45 -0700)
FORCE_READ() converts the input value x to its pointer type and then
reads from address x.  This is wrong.  If x were a non-pointer, the
mistake would be caught easily.  But all FORCE_READ() callers pass a
pointer, so FORCE_READ() ends up reading through a pointer to a pointer
instead of through the original typed pointer.  Almost no access
violation was found, except the one from split_huge_page_test.

Fix it by implementing a simplified READ_ONCE() instead.
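
For illustration only (this snippet is not part of the patch; the
OLD_FORCE_READ/NEW_FORCE_READ names are made up for the sketch), a
minimal userspace program contrasting the old and the fixed definition:

	/*
	 * Standalone sketch: OLD_FORCE_READ() mirrors the broken macro,
	 * NEW_FORCE_READ() mirrors the simplified READ_ONCE()-style fix.
	 * Requires the typeof extension (gcc/clang).
	 */
	#include <stdio.h>

	#define OLD_FORCE_READ(x) (*(volatile typeof(x) *)x)
	#define NEW_FORCE_READ(x) (*(const volatile typeof(x) *)&(x))

	int main(void)
	{
		char buf[64] = "hello";
		char *ptr = buf;

		/*
		 * Old behaviour: typeof(ptr) is char *, so this loads a
		 * pointer-sized value from the address in ptr rather than a
		 * single char; near the end of a mapping that can fault.
		 */
		OLD_FORCE_READ(ptr);

		/* New behaviour: the caller dereferences, one char is read. */
		NEW_FORCE_READ(*ptr);

		printf("both reads done\n");
		return 0;
	}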

Link: https://lkml.kernel.org/r/20250805175140.241656-1-ziy@nvidia.com
Fixes: 3f6bfd4789a0 ("selftests/mm: reuse FORCE_READ to replace "asm volatile("" : "+r" (XXX));"")
Signed-off-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: wang lian <lianux.mm@gmail.com>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Jann Horn <jannh@google.com>
Cc: Kairui Song <ryncsn@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: SeongJae Park <sj@kernel.org>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
tools/testing/selftests/mm/cow.c
tools/testing/selftests/mm/guard-regions.c
tools/testing/selftests/mm/hugetlb-madvise.c
tools/testing/selftests/mm/migration.c
tools/testing/selftests/mm/pagemap_ioctl.c
tools/testing/selftests/mm/split_huge_page_test.c
tools/testing/selftests/mm/vm_util.h

index d30625c18259b99aeb0edad782940b30f1babb5e..c744c603d688e44c2d961db14389a330c6fb339a 100644 (file)
@@ -1554,8 +1554,8 @@ static void run_with_zeropage(non_anon_test_fn fn, const char *desc)
        }
 
        /* Read from the page to populate the shared zeropage. */
-       FORCE_READ(mem);
-       FORCE_READ(smem);
+       FORCE_READ(*mem);
+       FORCE_READ(*smem);
 
        fn(mem, smem, pagesize);
 munmap:
index b0d42eb04e3ae86d298492e23f5c85e24f387537..8dd81c0a4a5a2c25f204e8f40c105505dec8b163 100644 (file)
@@ -145,7 +145,7 @@ static bool try_access_buf(char *ptr, bool write)
                if (write)
                        *ptr = 'x';
                else
-                       FORCE_READ(ptr);
+                       FORCE_READ(*ptr);
        }
 
        signal_jump_set = false;
index 1afe14b9dc0c34a5e12ec7af67bdd745795a3ec7..c5940c0595be8872d6ab7a649a6e6b75d764ec87 100644 (file)
@@ -50,8 +50,10 @@ void read_fault_pages(void *addr, unsigned long nr_pages)
        unsigned long i;
 
        for (i = 0; i < nr_pages; i++) {
+               unsigned long *addr2 =
+                       ((unsigned long *)(addr + (i * huge_page_size)));
                /* Prevent the compiler from optimizing out the entire loop: */
-               FORCE_READ(((unsigned long *)(addr + (i * huge_page_size))));
+               FORCE_READ(*addr2);
        }
 }
 
index c5a73617796aeb6f99fd52889447149366d7ce28..ea945eebec2f62c348c4f5c470f5192300e3f032 100644 (file)
@@ -110,7 +110,7 @@ void *access_mem(void *ptr)
                 * the memory access actually happens and prevents the compiler
                 * from optimizing away this entire loop.
                 */
-               FORCE_READ((uint64_t *)ptr);
+               FORCE_READ(*(uint64_t *)ptr);
        }
 
        return NULL;
index 0d4209eef0c3d4c5ea5365ea9c8f08fd7c4643d3..e6face7c0166eaddae61b50e422f9706cd92a763 100644 (file)
@@ -1525,7 +1525,7 @@ void zeropfn_tests(void)
 
        ret = madvise(mem, hpage_size, MADV_HUGEPAGE);
        if (!ret) {
-               FORCE_READ(mem);
+               FORCE_READ(*mem);
 
                ret = pagemap_ioctl(mem, hpage_size, &vec, 1, 0,
                                    0, PAGE_IS_PFNZERO, 0, 0, PAGE_IS_PFNZERO);
index 05de1fc0005b7873de15ad63ea8d314cd6ae5e7f..44a3f8a58806709035476862ea845327ea5f61d6 100644 (file)
@@ -439,8 +439,11 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd,
        }
        madvise(*addr, fd_size, MADV_HUGEPAGE);
 
-       for (size_t i = 0; i < fd_size; i++)
-               FORCE_READ((*addr + i));
+       for (size_t i = 0; i < fd_size; i++) {
+               char *addr2 = *addr + i;
+
+               FORCE_READ(*addr2);
+       }
 
        if (!check_huge_file(*addr, fd_size / pmd_pagesize, pmd_pagesize)) {
                ksft_print_msg("No large pagecache folio generated, please provide a filesystem supporting large folio\n");
index c20298ae98ea5c5395da66c59e43a5c3b64c762d..b55d1809debc066d4966095a9599598a3a6bcc3c 100644 (file)
@@ -23,7 +23,7 @@
  * anything with it in order to trigger a read page fault. We therefore must use
  * volatile to stop the compiler from optimising this away.
  */
-#define FORCE_READ(x) (*(volatile typeof(x) *)x)
+#define FORCE_READ(x) (*(const volatile typeof(x) *)&(x))
 
 extern unsigned int __page_size;
 extern unsigned int __page_shift;