{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
	const void *end = ptr + n - 1;
-	struct page *endpage;
	bool is_reserved, is_cma;

	/* Is the object wholly within one base page? */
	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
		return;

-	/* Allow if fully inside the same compound (__GFP_COMP) page. */
-	endpage = virt_to_head_page(end);
-	if (likely(endpage == page))
-		return;
-
	/*
	 * Reject if range is entirely either Reserved (i.e. special or
	 * device memory), or CMA. Otherwise, reject since the object spans
	 * several independently allocated pages.
	 */
	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);
	if (!is_reserved && !is_cma)
		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);
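
For reference, a minimal standalone sketch of the same-base-page test above (illustrative only: EX_PAGE_SIZE, EX_PAGE_MASK and ex_same_base_page() are made-up stand-ins for the kernel's PAGE_MASK machinery and the in-kernel check, assuming 4 KiB base pages):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE	4096UL
#define EX_PAGE_MASK	(~(EX_PAGE_SIZE - 1))

/* True if [ptr, ptr + n - 1] lies entirely within one base page. */
static bool ex_same_base_page(uintptr_t ptr, unsigned long n)
{
	uintptr_t end = ptr + n - 1;

	return (ptr & EX_PAGE_MASK) == (end & EX_PAGE_MASK);
}

int main(void)
{
	/* A 16-byte object at 0x1ff0 ends at 0x1fff: same page. */
	printf("%d\n", ex_same_base_page(0x1ff0, 16));	/* prints 1 */
	/* A 32-byte object at 0x1ff0 ends at 0x200f: crosses a page boundary. */
	printf("%d\n", ex_same_base_page(0x1ff0, 32));	/* prints 0 */
	return 0;
}
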
	if (folio_test_slab(folio)) {
		/* Check slab allocator for flags and size. */
		__check_heap_object(ptr, n, folio_slab(folio), to_user);
+	} else if (folio_test_large(folio)) {
+		unsigned long offset = ptr - folio_address(folio);
+		if (offset + n > folio_size(folio))
+			usercopy_abort("page alloc", NULL, to_user, offset, n);
	} else {
		/* Verify object does not incorrectly span multiple pages. */
		check_page_span(ptr, n, folio_page(folio, 0), to_user);
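
For comparison, a minimal standalone sketch of the bounds arithmetic behind the new folio_test_large() branch above (illustrative only: ex_spans_past_folio() is a made-up helper, and the offset and size values merely stand in for what folio_address() and folio_size() would yield):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Reject the copy when the object would run past the end of its folio. */
static bool ex_spans_past_folio(size_t offset, size_t n, size_t folio_size)
{
	return offset + n > folio_size;
}

int main(void)
{
	/* 16 KiB folio, object at offset 0x3f00: a 512-byte copy overruns it. */
	printf("%d\n", ex_spans_past_folio(0x3f00, 512, 16384));	/* prints 1 */
	/* A 256-byte copy ends exactly at the folio boundary and is allowed. */
	printf("%d\n", ex_spans_past_folio(0x3f00, 256, 16384));	/* prints 0 */
	return 0;
}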