1 // SPDX-License-Identifier: GPL-2.0
3 * A test case of using hugepage memory in a user application using the
4 * mmap system call with MAP_HUGETLB flag. Before running this program
5 * make sure the administrator has allocated enough default sized huge
6 * pages to cover the 2 MB allocation.
14 #define MAP_LENGTH (2UL * 1024 * 1024)
17 #define MAP_HUGETLB 0x40000 /* arch specific */
20 #define PAGE_SIZE 4096
22 #define PAGE_COMPOUND_HEAD (1UL << 15)
23 #define PAGE_COMPOUND_TAIL (1UL << 16)
24 #define PAGE_HUGE (1UL << 17)
26 #define HEAD_PAGE_FLAGS (PAGE_COMPOUND_HEAD | PAGE_HUGE)
27 #define TAIL_PAGE_FLAGS (PAGE_COMPOUND_TAIL | PAGE_HUGE)
29 #define PM_PFRAME_BITS 55
30 #define PM_PFRAME_MASK ~((1UL << PM_PFRAME_BITS) - 1)
33 * For ia64 architecture, Linux kernel reserves Region number 4 for hugepages.
34 * That means the addresses starting with 0x800000... will need to be
35 * specified. Specifying a fixed address is not required on ppc64, i386
39 #define MAP_ADDR (void *)(0x8000000000000000UL)
40 #define MAP_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_FIXED)
43 #define MAP_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB)
/*
 * Touch every byte of the mapping so the kernel actually allocates the
 * backing HugeTLB page (mmap alone only reserves the range).  The byte
 * pattern written is simply the low 8 bits of the offset.
 */
static void write_bytes(char *addr, size_t length)
{
	size_t i;

	for (i = 0; i < length; i++)
		*(addr + i) = (char)i;
}
54 static unsigned long virt_to_pfn(void *addr)
57 unsigned long pagemap;
59 fd = open("/proc/self/pagemap", O_RDONLY);
63 lseek(fd, (unsigned long)addr / PAGE_SIZE * sizeof(pagemap), SEEK_SET);
64 read(fd, &pagemap, sizeof(pagemap));
67 return pagemap & ~PM_PFRAME_MASK;
70 static int check_page_flags(unsigned long pfn)
73 unsigned long pageflags;
75 fd = open("/proc/kpageflags", O_RDONLY);
79 lseek(fd, pfn * sizeof(pageflags), SEEK_SET);
81 read(fd, &pageflags, sizeof(pageflags));
82 if ((pageflags & HEAD_PAGE_FLAGS) != HEAD_PAGE_FLAGS) {
84 printf("Head page flags (%lx) is invalid\n", pageflags);
89 * pages other than the first page must be tail and shouldn't be head;
90 * this also verifies kernel has correctly set the fake page_head to tail
91 * while hugetlb_free_vmemmap is enabled.
93 for (i = 1; i < MAP_LENGTH / PAGE_SIZE; i++) {
94 read(fd, &pageflags, sizeof(pageflags));
95 if ((pageflags & TAIL_PAGE_FLAGS) != TAIL_PAGE_FLAGS ||
96 (pageflags & HEAD_PAGE_FLAGS) == HEAD_PAGE_FLAGS) {
98 printf("Tail page flags (%lx) is invalid\n", pageflags);
108 int main(int argc, char **argv)
113 addr = mmap(MAP_ADDR, MAP_LENGTH, PROT_READ | PROT_WRITE, MAP_FLAGS, -1, 0);
114 if (addr == MAP_FAILED) {
119 /* Trigger allocation of HugeTLB page. */
120 write_bytes(addr, MAP_LENGTH);
122 pfn = virt_to_pfn(addr);
124 munmap(addr, MAP_LENGTH);
125 perror("virt_to_pfn");
129 printf("Returned address is %p whose pfn is %lx\n", addr, pfn);
131 if (check_page_flags(pfn) < 0) {
132 munmap(addr, MAP_LENGTH);
133 perror("check_page_flags");
137 /* munmap() length of MAP_HUGETLB memory must be hugepage aligned */
138 if (munmap(addr, MAP_LENGTH)) {