// SPDX-License-Identifier: GPL-2.0
/*
 * Basic functional testing of madvise MADV_DONTNEED and MADV_REMOVE
 * on hugetlb mappings.
 *
 * Before running this test, make sure the administrator has pre-allocated
 * at least MIN_FREE_PAGES hugetlb pages and they are free.  The test
 * creates its hugetlb file with memfd_create(MFD_HUGETLB), so no
 * hugetlbfs mount or file-path argument is required.
 */
#define MIN_FREE_PAGES	20
#define NR_HUGE_PAGES	10	/* common number of pages to map/allocate */

/*
 * Compare the current free hugetlb page count (from /proc/meminfo) with
 * the expected value and abort the test on mismatch, reporting the call
 * site's line number.  Wrapped in do/while(0) so it expands to a single
 * statement and is safe in unbraced if/else bodies.
 */
#define validate_free_pages(exp_free)					\
	do {								\
		int fhp = get_free_hugepages();				\
		if (fhp != (exp_free)) {				\
			printf("Unexpected number of free huge "	\
				"pages line %d\n", __LINE__);		\
			exit(1);					\
		}							\
	} while (0)
/* Page sizes in bytes, discovered at runtime in main(). */
unsigned long huge_page_size;
unsigned long base_page_size;
/*
 * default_huge_page_size copied from mlock2-tests.c
 *
 * Parse /proc/meminfo for the "Hugepagesize:" line and return the default
 * huge page size in bytes, or 0 if it cannot be determined.
 */
unsigned long default_huge_page_size(void)
{
	unsigned long hps = 0;
	char *line = NULL;
	size_t linelen = 0;
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return hps;
	while (getline(&line, &linelen, f) > 0) {
		if (sscanf(line, "Hugepagesize: %lu kB", &hps) == 1) {
			/* meminfo reports kB; callers use bytes */
			hps <<= 10;
			break;
		}
	}
	/* getline() allocates; release it along with the stream */
	free(line);
	fclose(f);
	return hps;
}
/*
 * Parse /proc/meminfo for the "HugePages_Free:" line and return the
 * current number of free hugetlb pages, or 0 if it cannot be determined.
 */
unsigned long get_free_hugepages(void)
{
	unsigned long fhp = 0;
	char *line = NULL;
	size_t linelen = 0;
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return fhp;
	while (getline(&line, &linelen, f) > 0) {
		if (sscanf(line, "HugePages_Free: %lu", &fhp) == 1)
			break;
	}
	/* getline() allocates; release it along with the stream */
	free(line);
	fclose(f);
	return fhp;
}
/* Tentative definition; merges with the file-scope declaration above. */
unsigned long huge_page_size;

/*
 * Write-fault the first word of each of nr_pages huge pages starting at
 * addr, forcing allocation of the backing hugetlb pages.  Note: void *
 * arithmetic is a GNU extension (fine for this kernel selftest).
 */
void write_fault_pages(void *addr, unsigned long nr_pages)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++)
		*((unsigned long *)(addr + (i * huge_page_size))) = i;
}
90 void read_fault_pages(void *addr, unsigned long nr_pages)
92 unsigned long dummy = 0;
95 for (i = 0; i < nr_pages; i++)
96 dummy += *((unsigned long *)(addr + (i * huge_page_size)));
99 int main(int argc, char **argv)
101 unsigned long free_hugepages;
106 huge_page_size = default_huge_page_size();
107 if (!huge_page_size) {
108 printf("Unable to determine huge page size, exiting!\n");
111 base_page_size = sysconf(_SC_PAGE_SIZE);
112 if (!huge_page_size) {
113 printf("Unable to determine base page size, exiting!\n");
117 free_hugepages = get_free_hugepages();
118 if (free_hugepages < MIN_FREE_PAGES) {
119 printf("Not enough free huge pages to test, exiting!\n");
123 fd = memfd_create(argv[0], MFD_HUGETLB);
125 perror("memfd_create() failed");
130 * Test validity of MADV_DONTNEED addr and length arguments. mmap
131 * size is NR_HUGE_PAGES + 2. One page at the beginning and end of
132 * the mapping will be unmapped so we KNOW there is nothing mapped
135 addr = mmap(NULL, (NR_HUGE_PAGES + 2) * huge_page_size,
136 PROT_READ | PROT_WRITE,
137 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
139 if (addr == MAP_FAILED) {
143 if (munmap(addr, huge_page_size) ||
144 munmap(addr + (NR_HUGE_PAGES + 1) * huge_page_size,
149 addr = addr + huge_page_size;
151 write_fault_pages(addr, NR_HUGE_PAGES);
152 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
154 /* addr before mapping should fail */
155 ret = madvise(addr - base_page_size, NR_HUGE_PAGES * huge_page_size,
158 printf("Unexpected success of madvise call with invalid addr line %d\n",
163 /* addr + length after mapping should fail */
164 ret = madvise(addr, (NR_HUGE_PAGES * huge_page_size) + base_page_size,
167 printf("Unexpected success of madvise call with invalid length line %d\n",
172 (void)munmap(addr, NR_HUGE_PAGES * huge_page_size);
175 * Test alignment of MADV_DONTNEED addr and length arguments
177 addr = mmap(NULL, NR_HUGE_PAGES * huge_page_size,
178 PROT_READ | PROT_WRITE,
179 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
181 if (addr == MAP_FAILED) {
185 write_fault_pages(addr, NR_HUGE_PAGES);
186 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
188 /* addr is not huge page size aligned and should fail */
189 ret = madvise(addr + base_page_size,
190 NR_HUGE_PAGES * huge_page_size - base_page_size,
193 printf("Unexpected success of madvise call with unaligned start address %d\n",
198 /* addr + length should be aligned down to huge page size */
200 ((NR_HUGE_PAGES - 1) * huge_page_size) + base_page_size,
206 /* should free all but last page in mapping */
207 validate_free_pages(free_hugepages - 1);
209 (void)munmap(addr, NR_HUGE_PAGES * huge_page_size);
210 validate_free_pages(free_hugepages);
213 * Test MADV_DONTNEED on anonymous private mapping
215 addr = mmap(NULL, NR_HUGE_PAGES * huge_page_size,
216 PROT_READ | PROT_WRITE,
217 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
219 if (addr == MAP_FAILED) {
223 write_fault_pages(addr, NR_HUGE_PAGES);
224 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
226 if (madvise(addr, NR_HUGE_PAGES * huge_page_size, MADV_DONTNEED)) {
231 /* should free all pages in mapping */
232 validate_free_pages(free_hugepages);
234 (void)munmap(addr, NR_HUGE_PAGES * huge_page_size);
237 * Test MADV_DONTNEED on private mapping of hugetlb file
239 if (fallocate(fd, 0, 0, NR_HUGE_PAGES * huge_page_size)) {
243 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
245 addr = mmap(NULL, NR_HUGE_PAGES * huge_page_size,
246 PROT_READ | PROT_WRITE,
248 if (addr == MAP_FAILED) {
253 /* read should not consume any pages */
254 read_fault_pages(addr, NR_HUGE_PAGES);
255 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
257 /* madvise should not free any pages */
258 if (madvise(addr, NR_HUGE_PAGES * huge_page_size, MADV_DONTNEED)) {
262 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
264 /* writes should allocate private pages */
265 write_fault_pages(addr, NR_HUGE_PAGES);
266 validate_free_pages(free_hugepages - (2 * NR_HUGE_PAGES));
268 /* madvise should free private pages */
269 if (madvise(addr, NR_HUGE_PAGES * huge_page_size, MADV_DONTNEED)) {
273 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
275 /* writes should allocate private pages */
276 write_fault_pages(addr, NR_HUGE_PAGES);
277 validate_free_pages(free_hugepages - (2 * NR_HUGE_PAGES));
280 * The fallocate below certainly should free the pages associated
281 * with the file. However, pages in the private mapping are also
282 * freed. This is not the 'correct' behavior, but is expected
283 * because this is how it has worked since the initial hugetlb
286 if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
287 0, NR_HUGE_PAGES * huge_page_size)) {
291 validate_free_pages(free_hugepages);
293 (void)munmap(addr, NR_HUGE_PAGES * huge_page_size);
296 * Test MADV_DONTNEED on shared mapping of hugetlb file
298 if (fallocate(fd, 0, 0, NR_HUGE_PAGES * huge_page_size)) {
302 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
304 addr = mmap(NULL, NR_HUGE_PAGES * huge_page_size,
305 PROT_READ | PROT_WRITE,
307 if (addr == MAP_FAILED) {
312 /* write should not consume any pages */
313 write_fault_pages(addr, NR_HUGE_PAGES);
314 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
316 /* madvise should not free any pages */
317 if (madvise(addr, NR_HUGE_PAGES * huge_page_size, MADV_DONTNEED)) {
321 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
324 * Test MADV_REMOVE on shared mapping of hugetlb file
326 * madvise is same as hole punch and should free all pages.
328 if (madvise(addr, NR_HUGE_PAGES * huge_page_size, MADV_REMOVE)) {
332 validate_free_pages(free_hugepages);
333 (void)munmap(addr, NR_HUGE_PAGES * huge_page_size);
336 * Test MADV_REMOVE on shared and private mapping of hugetlb file
338 if (fallocate(fd, 0, 0, NR_HUGE_PAGES * huge_page_size)) {
342 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
344 addr = mmap(NULL, NR_HUGE_PAGES * huge_page_size,
345 PROT_READ | PROT_WRITE,
347 if (addr == MAP_FAILED) {
352 /* shared write should not consume any additional pages */
353 write_fault_pages(addr, NR_HUGE_PAGES);
354 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
356 addr2 = mmap(NULL, NR_HUGE_PAGES * huge_page_size,
357 PROT_READ | PROT_WRITE,
359 if (addr2 == MAP_FAILED) {
364 /* private read should not consume any pages */
365 read_fault_pages(addr2, NR_HUGE_PAGES);
366 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
368 /* private write should consume additional pages */
369 write_fault_pages(addr2, NR_HUGE_PAGES);
370 validate_free_pages(free_hugepages - (2 * NR_HUGE_PAGES));
372 /* madvise of shared mapping should not free any pages */
373 if (madvise(addr, NR_HUGE_PAGES * huge_page_size, MADV_DONTNEED)) {
377 validate_free_pages(free_hugepages - (2 * NR_HUGE_PAGES));
379 /* madvise of private mapping should free private pages */
380 if (madvise(addr2, NR_HUGE_PAGES * huge_page_size, MADV_DONTNEED)) {
384 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
386 /* private write should consume additional pages again */
387 write_fault_pages(addr2, NR_HUGE_PAGES);
388 validate_free_pages(free_hugepages - (2 * NR_HUGE_PAGES));
391 * madvise should free both file and private pages although this is
392 * not correct. private pages should not be freed, but this is
393 * expected. See comment associated with FALLOC_FL_PUNCH_HOLE call.
395 if (madvise(addr, NR_HUGE_PAGES * huge_page_size, MADV_REMOVE)) {
399 validate_free_pages(free_hugepages);
401 (void)munmap(addr, NR_HUGE_PAGES * huge_page_size);
402 (void)munmap(addr2, NR_HUGE_PAGES * huge_page_size);