/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/tracepoint-defs.h>

struct folio_batch;

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC|__GFP_NOLOCKDEP)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

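/*
 * Illustrative sketch, not part of the original header: one way a helper
 * allocating metadata on behalf of a caller might use GFP_RECLAIM_MASK,
 * keeping the caller's reclaim/IO/FS constraints while dropping its
 * placement hints.  The helper name and size parameter are assumptions.
 *
 *	static void *alloc_meta(size_t size, gfp_t caller_gfp)
 *	{
 *		return kmalloc(size, caller_gfp & GFP_RECLAIM_MASK);
 *	}
 */
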
/*
 * Different from WARN_ON_ONCE(), no warning will be issued
 * when we specify __GFP_NOWARN.
 */
#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
	static bool __section(".data.once") __warned;			\
	int __ret_warn_once = !!(cond);					\
									\
	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
		__warned = true;					\
		WARN_ON(1);						\
	}								\
	unlikely(__ret_warn_once);					\
})

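/*
 * Illustrative usage sketch (an assumption, not a definition from this
 * file): a GFP-aware sanity check that stays silent for __GFP_NOWARN
 * callers, e.g. rejecting an impossibly large order in an allocator:
 *
 *	if (WARN_ON_ONCE_GFP(order >= MAX_ORDER, gfp))
 *		return NULL;
 */
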
void page_writeback_init(void);

/*
 * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
 * its nr_pages_mapped would be 0x400000: choose the COMPOUND_MAPPED bit
 * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE).  Hugetlb currently
 * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
 */
#define COMPOUND_MAPPED		0x800000
#define FOLIO_PAGES_MAPPED	(COMPOUND_MAPPED - 1)

/*
 * How many individual pages have an elevated _mapcount.  Excludes
 * the folio's entire_mapcount.
 */
static inline int folio_nr_pages_mapped(struct folio *folio)
{
	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
}

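/*
 * Illustrative note, not from the original header: with this layout the
 * same counter can also answer "is the folio mapped by at least one PMD?"
 * by testing the high bit, e.g. (an assumed helper, for exposition only):
 *
 *	static inline bool folio_pmd_mapped(struct folio *folio)
 *	{
 *		return atomic_read(&folio->_nr_pages_mapped) & COMPOUND_MAPPED;
 *	}
 */
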
static inline void *folio_raw_mapping(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}

void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
						int nr_throttled);
static inline void acct_reclaim_writeback(struct folio *folio)
{
	pg_data_t *pgdat = folio_pgdat(folio);
	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);

	if (nr_throttled)
		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
}

static inline void wake_throttle_isolated(pg_data_t *pgdat)
{
	wait_queue_head_t *wqh;

	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
	if (waitqueue_active(wqh))
		wake_up(wqh);
}

vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);
void folio_activate(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
		   struct vm_area_struct *start_vma, unsigned long floor,
		   unsigned long ceiling);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);

struct zap_details;
void unmap_page_range(struct mmu_gather *tlb,
		      struct vm_area_struct *vma,
		      unsigned long addr, unsigned long end,
		      struct zap_details *details);

void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
		unsigned int order);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
		loff_t end);
long invalidate_inode_page(struct page *page);
unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec);

/**
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., should be placed on
 * active/inactive lists vs unevictable list.
 *
 * Reasons folio might not be evictable:
 * 1. folio's mapping marked unevictable
 * 2. One of the pages in the folio is part of an mlocked VMA
 */
static inline bool folio_evictable(struct folio *folio)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(folio_mapping(folio)) &&
			!folio_test_mlocked(folio);
	rcu_read_unlock();
	return ret;
}

static inline bool page_evictable(struct page *page)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/early_ioremap.c
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					unsigned long size, pgprot_t prot);

/*
 * in mm/vmscan.c:
 */
int isolate_lru_page(struct page *page);
int folio_isolate_lru(struct folio *folio);
void putback_lru_page(struct page *page);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);

/*
 * in mm/rmap.c:
 */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents the highest usable zone index of
	 * the allocation request. Due to the nature of the zone,
	 * memory in zones lower than highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone, since zones higher than this index cannot
	 * be used for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))

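/*
 * Illustrative sketch of the pattern described above (an assumption, in
 * the spirit of the lockless scanners in mm/compaction.c): read the order
 * once into a local, range-check it, and only then trust it.
 *
 *	if (PageBuddy(page)) {
 *		unsigned long freepage_order = buddy_order_unsafe(page);
 *
 *		if (freepage_order < MAX_ORDER)
 *			pfn += (1UL << freepage_order) - 1;
 *	}
 */
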
/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline bool page_is_buddy(struct page *page, struct page *buddy,
				 unsigned int order)
{
	if (!page_is_guard(buddy) && !PageBuddy(buddy))
		return false;

	if (buddy_order(buddy) != order)
		return false;

	/*
	 * zone check is done late to avoid uselessly calculating
	 * zone/node ids for pages that could never merge.
	 */
	if (page_zone_id(page) != page_zone_id(buddy))
		return false;

	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

	return true;
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}

/*
 * Find the buddy of @page and validate it.
 * @page: The input page
 * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
 *       function is used in the performance-critical __free_one_page().
 * @order: The order of the page
 * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
 *             page_to_pfn().
 *
 * The found buddy can be a non PageBuddy, out of @page's zone, or its order
 * may not be the same as @page's. Validation is necessary before using it.
 *
 * Return: the found buddy page or NULL if not found.
 */
static inline struct page *find_buddy_page_pfn(struct page *page,
			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
{
	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
	struct page *buddy;

	buddy = page + (__buddy_pfn - pfn);
	if (buddy_pfn)
		*buddy_pfn = __buddy_pfn;

	if (page_is_buddy(page, buddy, order))
		return buddy;
	return NULL;
}

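/*
 * Illustrative sketch, not part of this header: how the buddy helpers are
 * typically combined when merging a freed block upwards, in the style of
 * __free_one_page().  Surrounding names (zone, the free-list helpers) are
 * assumed from the caller's context.
 *
 *	while (order < MAX_ORDER - 1) {
 *		buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
 *		if (!buddy)
 *			break;			// buddy not free at this order
 *		del_page_from_free_list(buddy, zone, order);
 *		combined_pfn = buddy_pfn & pfn;	// lower pfn of the merged pair
 *		page = page + (combined_pfn - pfn);
 *		pfn = combined_pfn;
 *		order++;
 *	}
 */
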
extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

extern void free_unref_page(struct page *page, unsigned int order);
extern void free_unref_page_list(struct list_head *list);

extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

int split_free_page(struct page *free_page,
			unsigned int order, unsigned long split_pfn_offset);

/*
 * This will have no effect, other than possibly generating a warning, if the
 * caller passes in a non-large folio.
 */
static inline void folio_set_order(struct folio *folio, unsigned int order)
{
	if (WARN_ON_ONCE(!folio_test_large(folio)))
		return;

	folio->_folio_order = order;
#ifdef CONFIG_64BIT
	/*
	 * When hugetlb dissolves a folio, we need to clear the tail
	 * page, rather than setting nr_pages to 1.
	 */
	folio->_folio_nr_pages = order ? 1U << order : 0;
#endif
}

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock contention */
	bool rescan;			/* Rescanning the same pageblock */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);

int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end);
#endif
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

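/*
 * Illustrative sketch (an assumption, modelled on vm_stat_account() in
 * mm/mmap.c rather than defined here): how the classifiers above feed the
 * per-mm accounting counters when 'npages' pages are mapped with 'flags'.
 *
 *	if (is_exec_mapping(flags))
 *		mm->exec_vm += npages;
 *	else if (is_stack_mapping(flags))
 *		mm->stack_vm += npages;
 *	else if (is_data_mapping(flags))
 *		mm->data_vm += npages;
 */
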
/* mm/util.c */
struct anon_vma *folio_anon_vma(struct folio *folio);

#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_vma_page_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   bool write, int *locked);
extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
			      unsigned long len);
/*
 * mlock_vma_page() and munlock_vma_page():
 * should be called with vma's mmap_lock held for read or write,
 * under page table lock for the pte/pmd being added or removed.
 *
 * mlock is usually called at the end of page_add_*_rmap(), munlock at
 * the end of page_remove_rmap(); but new anon folios are managed by
 * folio_add_lru_vma() calling mlock_new_folio().
 *
 * @compound is used to include pmd mappings of THPs, but filter out
 * pte mappings of THPs, which cannot be consistently counted: a pte
 * mapping of the THP head cannot be distinguished by the page alone.
 */
void mlock_folio(struct folio *folio);
static inline void mlock_vma_folio(struct folio *folio,
			struct vm_area_struct *vma, bool compound)
{
	/*
	 * The VM_SPECIAL check here serves two purposes.
	 * 1) VM_IO check prevents migration from double-counting during mlock.
	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
	 *    is never left set on a VM_SPECIAL vma, there is an interval while
	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
	 */
	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED) &&
	    (compound || !folio_test_large(folio)))
		mlock_folio(folio);
}

static inline void mlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound)
{
	mlock_vma_folio(page_folio(page), vma, compound);
}

void munlock_folio(struct folio *folio);

static inline void munlock_vma_folio(struct folio *folio,
			struct vm_area_struct *vma, bool compound)
{
	if (unlikely(vma->vm_flags & VM_LOCKED) &&
	    (compound || !folio_test_large(folio)))
		munlock_folio(folio);
}

static inline void munlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound)
{
	munlock_vma_folio(page_folio(page), vma, compound);
}
void mlock_new_folio(struct folio *folio);
bool need_mlock_drain(int cpu);
void mlock_drain_local(void);
void mlock_drain_remote(int cpu);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * Return the start of user virtual address at the specific offset within
 * a vma.
 */
static inline unsigned long
vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
		  struct vm_area_struct *vma)
{
	unsigned long address;

	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}

/*
 * Return the start of user virtual address of a page within a vma.
 * Returns -EFAULT if all of the page is outside the range of vma.
 * If page is a compound head, the entire compound page is considered.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
	return vma_pgoff_address(page_to_pgoff(page), compound_nr(page), vma);
}

/*
 * Then at what user virtual address will none of the range be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 */
static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	pgoff_t pgoff;
	unsigned long address;

	/* Common case, plus ->pgoff is invalid for KSM */
	if (pvmw->nr_pages == 1)
		return pvmw->address + PAGE_SIZE;

	pgoff = pvmw->pgoff + pvmw->nr_pages;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		mmap_read_unlock(vmf->vma->vm_mm);
	}
	return fpin;
}
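
/*
 * Illustrative usage sketch, assumed from the page-fault paths in
 * mm/filemap.c rather than defined here: the caller threads 'fpin'
 * through every step that might sleep, then, if the file was pinned,
 * gives up and asks for the fault to be retried once I/O is underway.
 *
 *	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 *	... kick off readahead / wait for the folio lock ...
 *	if (fpin) {
 *		fput(fpin);
 *		return VM_FAULT_RETRY;	// mmap_lock was dropped above
 *	}
 */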
#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void mlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound) { }
static inline void munlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound) { }
static inline void mlock_new_folio(struct folio *folio) { }
static inline bool need_mlock_drain(int cpu) { return false; }
static inline void mlock_drain_local(void) { }
static inline void mlock_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg);	\
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

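/*
 * Illustrative only (the call below is an assumption in the style of the
 * mm init code, not something defined in this header): callers pass a
 * level and a short component prefix, and the message is emitted only
 * when mminit_loglevel has been raised above that level.
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *		       "initialising map node %d zone %lu pfns %lu -> %lu\n",
 *		       nid, zone_id, start_pfn, end_pfn);
 */
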
extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif

/*
 * mm/memory-failure.c
 */
extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
		unsigned long, unsigned long,
		unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

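/*
 * Illustrative sketch of how the watermark index is consumed (assumed from
 * the zonelist fast path in mm/page_alloc.c, not defined here): the low
 * bits of alloc_flags select which watermark a candidate zone must meet.
 *
 *	unsigned long mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 *
 *	if (!zone_watermark_fast(zone, order, mark, highest_zoneidx,
 *				 alloc_flags, gfp_mask))
 *		continue;	// try the next zone in the zonelist
 */
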
/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		 0x10 /* try to alloc harder */
#define ALLOC_HIGH		 0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
};

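/*
 * Illustrative sketch (an assumption, modelled on callers such as
 * mm/mempolicy.c rather than anything defined here): the control block is
 * filled in and handed to migrate_pages() as its private argument, where
 * alloc_migration_target() uses it to pick the destination.
 *
 *	struct migration_target_control mtc = {
 *		.nid = target_nid,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 *	};
 *
 *	migrate_pages(&pagelist, alloc_migration_target, NULL,
 *		      (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
 */
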
/*
 * mm/vmalloc.c
 */
#ifdef CONFIG_MMU
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift);
#else
static inline
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return -EINVAL;
}
#endif

int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
			       pgprot_t prot, struct page **pages,
			       unsigned int page_shift);

void vunmap_range_noflush(unsigned long start, unsigned long end);

void __vunmap_range_noflush(unsigned long start, unsigned long end);

int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
		      unsigned long addr, int page_nid, int *flags);

void free_zone_device_page(struct page *page);
int migrate_device_coherent_page(struct page *page);

/*
 * mm/gup.c
 */
struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);

extern bool mirrored_kernelcore;

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
	/*
	 * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty
	 * enablements, because when soft-dirty is not compiled in,
	 * VM_SOFTDIRTY is defined as 0x0, so !(vm_flags & VM_SOFTDIRTY)
	 * would be constantly true.
	 */
	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return false;

	/*
	 * Soft-dirty is kind of special: its tracking is enabled when the
	 * vma flag is not set.
	 */
	return !(vma->vm_flags & VM_SOFTDIRTY);
}

#endif	/* __MM_INTERNAL_H */