/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

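/*
 * Illustrative sketch (editor's example, not part of the original header):
 * a helper that receives a caller's gfp mask but must not inherit placement
 * hints would typically filter it through GFP_RECLAIM_MASK, keeping only the
 * reclaim/watermark behaviour bits. The function and size below are
 * hypothetical.
 *
 *	static void *alloc_internal_buffer(gfp_t caller_gfp)
 *	{
 *		gfp_t gfp = caller_gfp & GFP_RECLAIM_MASK;
 *
 *		return kmalloc(64, gfp);
 *	}
 */
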
void page_writeback_init(void);

static inline void *folio_raw_mapping(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	/* Strip the PAGE_MAPPING_FLAGS bits that tag anon/movable mappings. */
	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}

void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
						int nr_throttled);
static inline void acct_reclaim_writeback(struct folio *folio)
{
	pg_data_t *pgdat = folio_pgdat(folio);
	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);

	if (nr_throttled)
		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
}

static inline void wake_throttle_isolated(pg_data_t *pgdat)
{
	wait_queue_head_t *wqh;

	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
	if (waitqueue_active(wqh))
		wake_up(wqh);
}

vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);

static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
}

struct zap_details;
void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

void do_page_cache_ra(struct readahead_control *, unsigned long nr_to_read,
		unsigned long lookahead_size);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
		pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);

/**
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., should be placed on
 * active/inactive lists vs unevictable list.
 *
 * Reasons folio might not be evictable:
 * 1. folio's mapping marked unevictable
 * 2. One of the pages in the folio is part of an mlocked VMA
 */
static inline bool folio_evictable(struct folio *folio)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(folio_mapping(folio)) &&
			!folio_test_mlocked(folio);
	rcu_read_unlock();
	return ret;
}

static inline bool page_evictable(struct page *page)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/memcontrol.c:
 */
extern bool cgroup_memory_nokmem;

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents highest usable zone index of
	 * the allocation request. Due to the nature of the zone,
	 * memory on lower zone than the highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone since higher zone than this index cannot be
	 * usable for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}

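/*
 * Worked example (editor's illustration, not from the original header):
 * for a free page at pfn 8 being merged at order 1, the buddy and the merged
 * parent work out as follows.
 *
 *	unsigned long pfn = 8;
 *	unsigned int order = 1;
 *	unsigned long buddy  = __find_buddy_pfn(pfn, order);	// 8 ^ 2  = 10
 *	unsigned long parent = pfn & ~(1UL << order);		// 8 & ~2 = 8
 *
 * The order-1 pair at pfns 8 and 10 therefore combines into an order-2 block
 * starting at pfn 8.
 */
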
extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

extern void free_unref_page(struct page *page, unsigned int order);
extern void free_unref_page_list(struct list_head *list);

extern void zone_pcp_update(struct zone *zone, int cpu_online);
extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock or sched contention */
	bool rescan;			/* Rescanning the same pageblock */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

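/*
 * Editor's illustration of the two converging scanners described above
 * (not part of the original header):
 *
 *	zone start                                          zone end
 *	|--> migrate_pfn (migration scanner, moves right)
 *	           free_pfn (free scanner, moves left) <--|
 *
 * Each scanner advances toward the other; once free_pfn <= migrate_pfn the
 * whole zone has been covered and the compaction run finishes.
 */
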
/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);
#endif
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))

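/*
 * Illustrative usage pattern (editor's sketch, not from the original header):
 * read the order once, validate it, and only then act on it, so a racing
 * allocation cannot be observed twice with different values.
 *
 *	if (PageBuddy(page)) {
 *		unsigned long order = buddy_order_unsafe(page);
 *
 *		if (order > 0 && order < MAX_ORDER)
 *			pfn += (1UL << order) - 1;	// skip over the free block
 *	}
 */
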
/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

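/*
 * Editor's sketch of how these classifiers are typically consumed (loosely
 * modelled on vm_stat_account() in mm/mmap.c; treat the exact body as
 * illustrative):
 *
 *	void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
 *	{
 *		mm->total_vm += npages;
 *
 *		if (is_exec_mapping(flags))
 *			mm->exec_vm += npages;
 *		else if (is_stack_mapping(flags))
 *			mm->stack_vm += npages;
 *		else if (is_data_mapping(flags))
 *			mm->data_vm += npages;
 *	}
 */
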
/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev);
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);

#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_vma_page_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   bool write, int *locked);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * must be called with vma's mmap_lock held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
			      unsigned long len);

/*
 * Clear the page's PageMlocked(). This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * At what user virtual address is page expected in vma?
 * Returns -EFAULT if all of the page is outside the range of vma.
 * If page is a compound head, the entire compound page is considered.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff;
	unsigned long address;

	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
	pgoff = page_to_pgoff(page);
	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (PageHead(page) &&
		   pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}

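/*
 * Worked example (editor's illustration, not from the original header):
 * for a VMA with vm_start = 0x7f0000000000 and vm_pgoff = 16, a page whose
 * pgoff within the file is 20 maps to
 *
 *	address = 0x7f0000000000 + ((20 - 16) << PAGE_SHIFT)
 *	        = 0x7f0000000000 + 4 * PAGE_SIZE
 *
 * provided that address still falls inside [vm_start, vm_end); otherwise
 * -EFAULT is returned.
 */
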
/*
 * Then at what user virtual address will none of the page be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 * If page is a compound head, the entire compound page is considered.
 */
static inline unsigned long
vma_address_end(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff;
	unsigned long address;

	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
	pgoff = page_to_pgoff(page) + compound_nr(page);
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		mmap_read_unlock(vmf->vma->vm_mm);
	}
	return fpin;
}
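
/*
 * Editor's sketch of the calling convention (loosely modelled on the fault
 * paths in mm/filemap.c; details are illustrative): a fault handler calls
 * maybe_unlock_mmap_for_io() before sleeping for I/O, and if the mmap_lock
 * was dropped it must release the pinned file and ask the caller to retry.
 *
 *	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 *	// ...start readahead or wait for the page lock...
 *	if (fpin) {
 *		fput(fpin);
 *		return VM_FAULT_RETRY;
 *	}
 */
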
#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'. Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'. Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}

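/*
 * Editor's sketch of the intended iteration pattern (illustrative; the huge
 * page clear/copy helpers use a loop of this shape):
 *
 *	struct page *p = base;
 *	int i;
 *
 *	for (i = 0; i < nr_subpages; i++, p = mem_map_next(p, base, i)) {
 *		// operate on subpage p, e.g. clear_user_highpage(p, addr)
 *	}
 *
 * mem_map_next() is handed the next index (i has already been incremented by
 * the for-loop), so it can re-derive the struct page across a mem_map
 * discontiguity instead of blindly doing p + 1.
 */
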
/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg);	\
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

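/*
 * Editor's example of the expected call style (illustrative; the prefix and
 * message below are made up):
 *
 *	mminit_dprintk(MMINIT_VERIFY, "checks",
 *		       "validated %lu..%lu\n", start_pfn, end_pfn);
 *
 * A message is emitted only when its level is below mminit_loglevel;
 * MMINIT_WARNING and below go to pr_warn(), everything else to KERN_DEBUG.
 */
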
extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

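/*
 * Editor's example of how the watermark index is recovered from alloc_flags
 * (illustrative; the page allocator does essentially this when checking a
 * zone's watermark):
 *
 *	unsigned long mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 *
 * Because ALLOC_WMARK_MIN/LOW/HIGH alias WMARK_MIN/LOW/HIGH, masking with
 * ALLOC_WMARK_MASK yields a valid index into the zone's watermark array.
 */
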
/*
 * Only MMU archs have async oom victim reclaim - aka the oom_reaper - so we
 * cannot assume that reduced access to memory reserves is sufficient for
 * !MMU.
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		 0x10 /* try to alloc harder */
#define ALLOC_HIGH		 0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
};

/*
 * mm/vmalloc.c
 */
#ifdef CONFIG_MMU
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift);
#else
static inline
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return -EINVAL;
}
#endif

void vunmap_range_noflush(unsigned long start, unsigned long end);

int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
		      unsigned long addr, int page_nid, int *flags);

#endif	/* __MM_INTERNAL_H */