/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/mm.h>

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
}

/*
 * Turn a non-refcounted page (->_count == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON(PageTail(page));
	VM_BUG_ON(atomic_read(&page->_count));
	set_page_count(page, 1);
}

static inline void __put_page(struct page *page)
{
	atomic_dec(&page->_count);
}

static inline void __get_page_tail_foll(struct page *page,
					bool get_page_head)
{
	/*
	 * If we're getting a tail page, the elevated page->_count is
	 * required only in the head page and we will elevate the head
	 * page->_count and tail page->_mapcount.
	 *
	 * We elevate page_tail->_mapcount for tail pages to force
	 * page_tail->_count to be zero at all times to avoid getting
	 * false positives from get_page_unless_zero() with
	 * speculative page access (like in
	 * page_cache_get_speculative()) on tail pages.
	 */
	VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
	VM_BUG_ON(atomic_read(&page->_count) != 0);
	VM_BUG_ON(page_mapcount(page) < 0);
	if (get_page_head)
		atomic_inc(&page->first_page->_count);
	atomic_inc(&page->_mapcount);
}

/*
 * This is meant to be called as the FOLL_GET operation of
 * follow_page() and it must be called while holding the proper PT
 * lock while the pte (or pmd_trans_huge) is still mapping the page.
 */
static inline void get_page_foll(struct page *page)
{
	if (unlikely(PageTail(page)))
		/*
		 * This is safe only because
		 * __split_huge_page_refcount() can't run under
		 * get_page_foll() because we hold the proper PT lock.
		 */
		__get_page_tail_foll(page, true);
	else {
		/*
		 * Getting a normal page or the head of a compound page
		 * requires an already elevated page->_count.
		 */
		VM_BUG_ON(atomic_read(&page->_count) <= 0);
		atomic_inc(&page->_count);
	}
}
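
/*
 * Editorial summary (derived from the two helpers above, not verbatim
 * kernel documentation): after get_page_foll() on a tail page,
 * head->_count and tail->_mapcount are each one higher while
 * tail->_count stays 0; after get_page_foll() on a head or order-0
 * page, page->_count is simply incremented, and the caller must
 * already hold an elevated page->_count.
 */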

extern unsigned long highest_memmap_pfn;

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */
extern void __free_pages_bootmem(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned long order);
#ifdef CONFIG_MEMORY_FAILURE
extern bool is_free_buddy_page(struct page *page);
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn.
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned long nr_freepages;	/* Number of isolated free pages */
	unsigned long nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	bool sync;			/* Synchronous migration */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool finished_update_free;	/* True when the zone cached pfns are
					 * no longer being updated
					 */
	bool finished_update_migrate;

	int order;			/* order a direct compactor needs */
	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
	struct zone *zone;
	bool contended;			/* True if a lock was contended */
	struct page **page;		/* Page captured of requested size */
};
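
/*
 * Illustrative sketch (editorial pseudo-code, not the literal loop in
 * mm/compaction.c; start_pfn/end_pfn are hypothetical names for the
 * zone bounds): the two scanners start at opposite ends of the zone
 * and walk toward each other until they meet:
 *
 *	cc->migrate_pfn = start_pfn;	scans upward
 *	cc->free_pfn = end_pfn;		scans downward
 *	while (cc->free_pfn > cc->migrate_pfn)
 *		isolate movable pages near migrate_pfn, isolate free
 *		targets near free_pfn, then migrate the former to the
 *		latter;
 */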

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
	unsigned long low_pfn, unsigned long end_pfn, bool unevictable);

#endif

/*
 * Function for dealing with a page's order in the buddy system.
 * zone->lock is already acquired when we use this, so we don't
 * need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}
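
/*
 * Usage sketch (editorial; a hypothetical caller following the
 * convention described above): only read page_order() on a PageBuddy
 * page while zone->lock is held:
 *
 *	spin_lock_irqsave(&zone->lock, flags);
 *	if (PageBuddy(page))
 *		order = page_order(page);
 *	spin_unlock_irqrestore(&zone->lock, flags);
 */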

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent);

#ifdef CONFIG_MMU
extern long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * Called only in fault path, to determine if a new page is being
 * mapped into a LOCKED vma. If it is, mark page as mlocked.
 */
static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
				      struct page *page)
{
	VM_BUG_ON(PageLRU(page));

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		return 0;

	if (!TestSetPageMlocked(page)) {
		mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	return 1;
}

/*
 * must be called with vma's mmap_sem held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern void munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked(). This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

/*
 * mlock_migrate_page - called only from migrate_page_copy() to
 * migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		unsigned long flags;
		int nr_pages = hpage_nr_pages(page);

		local_irq_save(flags);
		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		SetPageMlocked(newpage);
		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
		local_irq_restore(flags);
	}
}

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern unsigned long vma_address(struct page *page,
				 struct vm_area_struct *vma);
#endif
#else /* !CONFIG_MMU */
static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
{
	return 0;
}
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'. Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return pfn_to_page(page_to_pfn(base) + offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'. Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
					struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
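
/*
 * Usage sketch (editorial, hypothetical loop modelled on the hugetlb
 * clear/copy helpers; nr_pages is an assumed name): walk every subpage
 * of a gigantic page while tolerating mem_map discontiguity. Note the
 * iterator takes the offset of the page it should return, so it is
 * called with the post-increment value of i:
 *
 *	struct page *p = base;
 *	int i;
 *
 *	for (i = 0; i < nr_pages; i++, p = mem_map_next(p, base, i))
 *		operate on subpage p;
 */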

/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases. SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
		printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
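
/*
 * Example (editorial sketch; variable names nid, zone_id, start_pfn
 * and end_pfn are hypothetical, actual callers live in mm/):
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
 *			nid, zone_id, start_pfn, end_pfn);
 *
 * The message is emitted only when mminit_loglevel (raised via the
 * "mminit_loglevel=" boot parameter) exceeds the message's level,
 * since the macro tests "level < mminit_loglevel".
 */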

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn);
extern void mminit_verify_zonelist(void);

#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define ZONE_RECLAIM_NOSCAN	-2
#define ZONE_RECLAIM_FULL	-1
#define ZONE_RECLAIM_SOME	0
#define ZONE_RECLAIM_SUCCESS	1

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned long reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
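
/*
 * Sketch (editorial; mirrors the conventional lookup in the page
 * allocator, surrounding variable names hypothetical): the low bits
 * of alloc_flags select which watermark to test,
 *
 *	mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
 *	if (!zone_watermark_ok(zone, order, mark, classzone_idx,
 *			       alloc_flags))
 *		skip this zone;
 *
 * ALLOC_WMARK_MASK is 0x03 because ALLOC_NO_WATERMARKS is 0x04 and
 * the three WMARK indices occupy the bits below it.
 */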

#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */

#endif /* __MM_INTERNAL_H */