| 1 | #ifndef _LINUX_MMZONE_H |
| 2 | #define _LINUX_MMZONE_H |
| 3 | |
| 4 | #ifndef __ASSEMBLY__ |
| 5 | #ifndef __GENERATING_BOUNDS_H |
| 6 | |
| 7 | #include <linux/spinlock.h> |
| 8 | #include <linux/list.h> |
| 9 | #include <linux/wait.h> |
| 10 | #include <linux/bitops.h> |
| 11 | #include <linux/cache.h> |
| 12 | #include <linux/threads.h> |
| 13 | #include <linux/numa.h> |
| 14 | #include <linux/init.h> |
| 15 | #include <linux/seqlock.h> |
| 16 | #include <linux/nodemask.h> |
| 17 | #include <linux/pageblock-flags.h> |
| 18 | #include <linux/page-flags-layout.h> |
| 19 | #include <linux/atomic.h> |
| 20 | #include <asm/page.h> |
| 21 | |
| 22 | /* Free memory management - zoned buddy allocator. */ |
| 23 | #ifndef CONFIG_FORCE_MAX_ZONEORDER |
| 24 | #define MAX_ORDER 11 |
| 25 | #else |
| 26 | #define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER |
| 27 | #endif |
| 28 | #define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1)) |
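| | |
| | /* |
| | * Illustrative arithmetic (not part of the original header): with the |
| | * default MAX_ORDER of 11 and 4 KiB pages, the largest buddy block is |
| | * |
| | *	MAX_ORDER_NR_PAGES = 1 << (11 - 1) = 1024 pages = 4 MiB |
| | * |
| | * so single allocations above order MAX_ORDER - 1 cannot be satisfied |
| | * unless CONFIG_FORCE_MAX_ZONEORDER raises the limit. |
| | */ |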
| 29 | |
| 30 | /* |
| 31 | * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed |
| 32 | * costly to service. That is between allocation orders which should |
| 33 | * coalesce naturally under reasonable reclaim pressure and those which |
| 34 | * will not. |
| 35 | */ |
| 36 | #define PAGE_ALLOC_COSTLY_ORDER 3 |
| 37 | |
| 38 | enum { |
| 39 | MIGRATE_UNMOVABLE, |
| 40 | MIGRATE_MOVABLE, |
| 41 | MIGRATE_RECLAIMABLE, |
| 42 | MIGRATE_PCPTYPES, /* the number of types on the pcp lists */ |
| 43 | MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES, |
| 44 | #ifdef CONFIG_CMA |
| 45 | /* |
| 46 | * MIGRATE_CMA migration type is designed to mimic the way |
| 47 | * ZONE_MOVABLE works. Only movable pages can be allocated |
| 48 | * from MIGRATE_CMA pageblocks and the page allocator never |
| 49 | * implicitly changes the migration type of a MIGRATE_CMA pageblock. |
| 50 | * |
| 51 | * The way to use it is to change migratetype of a range of |
| 52 | * pageblocks to MIGRATE_CMA which can be done by |
| 53 | * __free_pageblock_cma() function. What is important though |
| 54 | * is that a range of pageblocks must be aligned to |
| 55 | * MAX_ORDER_NR_PAGES should the biggest page be bigger than |
| 56 | * a single pageblock. |
| 57 | */ |
| 58 | MIGRATE_CMA, |
| 59 | #endif |
| 60 | #ifdef CONFIG_MEMORY_ISOLATION |
| 61 | MIGRATE_ISOLATE, /* can't allocate from here */ |
| 62 | #endif |
| 63 | MIGRATE_TYPES |
| 64 | }; |
| 65 | |
| 66 | /* In mm/page_alloc.c; keep in sync also with show_migration_types() there */ |
| 67 | extern char * const migratetype_names[MIGRATE_TYPES]; |
| 68 | |
| 69 | #ifdef CONFIG_CMA |
| 70 | # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) |
| 71 | # define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA) |
| 72 | #else |
| 73 | # define is_migrate_cma(migratetype) false |
| 74 | # define is_migrate_cma_page(_page) false |
| 75 | #endif |
| 76 | |
| 77 | #define for_each_migratetype_order(order, type) \ |
| 78 | for (order = 0; order < MAX_ORDER; order++) \ |
| 79 | for (type = 0; type < MIGRATE_TYPES; type++) |
| 80 | |
| 81 | extern int page_group_by_mobility_disabled; |
| 82 | |
| 83 | #define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1) |
| 84 | #define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1) |
| 85 | |
| 86 | #define get_pageblock_migratetype(page) \ |
| 87 | get_pfnblock_flags_mask(page, page_to_pfn(page), \ |
| 88 | PB_migrate_end, MIGRATETYPE_MASK) |
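| | |
| | /* |
| | * Illustrative usage (a sketch; the surrounding fallback policy is |
| | * hypothetical): classify a page's pageblock before deciding whether |
| | * it may be stolen for another migratetype, leaving CMA and highatomic |
| | * reserves alone. |
| | * |
| | *	int mt = get_pageblock_migratetype(page); |
| | * |
| | *	if (is_migrate_cma(mt) || mt == MIGRATE_HIGHATOMIC) |
| | *		return; |
| | */ |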
| 89 | |
| 90 | struct free_area { |
| 91 | struct list_head free_list[MIGRATE_TYPES]; |
| 92 | unsigned long nr_free; |
| 93 | }; |
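| | |
| | /* |
| | * Illustrative sketch (not part of the original header): walking every |
| | * free_list above with for_each_migratetype_order() to count free |
| | * pages per migratetype, similar in spirit to the /proc/pagetypeinfo |
| | * code in mm/vmstat.c. The zone pointer, the counts[] array and the |
| | * locking context are assumptions made for the example. |
| | * |
| | *	unsigned int order; |
| | *	int type; |
| | *	unsigned long counts[MIGRATE_TYPES] = { 0 }; |
| | *	struct page *page; |
| | * |
| | *	spin_lock_irq(&zone->lock); |
| | *	for_each_migratetype_order(order, type) { |
| | *		list_for_each_entry(page, |
| | *				&zone->free_area[order].free_list[type], lru) |
| | *			counts[type] += 1UL << order; |
| | *	} |
| | *	spin_unlock_irq(&zone->lock); |
| | */ |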
| 94 | |
| 95 | struct pglist_data; |
| 96 | |
| 97 | /* |
| 98 | * zone->lock and the zone lru_lock are two of the hottest locks in the kernel. |
| 99 | * So add a wild amount of padding here to ensure that they fall into separate |
| 100 | * cachelines. There are very few zone structures in the machine, so space |
| 101 | * consumption is not a concern here. |
| 102 | */ |
| 103 | #if defined(CONFIG_SMP) |
| 104 | struct zone_padding { |
| 105 | char x[0]; |
| 106 | } ____cacheline_internodealigned_in_smp; |
| 107 | #define ZONE_PADDING(name) struct zone_padding name; |
| 108 | #else |
| 109 | #define ZONE_PADDING(name) |
| 110 | #endif |
| 111 | |
| 112 | enum zone_stat_item { |
| 113 | /* First 128 byte cacheline (assuming 64 bit words) */ |
| 114 | NR_FREE_PAGES, |
| 115 | NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */ |
| 116 | NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE, |
| 117 | NR_ZONE_ACTIVE_ANON, |
| 118 | NR_ZONE_INACTIVE_FILE, |
| 119 | NR_ZONE_ACTIVE_FILE, |
| 120 | NR_ZONE_UNEVICTABLE, |
| 121 | NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */ |
| 122 | NR_MLOCK, /* mlock()ed pages found and moved off LRU */ |
| 123 | NR_SLAB_RECLAIMABLE, |
| 124 | NR_SLAB_UNRECLAIMABLE, |
| 125 | NR_PAGETABLE, /* used for pagetables */ |
| 126 | NR_KERNEL_STACK_KB, /* measured in KiB */ |
| 127 | /* Second 128 byte cacheline */ |
| 128 | NR_BOUNCE, |
| 129 | #if IS_ENABLED(CONFIG_ZSMALLOC) |
| 130 | NR_ZSPAGES, /* allocated in zsmalloc */ |
| 131 | #endif |
| 132 | #ifdef CONFIG_NUMA |
| 133 | NUMA_HIT, /* allocated in intended node */ |
| 134 | NUMA_MISS, /* allocated in non intended node */ |
| 135 | NUMA_FOREIGN, /* was intended here, hit elsewhere */ |
| 136 | NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */ |
| 137 | NUMA_LOCAL, /* allocation from local node */ |
| 138 | NUMA_OTHER, /* allocation from other node */ |
| 139 | #endif |
| 140 | NR_FREE_CMA_PAGES, |
| 141 | NR_VM_ZONE_STAT_ITEMS }; |
| 142 | |
| 143 | enum node_stat_item { |
| 144 | NR_LRU_BASE, |
| 145 | NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */ |
| 146 | NR_ACTIVE_ANON, /* " " " " " */ |
| 147 | NR_INACTIVE_FILE, /* " " " " " */ |
| 148 | NR_ACTIVE_FILE, /* " " " " " */ |
| 149 | NR_UNEVICTABLE, /* " " " " " */ |
| 150 | NR_ISOLATED_ANON, /* Temporarily isolated pages from the anon lru */ |
| 151 | NR_ISOLATED_FILE, /* Temporarily isolated pages from the file lru */ |
| 152 | NR_PAGES_SCANNED, /* pages scanned since last reclaim */ |
| 153 | WORKINGSET_REFAULT, |
| 154 | WORKINGSET_ACTIVATE, |
| 155 | WORKINGSET_NODERECLAIM, |
| 156 | NR_ANON_MAPPED, /* Mapped anonymous pages */ |
| 157 | NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. |
| 158 | only modified from process context */ |
| 159 | NR_FILE_PAGES, |
| 160 | NR_FILE_DIRTY, |
| 161 | NR_WRITEBACK, |
| 162 | NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ |
| 163 | NR_SHMEM, /* shmem pages (including tmpfs/GEM pages) */ |
| 164 | NR_SHMEM_THPS, |
| 165 | NR_SHMEM_PMDMAPPED, |
| 166 | NR_ANON_THPS, |
| 167 | NR_UNSTABLE_NFS, /* NFS unstable pages */ |
| 168 | NR_VMSCAN_WRITE, |
| 169 | NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */ |
| 170 | NR_DIRTIED, /* page dirtyings since bootup */ |
| 171 | NR_WRITTEN, /* page writings since bootup */ |
| 172 | NR_VM_NODE_STAT_ITEMS |
| 173 | }; |
| 174 | |
| 175 | /* |
| 176 | * We do arithmetic on the LRU lists in various places in the code, |
| 177 | * so it is important to keep the active lists LRU_ACTIVE higher in |
| 178 | * the array than the corresponding inactive lists, and to keep |
| 179 | * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists. |
| 180 | * |
| 181 | * This has to be kept in sync with the statistics in zone_stat_item |
| 182 | * above and the descriptions in vmstat_text in mm/vmstat.c |
| 183 | */ |
| 184 | #define LRU_BASE 0 |
| 185 | #define LRU_ACTIVE 1 |
| 186 | #define LRU_FILE 2 |
| 187 | |
| 188 | enum lru_list { |
| 189 | LRU_INACTIVE_ANON = LRU_BASE, |
| 190 | LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE, |
| 191 | LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE, |
| 192 | LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE, |
| 193 | LRU_UNEVICTABLE, |
| 194 | NR_LRU_LISTS |
| 195 | }; |
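| | |
| | /* |
| | * Illustrative arithmetic (a sketch of the index layout, not a copy of |
| | * the mm_inline.h helpers): the list index is built additively from the |
| | * LRU_BASE/LRU_FILE/LRU_ACTIVE macros above, which is what makes |
| | * "lru += LRU_ACTIVE" style arithmetic valid: |
| | * |
| | *	LRU_INACTIVE_ANON = 0            LRU_ACTIVE_ANON = 0 + 1 = 1 |
| | *	LRU_INACTIVE_FILE = 0 + 2 = 2    LRU_ACTIVE_FILE = 0 + 2 + 1 = 3 |
| | * |
| | *	enum lru_list lru = file ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON; |
| | *	if (active) |
| | *		lru += LRU_ACTIVE; |
| | */ |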
| 196 | |
| 197 | #define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++) |
| 198 | |
| 199 | #define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++) |
| 200 | |
| 201 | static inline int is_file_lru(enum lru_list lru) |
| 202 | { |
| 203 | return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE); |
| 204 | } |
| 205 | |
| 206 | static inline int is_active_lru(enum lru_list lru) |
| 207 | { |
| 208 | return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE); |
| 209 | } |
| 210 | |
| 211 | struct zone_reclaim_stat { |
| 212 | /* |
| 213 | * The pageout code in vmscan.c keeps track of how many of the |
| 214 | * mem/swap backed and file backed pages are referenced. |
| 215 | * The higher the rotated/scanned ratio, the more valuable |
| 216 | * that cache is. |
| 217 | * |
| 218 | * The anon LRU stats live in [0], file LRU stats in [1] |
| 219 | */ |
| 220 | unsigned long recent_rotated[2]; |
| 221 | unsigned long recent_scanned[2]; |
| 222 | }; |
| 223 | |
| 224 | struct lruvec { |
| 225 | struct list_head lists[NR_LRU_LISTS]; |
| 226 | struct zone_reclaim_stat reclaim_stat; |
| 227 | /* Evictions & activations on the inactive file list */ |
| 228 | atomic_long_t inactive_age; |
| 229 | #ifdef CONFIG_MEMCG |
| 230 | struct pglist_data *pgdat; |
| 231 | #endif |
| 232 | }; |
| 233 | |
| 234 | /* Masks used for gathering information on several LRU lists at once (see memcontrol.c) */ |
| 235 | #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) |
| 236 | #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) |
| 237 | #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) |
| 238 | |
| 239 | /* Isolate clean file */ |
| 240 | #define ISOLATE_CLEAN ((__force isolate_mode_t)0x1) |
| 241 | /* Isolate unmapped file */ |
| 242 | #define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2) |
| 243 | /* Isolate for asynchronous migration */ |
| 244 | #define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4) |
| 245 | /* Isolate unevictable pages */ |
| 246 | #define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8) |
| 247 | |
| 248 | /* LRU Isolation modes. */ |
| 249 | typedef unsigned __bitwise isolate_mode_t; |
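| | |
| | /* |
| | * Illustrative sketch (the sync_migration flag and surrounding policy |
| | * are assumptions): callers build an isolate_mode_t by OR-ing the |
| | * ISOLATE_* flags above and pass it down to the LRU isolation code. |
| | * |
| | *	isolate_mode_t mode = ISOLATE_UNMAPPED; |
| | * |
| | *	if (!sync_migration) |
| | *		mode |= ISOLATE_ASYNC_MIGRATE; |
| | */ |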
| 250 | |
| 251 | enum zone_watermarks { |
| 252 | WMARK_MIN, |
| 253 | WMARK_LOW, |
| 254 | WMARK_HIGH, |
| 255 | NR_WMARK |
| 256 | }; |
| 257 | |
| 258 | #define min_wmark_pages(z) (z->watermark[WMARK_MIN]) |
| 259 | #define low_wmark_pages(z) (z->watermark[WMARK_LOW]) |
| 260 | #define high_wmark_pages(z) (z->watermark[WMARK_HIGH]) |
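| | |
| | /* |
| | * Illustrative sketch (the zone/order/classzone_idx values and the use |
| | * of zone_page_state() from linux/vmstat.h are assumptions): a |
| | * low-watermark check of the kind the allocator performs before waking |
| | * kswapd. |
| | * |
| | *	if (zone_page_state(zone, NR_FREE_PAGES) <= low_wmark_pages(zone)) |
| | *		wakeup_kswapd(zone, order, classzone_idx); |
| | */ |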
| 261 | |
| 262 | struct per_cpu_pages { |
| 263 | int count; /* number of pages in the list */ |
| 264 | int high; /* high watermark, emptying needed */ |
| 265 | int batch; /* chunk size for buddy add/remove */ |
| 266 | |
| 267 | /* Lists of pages, one per migrate type stored on the pcp-lists */ |
| 268 | struct list_head lists[MIGRATE_PCPTYPES]; |
| 269 | }; |
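| | |
| | /* |
| | * Simplified sketch of how the fields above interact (hedged; the real |
| | * logic lives in mm/page_alloc.c, e.g. free_hot_cold_page() in this |
| | * era): once a per-cpu list grows past ->high, a ->batch sized chunk |
| | * is handed back to the buddy lists in one go. |
| | * |
| | *	list_add(&page->lru, &pcp->lists[migratetype]); |
| | *	pcp->count++; |
| | *	if (pcp->count >= pcp->high) { |
| | *		free_pcppages_bulk(zone, pcp->batch, pcp); |
| | *		pcp->count -= pcp->batch; |
| | *	} |
| | */ |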
| 270 | |
| 271 | struct per_cpu_pageset { |
| 272 | struct per_cpu_pages pcp; |
| 273 | #ifdef CONFIG_NUMA |
| 274 | s8 expire; |
| 275 | #endif |
| 276 | #ifdef CONFIG_SMP |
| 277 | s8 stat_threshold; |
| 278 | s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; |
| 279 | #endif |
| 280 | }; |
| 281 | |
| 282 | struct per_cpu_nodestat { |
| 283 | s8 stat_threshold; |
| 284 | s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS]; |
| 285 | }; |
| 286 | |
| 287 | #endif /* !__GENERATING_BOUNDS_H */ |
| 288 | |
| 289 | enum zone_type { |
| 290 | #ifdef CONFIG_ZONE_DMA |
| 291 | /* |
| 292 | * ZONE_DMA is used when there are devices that are not able |
| 293 | * to do DMA to all of addressable memory (ZONE_NORMAL). Then we |
| 294 | * carve out the portion of memory that is needed for these devices. |
| 295 | * The range is arch specific. |
| 296 | * |
| 297 | * Some examples |
| 298 | * |
| 299 | * Architecture Limit |
| 300 | * --------------------------- |
| 301 | * parisc, ia64, sparc <4G |
| 302 | * s390 <2G |
| 303 | * arm Various |
| 304 | * alpha Unlimited or 0-16MB. |
| 305 | * |
| 306 | * i386, x86_64 and multiple other arches |
| 307 | * <16M. |
| 308 | */ |
| 309 | ZONE_DMA, |
| 310 | #endif |
| 311 | #ifdef CONFIG_ZONE_DMA32 |
| 312 | /* |
| 313 | * x86_64 needs two ZONE_DMAs because it supports devices that are |
| 314 | * only able to do DMA to the lower 16M but also 32 bit devices that |
| 315 | * can only do DMA to areas below 4G. |
| 316 | */ |
| 317 | ZONE_DMA32, |
| 318 | #endif |
| 319 | /* |
| 320 | * Normal addressable memory is in ZONE_NORMAL. DMA operations can be |
| 321 | * performed on pages in ZONE_NORMAL if the DMA devices support |
| 322 | * transfers to all addressable memory. |
| 323 | */ |
| 324 | ZONE_NORMAL, |
| 325 | #ifdef CONFIG_HIGHMEM |
| 326 | /* |
| 327 | * A memory area that is only addressable by the kernel through |
| 328 | * mapping portions into its own address space. This is for example |
| 329 | * used by i386 to allow the kernel to address the memory beyond |
| 330 | * 900MB. The kernel will set up special mappings (page |
| 331 | * table entries on i386) for each page that the kernel needs to |
| 332 | * access. |
| 333 | */ |
| 334 | ZONE_HIGHMEM, |
| 335 | #endif |
| 336 | ZONE_MOVABLE, |
| 337 | #ifdef CONFIG_ZONE_DEVICE |
| 338 | ZONE_DEVICE, |
| 339 | #endif |
| 340 | __MAX_NR_ZONES |
| 341 | |
| 342 | }; |
| 343 | |
| 344 | #ifndef __GENERATING_BOUNDS_H |
| 345 | |
| 346 | struct zone { |
| 347 | /* Read-mostly fields */ |
| 348 | |
| 349 | /* zone watermarks, access with *_wmark_pages(zone) macros */ |
| 350 | unsigned long watermark[NR_WMARK]; |
| 351 | |
| 352 | unsigned long nr_reserved_highatomic; |
| 353 | |
| 354 | /* |
| 355 | * We don't know if the memory that we're going to allocate will be |
| 356 | * freeable and/or whether it will be released eventually, so to avoid |
| 357 | * totally wasting several GB of RAM we must reserve some of the lower |
| 358 | * zone memory (otherwise we risk running OOM on the lower zones despite |
| 359 | * there being tons of freeable RAM in the higher zones). This array is |
| 360 | * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl |
| 361 | * changes. |
| 362 | */ |
| 363 | long lowmem_reserve[MAX_NR_ZONES]; |
| 364 | |
| 365 | #ifdef CONFIG_NUMA |
| 366 | int node; |
| 367 | #endif |
| 368 | struct pglist_data *zone_pgdat; |
| 369 | struct per_cpu_pageset __percpu *pageset; |
| 370 | |
| 371 | #ifndef CONFIG_SPARSEMEM |
| 372 | /* |
| 373 | * Flags for a pageblock_nr_pages block. See pageblock-flags.h. |
| 374 | * In SPARSEMEM, this map is stored in struct mem_section |
| 375 | */ |
| 376 | unsigned long *pageblock_flags; |
| 377 | #endif /* CONFIG_SPARSEMEM */ |
| 378 | |
| 379 | /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */ |
| 380 | unsigned long zone_start_pfn; |
| 381 | |
| 382 | /* |
| 383 | * spanned_pages is the total pages spanned by the zone, including |
| 384 | * holes, which is calculated as: |
| 385 | * spanned_pages = zone_end_pfn - zone_start_pfn; |
| 386 | * |
| 387 | * present_pages is physical pages existing within the zone, which |
| 388 | * is calculated as: |
| 389 | * present_pages = spanned_pages - absent_pages(pages in holes); |
| 390 | * |
| 391 | * managed_pages is present pages managed by the buddy system, which |
| 392 | * is calculated as (reserved_pages includes pages allocated by the |
| 393 | * bootmem allocator): |
| 394 | * managed_pages = present_pages - reserved_pages; |
| 395 | * |
| 396 | * So present_pages may be used by memory hotplug or memory power |
| 397 | * management logic to figure out unmanaged pages by checking |
| 398 | * (present_pages - managed_pages). And managed_pages should be used |
| 399 | * by the page allocator and VM scanner to calculate all kinds of watermarks |
| 400 | * and thresholds. |
| 401 | * |
| 402 | * Locking rules: |
| 403 | * |
| 404 | * zone_start_pfn and spanned_pages are protected by span_seqlock. |
| 405 | * It is a seqlock because it has to be read outside of zone->lock, |
| 406 | * and it is done in the main allocator path. But, it is written |
| 407 | * quite infrequently. |
| 408 | * |
| 409 | * The span_seqlock is declared along with zone->lock because it is |
| 410 | * frequently read in proximity to zone->lock. It's good to |
| 411 | * give them a chance of being in the same cacheline. |
| 412 | * |
| 413 | * Write access to present_pages at runtime should be protected by |
| 414 | * mem_hotplug_begin/end(). Any reader who can't tolerate drift of |
| 415 | * present_pages should use get_online_mems() to get a stable value. |
| 416 | * |
| 417 | * Read access to managed_pages should be safe because it's unsigned |
| 418 | * long. Write access to zone->managed_pages and totalram_pages is |
| 419 | * protected by managed_page_count_lock at runtime. Ideally only |
| 420 | * adjust_managed_page_count() should be used instead of directly |
| 421 | * touching zone->managed_pages and totalram_pages. |
| 422 | */ |
| 423 | unsigned long managed_pages; |
| 424 | unsigned long spanned_pages; |
| 425 | unsigned long present_pages; |
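| | |
| | /* |
| | * Worked example (the numbers are made up purely for illustration): a |
| | * zone spanning pfns [0x1000, 0x9000) with a 0x2000-pfn hole and 0x500 |
| | * reserved pages would have |
| | * |
| | *	spanned_pages = 0x9000 - 0x1000 = 0x8000 |
| | *	present_pages = 0x8000 - 0x2000 = 0x6000 |
| | *	managed_pages = 0x6000 - 0x500  = 0x5b00 |
| | */ |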
| 426 | |
| 427 | const char *name; |
| 428 | |
| 429 | #ifdef CONFIG_MEMORY_ISOLATION |
| 430 | /* |
| 431 | * Number of isolated pageblocks. It is used to solve the incorrect |
| 432 | * freepage counting problem caused by racy retrieval of a pageblock's |
| 433 | * migratetype. Protected by zone->lock. |
| 434 | */ |
| 435 | unsigned long nr_isolate_pageblock; |
| 436 | #endif |
| 437 | |
| 438 | #ifdef CONFIG_MEMORY_HOTPLUG |
| 439 | /* see spanned/present_pages for more description */ |
| 440 | seqlock_t span_seqlock; |
| 441 | #endif |
| 442 | |
| 443 | int initialized; |
| 444 | |
| 445 | /* Write-intensive fields used from the page allocator */ |
| 446 | ZONE_PADDING(_pad1_) |
| 447 | |
| 448 | /* free areas of different sizes */ |
| 449 | struct free_area free_area[MAX_ORDER]; |
| 450 | |
| 451 | /* zone flags, see below */ |
| 452 | unsigned long flags; |
| 453 | |
| 454 | /* Primarily protects free_area */ |
| 455 | spinlock_t lock; |
| 456 | |
| 457 | /* Write-intensive fields used by compaction and vmstats. */ |
| 458 | ZONE_PADDING(_pad2_) |
| 459 | |
| 460 | /* |
| 461 | * When free pages are below this point, additional steps are taken |
| 462 | * when reading the number of free pages to avoid per-cpu counter |
| 463 | * drift that would allow watermarks to be breached. |
| 464 | */ |
| 465 | unsigned long percpu_drift_mark; |
| 466 | |
| 467 | #if defined CONFIG_COMPACTION || defined CONFIG_CMA |
| 468 | /* pfn where compaction free scanner should start */ |
| 469 | unsigned long compact_cached_free_pfn; |
| 470 | /* pfn where async and sync compaction migration scanner should start */ |
| 471 | unsigned long compact_cached_migrate_pfn[2]; |
| 472 | #endif |
| 473 | |
| 474 | #ifdef CONFIG_COMPACTION |
| 475 | /* |
| 476 | * On compaction failure, 1<<compact_defer_shift compactions |
| 477 | * are skipped before trying again. The number attempted since |
| 478 | * last failure is tracked with compact_considered. |
| 479 | */ |
| 480 | unsigned int compact_considered; |
| 481 | unsigned int compact_defer_shift; |
| 482 | int compact_order_failed; |
| 483 | #endif |
| 484 | |
| 485 | #if defined CONFIG_COMPACTION || defined CONFIG_CMA |
| 486 | /* Set to true when the PG_migrate_skip bits should be cleared */ |
| 487 | bool compact_blockskip_flush; |
| 488 | #endif |
| 489 | |
| 490 | bool contiguous; |
| 491 | |
| 492 | ZONE_PADDING(_pad3_) |
| 493 | /* Zone statistics */ |
| 494 | atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; |
| 495 | } ____cacheline_internodealigned_in_smp; |
| 496 | |
| 497 | enum pgdat_flags { |
| 498 | PGDAT_CONGESTED, /* pgdat has many dirty pages backed by |
| 499 | * a congested BDI |
| 500 | */ |
| 501 | PGDAT_DIRTY, /* reclaim scanning has recently found |
| 502 | * many dirty file pages at the tail |
| 503 | * of the LRU. |
| 504 | */ |
| 505 | PGDAT_WRITEBACK, /* reclaim scanning has recently found |
| 506 | * many pages under writeback |
| 507 | */ |
| 508 | PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */ |
| 509 | }; |
| 510 | |
| 511 | static inline unsigned long zone_end_pfn(const struct zone *zone) |
| 512 | { |
| 513 | return zone->zone_start_pfn + zone->spanned_pages; |
| 514 | } |
| 515 | |
| 516 | static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn) |
| 517 | { |
| 518 | return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone); |
| 519 | } |
| 520 | |
| 521 | static inline bool zone_is_initialized(struct zone *zone) |
| 522 | { |
| 523 | return zone->initialized; |
| 524 | } |
| 525 | |
| 526 | static inline bool zone_is_empty(struct zone *zone) |
| 527 | { |
| 528 | return zone->spanned_pages == 0; |
| 529 | } |
| 530 | |
| 531 | /* |
| 532 | * The "priority" of VM scanning is how much of the queues we will scan in one |
| 533 | * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the |
| 534 | * queues ("queue_length >> 12") during an aging round. |
| 535 | */ |
| 536 | #define DEF_PRIORITY 12 |
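| | |
| | /* |
| | * Illustrative arithmetic: at priority 12 an LRU list of 1,000,000 |
| | * pages contributes roughly 1000000 >> 12 = 244 pages per scan pass; |
| | * as reclaim becomes more desperate the priority drops towards 0, |
| | * where the whole list (1000000 >> 0) becomes eligible in one pass. |
| | */ |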
| 537 | |
| 538 | /* Maximum number of zones on a zonelist */ |
| 539 | #define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES) |
| 540 | |
| 541 | enum { |
| 542 | ZONELIST_FALLBACK, /* zonelist with fallback */ |
| 543 | #ifdef CONFIG_NUMA |
| 544 | /* |
| 545 | * The NUMA zonelists are doubled because we need zonelists that |
| 546 | * restrict the allocations to a single node for __GFP_THISNODE. |
| 547 | */ |
| 548 | ZONELIST_NOFALLBACK, /* zonelist without fallback (__GFP_THISNODE) */ |
| 549 | #endif |
| 550 | MAX_ZONELISTS |
| 551 | }; |
| 552 | |
| 553 | /* |
| 554 | * This struct contains information about a zone in a zonelist. It is stored |
| 555 | * here to avoid dereferences into large structures and lookups of tables |
| 556 | */ |
| 557 | struct zoneref { |
| 558 | struct zone *zone; /* Pointer to actual zone */ |
| 559 | int zone_idx; /* zone_idx(zoneref->zone) */ |
| 560 | }; |
| 561 | |
| 562 | /* |
| 563 | * One allocation request operates on a zonelist. A zonelist |
| 564 | * is a list of zones, the first one is the 'goal' of the |
| 565 | * allocation, the other zones are fallback zones, in decreasing |
| 566 | * priority. |
| 567 | * |
| 568 | * To speed the reading of the zonelist, the zonerefs contain the zone index |
| 569 | * of the entry being read. Helper functions to access information given |
| 570 | * a struct zoneref are |
| 571 | * |
| 572 | * zonelist_zone() - Return the struct zone * for an entry in _zonerefs |
| 573 | * zonelist_zone_idx() - Return the index of the zone for an entry |
| 574 | * zonelist_node_idx() - Return the index of the node for an entry |
| 575 | */ |
| 576 | struct zonelist { |
| 577 | struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1]; |
| 578 | }; |
| 579 | |
| 580 | #ifndef CONFIG_DISCONTIGMEM |
| 581 | /* The array of struct pages - for discontigmem use pgdat->lmem_map */ |
| 582 | extern struct page *mem_map; |
| 583 | #endif |
| 584 | |
| 585 | /* |
| 586 | * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM |
| 587 | * (mostly NUMA machines?) to denote a higher-level memory zone than the |
| 588 | * zone denotes. |
| 589 | * |
| 590 | * On NUMA machines, each NUMA node would have a pg_data_t to describe |
| 591 | * its memory layout. |
| 592 | * |
| 593 | * Memory statistics and page replacement data structures are maintained on a |
| 594 | * per-zone basis. |
| 595 | */ |
| 596 | struct bootmem_data; |
| 597 | typedef struct pglist_data { |
| 598 | struct zone node_zones[MAX_NR_ZONES]; |
| 599 | struct zonelist node_zonelists[MAX_ZONELISTS]; |
| 600 | int nr_zones; |
| 601 | #ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ |
| 602 | struct page *node_mem_map; |
| 603 | #ifdef CONFIG_PAGE_EXTENSION |
| 604 | struct page_ext *node_page_ext; |
| 605 | #endif |
| 606 | #endif |
| 607 | #ifndef CONFIG_NO_BOOTMEM |
| 608 | struct bootmem_data *bdata; |
| 609 | #endif |
| 610 | #ifdef CONFIG_MEMORY_HOTPLUG |
| 611 | /* |
| 612 | * Must be held any time you expect node_start_pfn, node_present_pages |
| 613 | * or node_spanned_pages to stay constant. Holding this will also |
| 614 | * guarantee that any pfn_valid() stays that way. |
| 615 | * |
| 616 | * pgdat_resize_lock() and pgdat_resize_unlock() are provided to |
| 617 | * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG. |
| 618 | * |
| 619 | * Nests above zone->lock and zone->span_seqlock |
| 620 | */ |
| 621 | spinlock_t node_size_lock; |
| 622 | #endif |
| 623 | unsigned long node_start_pfn; |
| 624 | unsigned long node_present_pages; /* total number of physical pages */ |
| 625 | unsigned long node_spanned_pages; /* total size of physical page |
| 626 | range, including holes */ |
| 627 | int node_id; |
| 628 | wait_queue_head_t kswapd_wait; |
| 629 | wait_queue_head_t pfmemalloc_wait; |
| 630 | struct task_struct *kswapd; /* Protected by |
| 631 | mem_hotplug_begin/end() */ |
| 632 | int kswapd_order; |
| 633 | enum zone_type kswapd_classzone_idx; |
| 634 | |
| 635 | #ifdef CONFIG_COMPACTION |
| 636 | int kcompactd_max_order; |
| 637 | enum zone_type kcompactd_classzone_idx; |
| 638 | wait_queue_head_t kcompactd_wait; |
| 639 | struct task_struct *kcompactd; |
| 640 | #endif |
| 641 | #ifdef CONFIG_NUMA_BALANCING |
| 642 | /* Lock serializing the migrate rate limiting window */ |
| 643 | spinlock_t numabalancing_migrate_lock; |
| 644 | |
| 645 | /* Rate limiting time interval */ |
| 646 | unsigned long numabalancing_migrate_next_window; |
| 647 | |
| 648 | /* Number of pages migrated during the rate limiting time interval */ |
| 649 | unsigned long numabalancing_migrate_nr_pages; |
| 650 | #endif |
| 651 | /* |
| 652 | * This is a per-node reserve of pages that are not available |
| 653 | * to userspace allocations. |
| 654 | */ |
| 655 | unsigned long totalreserve_pages; |
| 656 | |
| 657 | #ifdef CONFIG_NUMA |
| 658 | /* |
| 659 | * zone reclaim becomes active if more unmapped pages exist. |
| 660 | */ |
| 661 | unsigned long min_unmapped_pages; |
| 662 | unsigned long min_slab_pages; |
| 663 | #endif /* CONFIG_NUMA */ |
| 664 | |
| 665 | /* Write-intensive fields used by page reclaim */ |
| 666 | ZONE_PADDING(_pad1_) |
| 667 | spinlock_t lru_lock; |
| 668 | |
| 669 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT |
| 670 | /* |
| 671 | * If memory initialisation on large machines is deferred then this |
| 672 | * is the first PFN that needs to be initialised. |
| 673 | */ |
| 674 | unsigned long first_deferred_pfn; |
| 675 | #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ |
| 676 | |
| 677 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 678 | spinlock_t split_queue_lock; |
| 679 | struct list_head split_queue; |
| 680 | unsigned long split_queue_len; |
| 681 | #endif |
| 682 | |
| 683 | /* Fields commonly accessed by the page reclaim scanner */ |
| 684 | struct lruvec lruvec; |
| 685 | |
| 686 | /* |
| 687 | * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on |
| 688 | * this node's LRU. Maintained by the pageout code. |
| 689 | */ |
| 690 | unsigned int inactive_ratio; |
| 691 | |
| 692 | unsigned long flags; |
| 693 | |
| 694 | ZONE_PADDING(_pad2_) |
| 695 | |
| 696 | /* Per-node vmstats */ |
| 697 | struct per_cpu_nodestat __percpu *per_cpu_nodestats; |
| 698 | atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS]; |
| 699 | } pg_data_t; |
| 700 | |
| 701 | #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) |
| 702 | #define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages) |
| 703 | #ifdef CONFIG_FLAT_NODE_MEM_MAP |
| 704 | #define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr)) |
| 705 | #else |
| 706 | #define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr)) |
| 707 | #endif |
| 708 | #define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr)) |
| 709 | |
| 710 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) |
| 711 | #define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid)) |
| 712 | static inline spinlock_t *zone_lru_lock(struct zone *zone) |
| 713 | { |
| 714 | return &zone->zone_pgdat->lru_lock; |
| 715 | } |
| 716 | |
| 717 | static inline struct lruvec *node_lruvec(struct pglist_data *pgdat) |
| 718 | { |
| 719 | return &pgdat->lruvec; |
| 720 | } |
| 721 | |
| 722 | static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat) |
| 723 | { |
| 724 | return pgdat->node_start_pfn + pgdat->node_spanned_pages; |
| 725 | } |
| 726 | |
| 727 | static inline bool pgdat_is_empty(pg_data_t *pgdat) |
| 728 | { |
| 729 | return !pgdat->node_start_pfn && !pgdat->node_spanned_pages; |
| 730 | } |
| 731 | |
| 732 | static inline int zone_id(const struct zone *zone) |
| 733 | { |
| 734 | struct pglist_data *pgdat = zone->zone_pgdat; |
| 735 | |
| 736 | return zone - pgdat->node_zones; |
| 737 | } |
| 738 | |
| 739 | #ifdef CONFIG_ZONE_DEVICE |
| 740 | static inline bool is_dev_zone(const struct zone *zone) |
| 741 | { |
| 742 | return zone_id(zone) == ZONE_DEVICE; |
| 743 | } |
| 744 | #else |
| 745 | static inline bool is_dev_zone(const struct zone *zone) |
| 746 | { |
| 747 | return false; |
| 748 | } |
| 749 | #endif |
| 750 | |
| 751 | #include <linux/memory_hotplug.h> |
| 752 | |
| 753 | extern struct mutex zonelists_mutex; |
| 754 | void build_all_zonelists(pg_data_t *pgdat, struct zone *zone); |
| 755 | void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx); |
| 756 | bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, |
| 757 | int classzone_idx, unsigned int alloc_flags, |
| 758 | long free_pages); |
| 759 | bool zone_watermark_ok(struct zone *z, unsigned int order, |
| 760 | unsigned long mark, int classzone_idx, |
| 761 | unsigned int alloc_flags); |
| 762 | bool zone_watermark_ok_safe(struct zone *z, unsigned int order, |
| 763 | unsigned long mark, int classzone_idx); |
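| | |
| | /* |
| | * Illustrative sketch (the zone/order/classzone_idx/alloc_flags values |
| | * and the zonelist loop they would live in are assumptions): the |
| | * typical pattern is to test a zone against one of its watermarks |
| | * before trying to allocate from it, and to fall through to the next |
| | * zone on failure. |
| | * |
| | *	unsigned long mark = low_wmark_pages(zone); |
| | * |
| | *	if (!zone_watermark_ok(zone, order, mark, classzone_idx, |
| | *			       alloc_flags)) |
| | *		continue; |
| | */ |
| | |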
| 764 | enum memmap_context { |
| 765 | MEMMAP_EARLY, |
| 766 | MEMMAP_HOTPLUG, |
| 767 | }; |
| 768 | extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, |
| 769 | unsigned long size); |
| 770 | |
| 771 | extern void lruvec_init(struct lruvec *lruvec); |
| 772 | |
| 773 | static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec) |
| 774 | { |
| 775 | #ifdef CONFIG_MEMCG |
| 776 | return lruvec->pgdat; |
| 777 | #else |
| 778 | return container_of(lruvec, struct pglist_data, lruvec); |
| 779 | #endif |
| 780 | } |
| 781 | |
| 782 | extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru); |
| 783 | |
| 784 | #ifdef CONFIG_HAVE_MEMORY_PRESENT |
| 785 | void memory_present(int nid, unsigned long start, unsigned long end); |
| 786 | #else |
| 787 | static inline void memory_present(int nid, unsigned long start, unsigned long end) {} |
| 788 | #endif |
| 789 | |
| 790 | #ifdef CONFIG_HAVE_MEMORYLESS_NODES |
| 791 | int local_memory_node(int node_id); |
| 792 | #else |
| 793 | static inline int local_memory_node(int node_id) { return node_id; }; |
| 794 | #endif |
| 795 | |
| 796 | #ifdef CONFIG_NEED_NODE_MEMMAP_SIZE |
| 797 | unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); |
| 798 | #endif |
| 799 | |
| 800 | /* |
| 801 | * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc. |
| 802 | */ |
| 803 | #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) |
| 804 | |
| 805 | /* |
| 806 | * Returns true if a zone has pages managed by the buddy allocator. |
| 807 | * All the reclaim decisions have to use this function rather than |
| 808 | * populated_zone(). If the whole zone is reserved then we can easily |
| 809 | * end up with populated_zone() && !managed_zone(). |
| 810 | */ |
| 811 | static inline bool managed_zone(struct zone *zone) |
| 812 | { |
| 813 | return zone->managed_pages; |
| 814 | } |
| 815 | |
| 816 | /* Returns true if a zone has memory */ |
| 817 | static inline bool populated_zone(struct zone *zone) |
| 818 | { |
| 819 | return zone->present_pages; |
| 820 | } |
| 821 | |
| 822 | extern int movable_zone; |
| 823 | |
| 824 | #ifdef CONFIG_HIGHMEM |
| 825 | static inline int zone_movable_is_highmem(void) |
| 826 | { |
| 827 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
| 828 | return movable_zone == ZONE_HIGHMEM; |
| 829 | #else |
| 830 | return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM; |
| 831 | #endif |
| 832 | } |
| 833 | #endif |
| 834 | |
| 835 | static inline int is_highmem_idx(enum zone_type idx) |
| 836 | { |
| 837 | #ifdef CONFIG_HIGHMEM |
| 838 | return (idx == ZONE_HIGHMEM || |
| 839 | (idx == ZONE_MOVABLE && zone_movable_is_highmem())); |
| 840 | #else |
| 841 | return 0; |
| 842 | #endif |
| 843 | } |
| 844 | |
| 845 | /** |
| 846 | * is_highmem - helper function to quickly check if a struct zone is a |
| 847 | * highmem zone or not. This is an attempt to keep references |
| 848 | * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum. |
| 849 | * @zone - pointer to struct zone variable |
| 850 | */ |
| 851 | static inline int is_highmem(struct zone *zone) |
| 852 | { |
| 853 | #ifdef CONFIG_HIGHMEM |
| 854 | return is_highmem_idx(zone_idx(zone)); |
| 855 | #else |
| 856 | return 0; |
| 857 | #endif |
| 858 | } |
| 859 | |
| 860 | /* These two functions are used to setup the per zone pages min values */ |
| 861 | struct ctl_table; |
| 862 | int min_free_kbytes_sysctl_handler(struct ctl_table *, int, |
| 863 | void __user *, size_t *, loff_t *); |
| 864 | int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, |
| 865 | void __user *, size_t *, loff_t *); |
| 866 | extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1]; |
| 867 | int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, |
| 868 | void __user *, size_t *, loff_t *); |
| 869 | int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, |
| 870 | void __user *, size_t *, loff_t *); |
| 871 | int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, |
| 872 | void __user *, size_t *, loff_t *); |
| 873 | int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, |
| 874 | void __user *, size_t *, loff_t *); |
| 875 | |
| 876 | extern int numa_zonelist_order_handler(struct ctl_table *, int, |
| 877 | void __user *, size_t *, loff_t *); |
| 878 | extern char numa_zonelist_order[]; |
| 879 | #define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */ |
| 880 | |
| 881 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
| 882 | |
| 883 | extern struct pglist_data contig_page_data; |
| 884 | #define NODE_DATA(nid) (&contig_page_data) |
| 885 | #define NODE_MEM_MAP(nid) mem_map |
| 886 | |
| 887 | #else /* CONFIG_NEED_MULTIPLE_NODES */ |
| 888 | |
| 889 | #include <asm/mmzone.h> |
| 890 | |
| 891 | #endif /* !CONFIG_NEED_MULTIPLE_NODES */ |
| 892 | |
| 893 | extern struct pglist_data *first_online_pgdat(void); |
| 894 | extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); |
| 895 | extern struct zone *next_zone(struct zone *zone); |
| 896 | |
| 897 | /** |
| 898 | * for_each_online_pgdat - helper macro to iterate over all online nodes |
| 899 | * @pgdat - pointer to a pg_data_t variable |
| 900 | */ |
| 901 | #define for_each_online_pgdat(pgdat) \ |
| 902 | for (pgdat = first_online_pgdat(); \ |
| 903 | pgdat; \ |
| 904 | pgdat = next_online_pgdat(pgdat)) |
| 905 | /** |
| 906 | * for_each_zone - helper macro to iterate over all memory zones |
| 907 | * @zone - pointer to struct zone variable |
| 908 | * |
| 909 | * The user only needs to declare the zone variable, for_each_zone |
| 910 | * fills it in. |
| 911 | */ |
| 912 | #define for_each_zone(zone) \ |
| 913 | for (zone = (first_online_pgdat())->node_zones; \ |
| 914 | zone; \ |
| 915 | zone = next_zone(zone)) |
| 916 | |
| 917 | #define for_each_populated_zone(zone) \ |
| 918 | for (zone = (first_online_pgdat())->node_zones; \ |
| 919 | zone; \ |
| 920 | zone = next_zone(zone)) \ |
| 921 | if (!populated_zone(zone)) \ |
| 922 | ; /* do nothing */ \ |
| 923 | else |
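| | |
| | /* |
| | * Illustrative sketch (the printout and the use of zone_page_state() |
| | * from linux/vmstat.h are assumptions): iterating every populated zone |
| | * in the system, as vmstat-style reporting code does. |
| | * |
| | *	struct zone *zone; |
| | * |
| | *	for_each_populated_zone(zone) |
| | *		pr_info("%s: %lu free pages\n", zone->name, |
| | *			zone_page_state(zone, NR_FREE_PAGES)); |
| | */ |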
| 924 | |
| 925 | static inline struct zone *zonelist_zone(struct zoneref *zoneref) |
| 926 | { |
| 927 | return zoneref->zone; |
| 928 | } |
| 929 | |
| 930 | static inline int zonelist_zone_idx(struct zoneref *zoneref) |
| 931 | { |
| 932 | return zoneref->zone_idx; |
| 933 | } |
| 934 | |
| 935 | static inline int zonelist_node_idx(struct zoneref *zoneref) |
| 936 | { |
| 937 | #ifdef CONFIG_NUMA |
| 938 | /* zone_to_nid not available in this context */ |
| 939 | return zoneref->zone->node; |
| 940 | #else |
| 941 | return 0; |
| 942 | #endif /* CONFIG_NUMA */ |
| 943 | } |
| 944 | |
| 945 | struct zoneref *__next_zones_zonelist(struct zoneref *z, |
| 946 | enum zone_type highest_zoneidx, |
| 947 | nodemask_t *nodes); |
| 948 | |
| 949 | /** |
| 950 | * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point |
| 951 | * @z - The cursor used as a starting point for the search |
| 952 | * @highest_zoneidx - The zone index of the highest zone to return |
| 953 | * @nodes - An optional nodemask to filter the zonelist with |
| 954 | * |
| 955 | * This function returns the next zone at or below a given zone index that is |
| 956 | * within the allowed nodemask using a cursor as the starting point for the |
| 957 | * search. The zoneref returned is a cursor that represents the current zone |
| 958 | * being examined. It should be advanced by one before calling |
| 959 | * next_zones_zonelist again. |
| 960 | */ |
| 961 | static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z, |
| 962 | enum zone_type highest_zoneidx, |
| 963 | nodemask_t *nodes) |
| 964 | { |
| 965 | if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx)) |
| 966 | return z; |
| 967 | return __next_zones_zonelist(z, highest_zoneidx, nodes); |
| 968 | } |
| 969 | |
| 970 | /** |
| 971 | * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist |
| 972 | * @zonelist - The zonelist to search for a suitable zone |
| 973 | * @highest_zoneidx - The zone index of the highest zone to return |
| 974 | * @nodes - An optional nodemask to filter the zonelist with |
| 975 | * @zone - The first suitable zone found is returned via this parameter |
| 976 | * |
| 977 | * This function returns the first zone at or below a given zone index that is |
| 978 | * within the allowed nodemask. The zoneref returned is a cursor that can be |
| 979 | * used to iterate the zonelist with next_zones_zonelist by advancing it by |
| 980 | * one before calling. |
| 981 | */ |
| 982 | static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, |
| 983 | enum zone_type highest_zoneidx, |
| 984 | nodemask_t *nodes) |
| 985 | { |
| 986 | return next_zones_zonelist(zonelist->_zonerefs, |
| 987 | highest_zoneidx, nodes); |
| 988 | } |
| 989 | |
| 990 | /** |
| 991 | * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask |
| 992 | * @zone - The current zone in the iterator |
| 993 | * @z - The current pointer within zonelist->zones being iterated |
| 994 | * @zlist - The zonelist being iterated |
| 995 | * @highidx - The zone index of the highest zone to return |
| 996 | * @nodemask - Nodemask allowed by the allocator |
| 997 | * |
| 998 | * This iterator iterates though all zones at or below a given zone index and |
| 999 | * within a given nodemask |
| 1000 | */ |
| 1001 | #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ |
| 1002 | for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \ |
| 1003 | zone; \ |
| 1004 | z = next_zones_zonelist(++z, highidx, nodemask), \ |
| 1005 | zone = zonelist_zone(z)) |
| 1006 | |
| 1007 | #define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ |
| 1008 | for (zone = z->zone; \ |
| 1009 | zone; \ |
| 1010 | z = next_zones_zonelist(++z, highidx, nodemask), \ |
| 1011 | zone = zonelist_zone(z)) |
| 1012 | |
| 1013 | |
| 1014 | /** |
| 1015 | * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index |
| 1016 | * @zone - The current zone in the iterator |
| 1017 | * @z - The current pointer within zonelist->zones being iterated |
| 1018 | * @zlist - The zonelist being iterated |
| 1019 | * @highidx - The zone index of the highest zone to return |
| 1020 | * |
| 1021 | * This iterator iterates though all zones at or below a given zone index. |
| 1022 | */ |
| 1023 | #define for_each_zone_zonelist(zone, z, zlist, highidx) \ |
| 1024 | for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL) |
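| | |
| | /* |
| | * Illustrative sketch (the nid, the GFP mask and the use of |
| | * node_zonelist()/gfp_zone() from linux/gfp.h are assumptions): the |
| | * canonical consumer of these iterators is the allocator, which walks |
| | * a zonelist from the preferred zone downwards. |
| | * |
| | *	struct zonelist *zonelist = node_zonelist(nid, GFP_KERNEL); |
| | *	enum zone_type highidx = gfp_zone(GFP_KERNEL); |
| | *	struct zoneref *z; |
| | *	struct zone *zone; |
| | *	unsigned long nr_free = 0; |
| | * |
| | *	for_each_zone_zonelist(zone, z, zonelist, highidx) |
| | *		nr_free += zone_page_state(zone, NR_FREE_PAGES); |
| | */ |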
| 1025 | |
| 1026 | #ifdef CONFIG_SPARSEMEM |
| 1027 | #include <asm/sparsemem.h> |
| 1028 | #endif |
| 1029 | |
| 1030 | #if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \ |
| 1031 | !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) |
| 1032 | static inline unsigned long early_pfn_to_nid(unsigned long pfn) |
| 1033 | { |
| 1034 | return 0; |
| 1035 | } |
| 1036 | #endif |
| 1037 | |
| 1038 | #ifdef CONFIG_FLATMEM |
| 1039 | #define pfn_to_nid(pfn) (0) |
| 1040 | #endif |
| 1041 | |
| 1042 | #ifdef CONFIG_SPARSEMEM |
| 1043 | |
| 1044 | /* |
| 1045 | * SECTION_SHIFT #bits space required to store a section # |
| 1046 | * |
| 1047 | * PA_SECTION_SHIFT physical address to/from section number |
| 1048 | * PFN_SECTION_SHIFT pfn to/from section number |
| 1049 | */ |
| 1050 | #define PA_SECTION_SHIFT (SECTION_SIZE_BITS) |
| 1051 | #define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT) |
| 1052 | |
| 1053 | #define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT) |
| 1054 | |
| 1055 | #define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT) |
| 1056 | #define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1)) |
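| | |
| | /* |
| | * Illustrative arithmetic (the values assume x86_64, where |
| | * asm/sparsemem.h sets SECTION_SIZE_BITS to 27 and PAGE_SHIFT is 12): |
| | * |
| | *	PFN_SECTION_SHIFT = 27 - 12 = 15 |
| | *	PAGES_PER_SECTION = 1 << 15 = 32768 pages = 128 MiB per section |
| | */ |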
| 1057 | |
| 1058 | #define SECTION_BLOCKFLAGS_BITS \ |
| 1059 | ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS) |
| 1060 | |
| 1061 | #if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS |
| 1062 | #error Allocator MAX_ORDER exceeds SECTION_SIZE |
| 1063 | #endif |
| 1064 | |
| 1065 | #define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT) |
| 1066 | #define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT) |
| 1067 | |
| 1068 | #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK) |
| 1069 | #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) |
| 1070 | |
| 1071 | struct page; |
| 1072 | struct page_ext; |
| 1073 | struct mem_section { |
| 1074 | /* |
| 1075 | * This is, logically, a pointer to an array of struct |
| 1076 | * pages. However, it is stored with some other magic. |
| 1077 | * (see sparse.c::sparse_init_one_section()) |
| 1078 | * |
| 1079 | * Additionally during early boot we encode node id of |
| 1080 | * the location of the section here to guide allocation. |
| 1081 | * (see sparse.c::memory_present()) |
| 1082 | * |
| 1083 | * Making it a UL at least makes someone do a cast |
| 1084 | * before using it wrong. |
| 1085 | */ |
| 1086 | unsigned long section_mem_map; |
| 1087 | |
| 1088 | /* See declaration of similar field in struct zone */ |
| 1089 | unsigned long *pageblock_flags; |
| 1090 | #ifdef CONFIG_PAGE_EXTENSION |
| 1091 | /* |
| 1092 | * With SPARSEMEM, pgdat doesn't have a page_ext pointer; the one |
| 1093 | * here in mem_section is used instead. (see page_ext.h about this.) |
| 1094 | */ |
| 1095 | struct page_ext *page_ext; |
| 1096 | unsigned long pad; |
| 1097 | #endif |
| 1098 | /* |
| 1099 | * WARNING: mem_section must be a power-of-2 in size for the |
| 1100 | * calculation and use of SECTION_ROOT_MASK to make sense. |
| 1101 | */ |
| 1102 | }; |
| 1103 | |
| 1104 | #ifdef CONFIG_SPARSEMEM_EXTREME |
| 1105 | #define SECTIONS_PER_ROOT (PAGE_SIZE / sizeof (struct mem_section)) |
| 1106 | #else |
| 1107 | #define SECTIONS_PER_ROOT 1 |
| 1108 | #endif |
| 1109 | |
| 1110 | #define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT) |
| 1111 | #define NR_SECTION_ROOTS DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT) |
| 1112 | #define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1) |
| 1113 | |
| 1114 | #ifdef CONFIG_SPARSEMEM_EXTREME |
| 1115 | extern struct mem_section *mem_section[NR_SECTION_ROOTS]; |
| 1116 | #else |
| 1117 | extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]; |
| 1118 | #endif |
| 1119 | |
| 1120 | static inline struct mem_section *__nr_to_section(unsigned long nr) |
| 1121 | { |
| 1122 | if (!mem_section[SECTION_NR_TO_ROOT(nr)]) |
| 1123 | return NULL; |
| 1124 | return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]; |
| 1125 | } |
| 1126 | extern int __section_nr(struct mem_section* ms); |
| 1127 | extern unsigned long usemap_size(void); |
| 1128 | |
| 1129 | /* |
| 1130 | * We use the lower bits of the mem_map pointer to store |
| 1131 | * a little bit of information. There should be at least |
| 1132 | * 3 bits here due to 32-bit alignment. |
| 1133 | */ |
| 1134 | #define SECTION_MARKED_PRESENT (1UL<<0) |
| 1135 | #define SECTION_HAS_MEM_MAP (1UL<<1) |
| 1136 | #define SECTION_MAP_LAST_BIT (1UL<<2) |
| 1137 | #define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) |
| 1138 | #define SECTION_NID_SHIFT 2 |
| 1139 | |
| 1140 | static inline struct page *__section_mem_map_addr(struct mem_section *section) |
| 1141 | { |
| 1142 | unsigned long map = section->section_mem_map; |
| 1143 | map &= SECTION_MAP_MASK; |
| 1144 | return (struct page *)map; |
| 1145 | } |
| 1146 | |
| 1147 | static inline int present_section(struct mem_section *section) |
| 1148 | { |
| 1149 | return (section && (section->section_mem_map & SECTION_MARKED_PRESENT)); |
| 1150 | } |
| 1151 | |
| 1152 | static inline int present_section_nr(unsigned long nr) |
| 1153 | { |
| 1154 | return present_section(__nr_to_section(nr)); |
| 1155 | } |
| 1156 | |
| 1157 | static inline int valid_section(struct mem_section *section) |
| 1158 | { |
| 1159 | return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP)); |
| 1160 | } |
| 1161 | |
| 1162 | static inline int valid_section_nr(unsigned long nr) |
| 1163 | { |
| 1164 | return valid_section(__nr_to_section(nr)); |
| 1165 | } |
| 1166 | |
| 1167 | static inline struct mem_section *__pfn_to_section(unsigned long pfn) |
| 1168 | { |
| 1169 | return __nr_to_section(pfn_to_section_nr(pfn)); |
| 1170 | } |
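| | |
| | /* |
| | * Illustrative sketch of how the helpers above combine; it mirrors the |
| | * SPARSEMEM __pfn_to_page() definition in asm-generic/memory_model.h |
| | * and is meant as a reading aid rather than an authoritative copy: |
| | * |
| | *	unsigned long __pfn = pfn; |
| | *	struct mem_section *__sec = __pfn_to_section(__pfn); |
| | *	struct page *page = __section_mem_map_addr(__sec) + __pfn; |
| | * |
| | * i.e. the encoded mem_map base is biased so that adding the full pfn, |
| | * not just the offset within the section, yields the right struct page. |
| | */ |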
| 1171 | |
| 1172 | #ifndef CONFIG_HAVE_ARCH_PFN_VALID |
| 1173 | static inline int pfn_valid(unsigned long pfn) |
| 1174 | { |
| 1175 | if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) |
| 1176 | return 0; |
| 1177 | return valid_section(__nr_to_section(pfn_to_section_nr(pfn))); |
| 1178 | } |
| 1179 | #endif |
| 1180 | |
| 1181 | static inline int pfn_present(unsigned long pfn) |
| 1182 | { |
| 1183 | if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) |
| 1184 | return 0; |
| 1185 | return present_section(__nr_to_section(pfn_to_section_nr(pfn))); |
| 1186 | } |
| 1187 | |
| 1188 | /* |
| 1189 | * These are _only_ used during initialisation, therefore they |
| 1190 | * can use __initdata ... They could have names to indicate |
| 1191 | * this restriction. |
| 1192 | */ |
| 1193 | #ifdef CONFIG_NUMA |
| 1194 | #define pfn_to_nid(pfn) \ |
| 1195 | ({ \ |
| 1196 | unsigned long __pfn_to_nid_pfn = (pfn); \ |
| 1197 | page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \ |
| 1198 | }) |
| 1199 | #else |
| 1200 | #define pfn_to_nid(pfn) (0) |
| 1201 | #endif |
| 1202 | |
| 1203 | #define early_pfn_valid(pfn) pfn_valid(pfn) |
| 1204 | void sparse_init(void); |
| 1205 | #else |
| 1206 | #define sparse_init() do {} while (0) |
| 1207 | #define sparse_index_init(_sec, _nid) do {} while (0) |
| 1208 | #endif /* CONFIG_SPARSEMEM */ |
| 1209 | |
| 1210 | /* |
| 1211 | * During memory init memblocks map pfns to nids. The search is expensive and |
| 1212 | * this caches recent lookups. The implementation of __early_pfn_to_nid |
| 1213 | * may treat start/end as pfns or sections. |
| 1214 | */ |
| 1215 | struct mminit_pfnnid_cache { |
| 1216 | unsigned long last_start; |
| 1217 | unsigned long last_end; |
| 1218 | int last_nid; |
| 1219 | }; |
| 1220 | |
| 1221 | #ifndef early_pfn_valid |
| 1222 | #define early_pfn_valid(pfn) (1) |
| 1223 | #endif |
| 1224 | |
| 1225 | void memory_present(int nid, unsigned long start, unsigned long end); |
| 1226 | unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); |
| 1227 | |
| 1228 | /* |
| 1229 | * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we |
| 1230 | * need to check pfn validity within that MAX_ORDER_NR_PAGES block. |
| 1231 | * pfn_valid_within() should be used in this case; we optimise this away |
| 1232 | * when we have no holes within a MAX_ORDER_NR_PAGES block. |
| 1233 | */ |
| 1234 | #ifdef CONFIG_HOLES_IN_ZONE |
| 1235 | #define pfn_valid_within(pfn) pfn_valid(pfn) |
| 1236 | #else |
| 1237 | #define pfn_valid_within(pfn) (1) |
| 1238 | #endif |
| 1239 | |
| 1240 | #ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL |
| 1241 | /* |
| 1242 | * pfn_valid() is meant to be able to tell if a given PFN has valid memmap |
| 1243 | * associated with it or not. In FLATMEM, it is expected that holes always |
| 1244 | * have valid memmap as long as there are valid PFNs either side of the hole. |
| 1245 | * In SPARSEMEM, it is assumed that a valid section has a memmap for the |
| 1246 | * entire section. |
| 1247 | * |
| 1248 | * However, ARM, and maybe other embedded architectures in the future, |
| 1249 | * free memmap backing holes to save memory on the assumption the memmap is |
| 1250 | * never used. The page_zone linkages are then broken even though pfn_valid() |
| 1251 | * returns true. A walker of the full memmap must then do this additional |
| 1252 | * check to ensure the memmap they are looking at is sane by making sure |
| 1253 | * the zone and PFN linkages are still valid. This is expensive, but walkers |
| 1254 | * of the full memmap are extremely rare. |
| 1255 | */ |
| 1256 | bool memmap_valid_within(unsigned long pfn, |
| 1257 | struct page *page, struct zone *zone); |
| 1258 | #else |
| 1259 | static inline bool memmap_valid_within(unsigned long pfn, |
| 1260 | struct page *page, struct zone *zone) |
| 1261 | { |
| 1262 | return true; |
| 1263 | } |
| 1264 | #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ |
| 1265 | |
| 1266 | #endif /* !__GENERATING_BOUNDS_H */ |
| 1267 | #endif /* !__ASSEMBLY__ */ |
| 1268 | #endif /* _LINUX_MMZONE_H */ |