/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/memcontrol.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
long nr_swap_pages;
int percpu_pagelist_fraction;

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

int min_free_kbytes = 1024;

unsigned long __meminitdata nr_kernel_pages;
unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
   * ranges of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() will be merged if possible
   * so the number of times add_active_range() can be called is
   * related to the number of nodes and the number of holes
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif

  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
  static int __meminitdata nr_nodemap_entries;
  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
  static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
  static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
  unsigned long __initdata required_kernelcore;
  static unsigned long __initdata required_movablecore;
  unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  int movable_zone;
  EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
EXPORT_SYMBOL(nr_node_ids);
#endif

int page_group_by_mobility_disabled __read_mostly;

static void set_pageblock_migratetype(struct page *page, int migratetype)
{
	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	void *pc = page_get_page_cgroup(page);

	printk(KERN_EMERG "Bad page state in process '%s'\n" KERN_EMERG
		"page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
		current->comm, page, (int)(2*sizeof(unsigned long)),
		(unsigned long)page->flags, page->mapping,
		page_mapcount(page), page_count(page));
	if (pc) {
		printk(KERN_EMERG "cgroup:%p\n", pc);
		page_reset_bad_cgroup(page);
	}
	printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
		KERN_EMERG "Backtrace:\n");
	dump_stack();
	page->flags &= ~(1 << PG_lru	|
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_buddy );
	set_page_count(page, 0);
	reset_page_mapcount(page);
	page->mapping = NULL;
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

static void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageTail(p);
		p->first_page = page;
	}
}

static void destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	if (unlikely(compound_order(page) != order))
		bad_page(page);

	if (unlikely(!PageHead(page)))
		bad_page(page);
	__ClearPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageTail(p) |
				(p->first_page != page)))
			bad_page(page);
		__ClearPageTail(p);
	}
}
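
/*
 * Illustrative sketch (not allocator logic; the helper name is
 * hypothetical): reading back what prep_compound_page() stored.  Per the
 * layout comment above, the destructor set by set_compound_page_dtor()
 * lives in the first tail page's ->lru.next and the allocation order,
 * set by set_compound_order(), in its ->lru.prev.
 */
static inline void example_compound_fields(struct page *head)
{
	struct page *first_tail = head + 1;
	compound_page_dtor *dtor = (compound_page_dtor *)first_tail->lru.next;
	unsigned long order = (unsigned long)first_tail->lru.prev;

	(void)dtor;	/* free_compound_page() for pages set up above */
	(void)order;	/* matches the order passed to prep_compound_page() */
}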

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}
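
/*
 * Worked example for the two helpers above: with page_idx = 8 and
 * order = 1,
 *
 *	buddy_idx    = 8 ^ (1 << 1)  = 10
 *	combined_idx = 8 & ~(1 << 1) = 8
 *
 * so the order-1 block at index 8 merges with its buddy at index 10
 * into an order-2 block starting at index 8.  Starting from index 10
 * instead gives buddy_idx = 8 and the same combined_idx of 8.
 */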

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce with.  A page and its buddy can be coalesced if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with PG_buddy.  Page's
 * order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order)
{
	unsigned long page_idx;
	int order_size = 1 << order;
	int migratetype = get_pageblock_migratetype(page);

	if (unlikely(PageCompound(page)))
		destroy_compound_page(page, order);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & (order_size - 1));
	VM_BUG_ON(bad_range(zone, page));

	__mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct page *buddy;

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(page, buddy, order))
			break;		/* Move the buddy up one level. */

		list_del(&buddy->lru);
		zone->free_area[order].nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru,
		&zone->free_area[order].free_list[migratetype]);
	zone->free_area[order].nr_free++;
}
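
/*
 * Coalescing trace (illustrative only): freeing the order-0 page at
 * index 9 while index 8 holds a free order-0 page and indexes 10-11
 * form a free order-1 block:
 *
 *	order 0: buddy of 9 is 8  -> free at this order, merge -> block at 8, order 1
 *	order 1: buddy of 8 is 10 -> free at this order, merge -> block at 8, order 2
 *	order 2: buddy of 8 is 12 -> not a free order-2 buddy, stop
 *
 * One order-2 block at index 8 ends up on free_list[migratetype].
 */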

static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_get_page_cgroup(page) != NULL) |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);
	if (PageDirty(page))
		__ClearPageDirty(page);
	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not free the page.  But we shall soon need
	 * to do more, for when the ZERO_PAGE count wraps negative.
	 */
	return PageReserved(page);
}

/*
 * Frees a list of pages.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pages_bulk(struct zone *zone, int count,
					struct list_head *list, int order)
{
	spin_lock(&zone->lock);
	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
	zone->pages_scanned = 0;
	while (count--) {
		struct page *page;

		VM_BUG_ON(list_empty(list));
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_one_page list manipulates */
		list_del(&page->lru);
		__free_one_page(page, zone, order);
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order)
{
	spin_lock(&zone->lock);
	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
	zone->pages_scanned = 0;
	__free_one_page(page, zone, order);
	spin_unlock(&zone->lock);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int reserved = 0;

	for (i = 0 ; i < (1 << order) ; ++i)
		reserved += free_pages_check(page + i);
	if (reserved)
		return;

	if (!PageHighMem(page))
		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order);
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}


/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}

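/*
 * Split trace (illustrative only): __rmqueue_smallest() below, finding
 * an order-3 block for an order-0 request, calls
 * expand(zone, page, 0, 3, area, migratetype), which parks the upper
 * half of the block at each step:
 *
 *	high 3 -> 2: pages 4-7 go to the order-2 free list
 *	high 2 -> 1: pages 2-3 go to the order-1 free list
 *	high 1 -> 0: page  1   goes to the order-0 free list
 *
 * leaving page 0 (indexes are offsets into the block) for the caller.
 */
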
/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_get_page_cgroup(page) != NULL) |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);

	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not allocate the page: as a safety net.
	 */
	if (PageReserved(page))
		return 1;

	page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_reclaim |
			1 << PG_referenced | 1 << PG_arch_1 |
			1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area * area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		__mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
		expand(zone, page, order, current_order, area, migratetype);
		return page;
	}

	return NULL;
}


/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
};

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
int move_freepages(struct zone *zone,
			struct page *start_page, struct page *end_page,
			int migratetype)
{
	struct page *page;
	unsigned long order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility
	 */
	BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_del(&page->lru);
		list_add(&page->lru,
			&zone->free_area[order].free_list[migratetype]);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (start_pfn < zone->zone_start_pfn)
		start_page = page;
	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype);
}

/* Remove an element from the buddy allocator from the fallback list */
static struct page *__rmqueue_fallback(struct zone *zone, int order,
						int start_migratetype)
{
	struct free_area * area;
	int current_order;
	struct page *page;
	int migratetype, i;

	/* Find the largest possible block of pages in the other list */
	for (current_order = MAX_ORDER-1; current_order >= order;
						--current_order) {
		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
			migratetype = fallbacks[start_migratetype][i];

			/* MIGRATE_RESERVE handled later if necessary */
			if (migratetype == MIGRATE_RESERVE)
				continue;

			area = &(zone->free_area[current_order]);
			if (list_empty(&area->free_list[migratetype]))
				continue;

			page = list_entry(area->free_list[migratetype].next,
					struct page, lru);
			area->nr_free--;

			/*
			 * If breaking a large block of pages, move all free
			 * pages to the preferred allocation list. If falling
			 * back for a reclaimable kernel allocation, be more
			 * aggressive about taking ownership of free pages
			 */
			if (unlikely(current_order >= (pageblock_order >> 1)) ||
					start_migratetype == MIGRATE_RECLAIMABLE) {
				unsigned long pages;
				pages = move_freepages_block(zone, page,
								start_migratetype);

				/* Claim the whole block if over half of it is free */
				if (pages >= (1 << (pageblock_order-1)))
					set_pageblock_migratetype(page,
								start_migratetype);

				migratetype = start_migratetype;
			}

			/* Remove the page from the freelists */
			list_del(&page->lru);
			rmv_page_order(page);
			__mod_zone_page_state(zone, NR_FREE_PAGES,
							-(1UL << order));

			if (current_order == pageblock_order)
				set_pageblock_migratetype(page,
							start_migratetype);

			expand(zone, page, order, current_order, area, migratetype);
			return page;
		}
	}

	/* Use MIGRATE_RESERVE rather than fail an allocation */
	return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
						int migratetype)
{
	struct page *page;

	page = __rmqueue_smallest(zone, order, migratetype);

	if (unlikely(!page))
		page = __rmqueue_fallback(zone, order, migratetype);

	return page;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype)
{
	int i;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype);
		if (unlikely(page == NULL))
			break;

		/*
		 * Split buddy pages returned by expand() are received here
		 * in physical page order. The page is added to the caller's
		 * list and the list head then moves forward. From the
		 * caller's perspective, the linked list is ordered by page
		 * number under some conditions. This is useful for IO
		 * devices that can merge IO requests if the physical pages
		 * are ordered properly.
		 */
		list_add(&page->lru, list);
		set_page_private(page, migratetype);
		list = &page->lru;
	}
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;
	int to_drain;

	local_irq_save(flags);
	if (pcp->count >= pcp->batch)
		to_drain = pcp->batch;
	else
		to_drain = pcp->count;
	free_pages_bulk(zone, to_drain, &pcp->list, 0);
	pcp->count -= to_drain;
	local_irq_restore(flags);
}
#endif

/*
 * Drain pages of the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset;
		struct per_cpu_pages *pcp;

		if (!populated_zone(zone))
			continue;

		pset = zone_pcp(zone, cpu);

		pcp = &pset->pcp;
		local_irq_save(flags);
		free_pages_bulk(zone, pcp->count, &pcp->list, 0);
		pcp->count = 0;
		local_irq_restore(flags);
	}
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void *arg)
{
	drain_pages(smp_processor_id());
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
 */
void drain_all_pages(void)
{
	on_each_cpu(drain_local_pages, NULL, 0, 1);
}

#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn;
	unsigned long flags;
	int order, t;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for_each_migratetype_order(order, t) {
		list_for_each(curr, &zone->free_area[order].free_list[t]) {
			unsigned long i;

			pfn = page_to_pfn(list_entry(curr, struct page, lru));
			for (i = 0; i < (1UL << order); i++)
				swsusp_set_page_free(pfn_to_page(pfn + i));
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */

/*
 * Free a 0-order page
 */
static void free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;

	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(page))
		return;

	if (!PageHighMem(page))
		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
	arch_free_page(page, 0);
	kernel_map_pages(page, 1, 0);

	pcp = &zone_pcp(zone, get_cpu())->pcp;
	local_irq_save(flags);
	__count_vm_event(PGFREE);
	if (cold)
		list_add_tail(&page->lru, &pcp->list);
	else
		list_add(&page->lru, &pcp->list);
	set_page_private(page, get_pageblock_migratetype(page));
	pcp->count++;
	if (pcp->count >= pcp->high) {
		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
		pcp->count -= pcp->batch;
	}
	local_irq_restore(flags);
	put_cpu();
}

void free_hot_page(struct page *page)
{
	free_hot_cold_page(page, 0);
}

void free_cold_page(struct page *page)
{
	free_hot_cold_page(page, 1);
}

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON(PageCompound(page));
	VM_BUG_ON(!page_count(page));
	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}

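/*
 * Usage sketch for split_page() (illustrative only): a caller that
 * allocated an order-2 page but wants to release sub-pages one at a
 * time might do
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (page) {
 *		split_page(page, 2);
 *		__free_page(page + 3);
 *	}
 *
 * after which page[0..2] remain independent order-0 pages, each with
 * its own reference count, to be freed individually later.
 */
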
/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static struct page *buffered_rmqueue(struct zone *preferred_zone,
			struct zone *zone, int order, gfp_t gfp_flags)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);
	int cpu;
	int migratetype = allocflags_to_migratetype(gfp_flags);

again:
	cpu  = get_cpu();
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;

		pcp = &zone_pcp(zone, cpu)->pcp;
		local_irq_save(flags);
		if (!pcp->count) {
			pcp->count = rmqueue_bulk(zone, 0,
					pcp->batch, &pcp->list, migratetype);
			if (unlikely(!pcp->count))
				goto failed;
		}

		/* Find a page of the appropriate migrate type */
		if (cold) {
			list_for_each_entry_reverse(page, &pcp->list, lru)
				if (page_private(page) == migratetype)
					break;
		} else {
			list_for_each_entry(page, &pcp->list, lru)
				if (page_private(page) == migratetype)
					break;
		}

		/* Allocate more to the pcp list if necessary */
		if (unlikely(&page->lru == &pcp->list)) {
			pcp->count += rmqueue_bulk(zone, 0,
					pcp->batch, &pcp->list, migratetype);
			page = list_entry(pcp->list.next, struct page, lru);
		}

		list_del(&page->lru);
		pcp->count--;
	} else {
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order, migratetype);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
	}

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(preferred_zone, zone);
	local_irq_restore(flags);
	put_cpu();

	VM_BUG_ON(bad_range(zone, page));
	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	put_cpu();
	return NULL;
}

#define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
#define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */
#define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */
#define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */
#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */

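/*
 * How these flags combine in practice (see __alloc_pages_internal()
 * below): a GFP_ATOMIC caller (__GFP_HIGH set, __GFP_WAIT clear) ends
 * up with
 *
 *	alloc_flags = ALLOC_WMARK_MIN | ALLOC_HARDER | ALLOC_HIGH;
 *
 * so it checks the lowest watermark and may dip further into reserves,
 * while a plain GFP_KERNEL caller retries with
 *
 *	alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
 *
 * once the initial ALLOC_WMARK_LOW|ALLOC_CPUSET fast path fails.
 */
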
#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct fail_page_alloc_attr {
	struct fault_attr attr;

	u32 ignore_gfp_highmem;
	u32 ignore_gfp_wait;
	u32 min_order;

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

	struct dentry *ignore_gfp_highmem_file;
	struct dentry *ignore_gfp_wait_file;
	struct dentry *min_order_file;

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

} fail_page_alloc = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_wait = 1,
	.ignore_gfp_highmem = 1,
	.min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
	return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);

static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	if (order < fail_page_alloc.min_order)
		return 0;
	if (gfp_mask & __GFP_NOFAIL)
		return 0;
	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
		return 0;
	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
		return 0;

	return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;
	int err;

	err = init_fault_attr_dentries(&fail_page_alloc.attr,
				       "fail_page_alloc");
	if (err)
		return err;
	dir = fail_page_alloc.attr.dentries.dir;

	fail_page_alloc.ignore_gfp_wait_file =
		debugfs_create_bool("ignore-gfp-wait", mode, dir,
				      &fail_page_alloc.ignore_gfp_wait);

	fail_page_alloc.ignore_gfp_highmem_file =
		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
				      &fail_page_alloc.ignore_gfp_highmem);
	fail_page_alloc.min_order_file =
		debugfs_create_u32("min-order", mode, dir,
				   &fail_page_alloc.min_order);

	if (!fail_page_alloc.ignore_gfp_wait_file ||
			!fail_page_alloc.ignore_gfp_highmem_file ||
			!fail_page_alloc.min_order_file) {
		err = -ENOMEM;
		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
		debugfs_remove(fail_page_alloc.min_order_file);
		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
	}

	return err;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return 0;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */

/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	long min = mark;
	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}
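
/*
 * Worked example (illustrative numbers): an order-2 request against
 * mark = 128 with neither ALLOC_HIGH nor ALLOC_HARDER, a zero
 * lowmem_reserve, NR_FREE_PAGES = 1000, 600 pages in order-0 blocks
 * and 200 pages in order-1 blocks (nr_free[1] = 100):
 *
 *	start: free = 1000 - 4 + 1 = 997, min = 128 -> 997 > 128, continue
 *	o = 0: free -= 600 -> 397,        min = 64  -> 397 > 64,  continue
 *	o = 1: free -= 200 -> 197,        min = 32  -> 197 > 32,  continue
 *
 * so the watermark check passes: enough of the free memory sits in
 * blocks of at least order 2 to satisfy the request.
 */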
1254
9276b1bc
PJ
1255#ifdef CONFIG_NUMA
1256/*
1257 * zlc_setup - Setup for "zonelist cache". Uses cached zone data to
1258 * skip over zones that are not allowed by the cpuset, or that have
1259 * been recently (in last second) found to be nearly full. See further
1260 * comments in mmzone.h. Reduces cache footprint of zonelist scans
183ff22b 1261 * that have to skip over a lot of full or unallowed zones.
9276b1bc
PJ
1262 *
1263 * If the zonelist cache is present in the passed in zonelist, then
1264 * returns a pointer to the allowed node mask (either the current
37b07e41 1265 * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
9276b1bc
PJ
1266 *
1267 * If the zonelist cache is not available for this zonelist, does
1268 * nothing and returns NULL.
1269 *
1270 * If the fullzones BITMAP in the zonelist cache is stale (more than
1271 * a second since last zap'd) then we zap it out (clear its bits.)
1272 *
1273 * We hold off even calling zlc_setup, until after we've checked the
1274 * first zone in the zonelist, on the theory that most allocations will
1275 * be satisfied from that first zone, so best to examine that zone as
1276 * quickly as we can.
1277 */
1278static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1279{
1280 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1281 nodemask_t *allowednodes; /* zonelist_cache approximation */
1282
1283 zlc = zonelist->zlcache_ptr;
1284 if (!zlc)
1285 return NULL;
1286
f05111f5 1287 if (time_after(jiffies, zlc->last_full_zap + HZ)) {
9276b1bc
PJ
1288 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1289 zlc->last_full_zap = jiffies;
1290 }
1291
1292 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1293 &cpuset_current_mems_allowed :
37b07e41 1294 &node_states[N_HIGH_MEMORY];
9276b1bc
PJ
1295 return allowednodes;
1296}
1297
1298/*
1299 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1300 * if it is worth looking at further for free memory:
1301 * 1) Check that the zone isn't thought to be full (doesn't have its
1302 * bit set in the zonelist_cache fullzones BITMAP).
1303 * 2) Check that the zones node (obtained from the zonelist_cache
1304 * z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1305 * Return true (non-zero) if zone is worth looking at further, or
1306 * else return false (zero) if it is not.
1307 *
1308 * This check -ignores- the distinction between various watermarks,
1309 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is
1310 * found to be full for any variation of these watermarks, it will
1311 * be considered full for up to one second by all requests, unless
1312 * we are so low on memory on all allowed nodes that we are forced
1313 * into the second scan of the zonelist.
1314 *
1315 * In the second scan we ignore this zonelist cache and exactly
1316 * apply the watermarks to all zones, even it is slower to do so.
1317 * We are low on memory in the second scan, and should leave no stone
1318 * unturned looking for a free page.
1319 */
dd1a239f 1320static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
9276b1bc
PJ
1321 nodemask_t *allowednodes)
1322{
1323 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1324 int i; /* index of *z in zonelist zones */
1325 int n; /* node that zone *z is on */
1326
1327 zlc = zonelist->zlcache_ptr;
1328 if (!zlc)
1329 return 1;
1330
dd1a239f 1331 i = z - zonelist->_zonerefs;
9276b1bc
PJ
1332 n = zlc->z_to_n[i];
1333
1334 /* This zone is worth trying if it is allowed but not full */
1335 return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1336}
1337
1338/*
1339 * Given 'z' scanning a zonelist, set the corresponding bit in
1340 * zlc->fullzones, so that subsequent attempts to allocate a page
1341 * from that zone don't waste time re-examining it.
1342 */
dd1a239f 1343static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
9276b1bc
PJ
1344{
1345 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1346 int i; /* index of *z in zonelist zones */
1347
1348 zlc = zonelist->zlcache_ptr;
1349 if (!zlc)
1350 return;
1351
dd1a239f 1352 i = z - zonelist->_zonerefs;
9276b1bc
PJ
1353
1354 set_bit(i, zlc->fullzones);
1355}
1356
1357#else /* CONFIG_NUMA */
1358
1359static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1360{
1361 return NULL;
1362}
1363
dd1a239f 1364static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
9276b1bc
PJ
1365 nodemask_t *allowednodes)
1366{
1367 return 1;
1368}
1369
dd1a239f 1370static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
9276b1bc
PJ
1371{
1372}
1373#endif /* CONFIG_NUMA */
1374
7fb1d9fc 1375/*
0798e519 1376 * get_page_from_freelist goes through the zonelist trying to allocate
7fb1d9fc
RS
1377 * a page.
1378 */
1379static struct page *
19770b32 1380get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
54a6eb5c 1381 struct zonelist *zonelist, int high_zoneidx, int alloc_flags)
753ee728 1382{
dd1a239f 1383 struct zoneref *z;
7fb1d9fc 1384 struct page *page = NULL;
54a6eb5c 1385 int classzone_idx;
18ea7e71 1386 struct zone *zone, *preferred_zone;
9276b1bc
PJ
1387 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1388 int zlc_active = 0; /* set if using zonelist_cache */
1389 int did_zlc_setup = 0; /* just call zlc_setup() one time */
54a6eb5c 1390
19770b32
MG
1391 (void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
1392 &preferred_zone);
1393 classzone_idx = zone_idx(preferred_zone);
7fb1d9fc 1394
9276b1bc 1395zonelist_scan:
7fb1d9fc 1396 /*
9276b1bc 1397 * Scan zonelist, looking for a zone with enough free.
7fb1d9fc
RS
1398 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1399 */
19770b32
MG
1400 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1401 high_zoneidx, nodemask) {
9276b1bc
PJ
1402 if (NUMA_BUILD && zlc_active &&
1403 !zlc_zone_worth_trying(zonelist, z, allowednodes))
1404 continue;
7fb1d9fc 1405 if ((alloc_flags & ALLOC_CPUSET) &&
02a0e53d 1406 !cpuset_zone_allowed_softwall(zone, gfp_mask))
9276b1bc 1407 goto try_next_zone;
7fb1d9fc
RS
1408
1409 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
3148890b
NP
1410 unsigned long mark;
1411 if (alloc_flags & ALLOC_WMARK_MIN)
1192d526 1412 mark = zone->pages_min;
3148890b 1413 else if (alloc_flags & ALLOC_WMARK_LOW)
1192d526 1414 mark = zone->pages_low;
3148890b 1415 else
1192d526 1416 mark = zone->pages_high;
0798e519
PJ
1417 if (!zone_watermark_ok(zone, order, mark,
1418 classzone_idx, alloc_flags)) {
9eeff239 1419 if (!zone_reclaim_mode ||
1192d526 1420 !zone_reclaim(zone, gfp_mask, order))
9276b1bc 1421 goto this_zone_full;
0798e519 1422 }
7fb1d9fc
RS
1423 }
1424
18ea7e71 1425 page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask);
0798e519 1426 if (page)
7fb1d9fc 1427 break;
9276b1bc
PJ
1428this_zone_full:
1429 if (NUMA_BUILD)
1430 zlc_mark_zone_full(zonelist, z);
1431try_next_zone:
1432 if (NUMA_BUILD && !did_zlc_setup) {
1433 /* we do zlc_setup after the first zone is tried */
1434 allowednodes = zlc_setup(zonelist, alloc_flags);
1435 zlc_active = 1;
1436 did_zlc_setup = 1;
1437 }
54a6eb5c 1438 }
9276b1bc
PJ
1439
1440 if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1441 /* Disable zlc cache for second zonelist scan */
1442 zlc_active = 0;
1443 goto zonelist_scan;
1444 }
7fb1d9fc 1445 return page;
753ee728
MH
1446}
1447
1da177e4
LT
1448/*
1449 * This is the 'heart' of the zoned buddy allocator.
1450 */
19770b32
MG
1451static struct page *
1452__alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
1453 struct zonelist *zonelist, nodemask_t *nodemask)
1da177e4 1454{
260b2367 1455 const gfp_t wait = gfp_mask & __GFP_WAIT;
54a6eb5c 1456 enum zone_type high_zoneidx = gfp_zone(gfp_mask);
dd1a239f
MG
1457 struct zoneref *z;
1458 struct zone *zone;
1da177e4
LT
1459 struct page *page;
1460 struct reclaim_state reclaim_state;
1461 struct task_struct *p = current;
1da177e4 1462 int do_retry;
7fb1d9fc 1463 int alloc_flags;
1da177e4
LT
1464 int did_some_progress;
1465
1466 might_sleep_if(wait);
1467
933e312e
AM
1468 if (should_fail_alloc_page(gfp_mask, order))
1469 return NULL;
1470
6b1de916 1471restart:
dd1a239f 1472 z = zonelist->_zonerefs; /* the list of zones suitable for gfp_mask */
1da177e4 1473
dd1a239f 1474 if (unlikely(!z->zone)) {
523b9458
CL
1475 /*
1476 * Happens if we have an empty zonelist as a result of
1477 * GFP_THISNODE being used on a memoryless node
1478 */
1da177e4
LT
1479 return NULL;
1480 }
6b1de916 1481
19770b32 1482 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
54a6eb5c 1483 zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET);
7fb1d9fc
RS
1484 if (page)
1485 goto got_pg;
1da177e4 1486
952f3b51
CL
1487 /*
1488 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
1489 * __GFP_NOWARN set) should not cause reclaim since the subsystem
1490 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
1491 * using a larger set of nodes after it has established that the
1492 * allowed per node queues are empty and that nodes are
1493 * over allocated.
1494 */
1495 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1496 goto nopage;
1497
dd1a239f
MG
1498 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
1499 wakeup_kswapd(zone, order);
1da177e4 1500
9bf2229f 1501 /*
7fb1d9fc
RS
1502 * OK, we're below the kswapd watermark and have kicked background
1503 * reclaim. Now things get more complex, so set up alloc_flags according
1504 * to how we want to proceed.
1505 *
1506 * The caller may dip into page reserves a bit more if the caller
1507 * cannot run direct reclaim, or if the caller has realtime scheduling
4eac915d
PJ
1508 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
1509 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
9bf2229f 1510 */
3148890b 1511 alloc_flags = ALLOC_WMARK_MIN;
7fb1d9fc
RS
1512 if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
1513 alloc_flags |= ALLOC_HARDER;
1514 if (gfp_mask & __GFP_HIGH)
1515 alloc_flags |= ALLOC_HIGH;
bdd804f4
PJ
1516 if (wait)
1517 alloc_flags |= ALLOC_CPUSET;
1da177e4
LT
1518
1519 /*
1520 * Go through the zonelist again. Let __GFP_HIGH and allocations
7fb1d9fc 1521 * coming from realtime tasks go deeper into reserves.
1da177e4
LT
1522 *
1523 * This is the last chance, in general, before the goto nopage.
1524 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
9bf2229f 1525 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1da177e4 1526 */
19770b32 1527 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
54a6eb5c 1528 high_zoneidx, alloc_flags);
7fb1d9fc
RS
1529 if (page)
1530 goto got_pg;
1da177e4
LT
1531
1532 /* This allocation should allow future memory freeing. */
b84a35be 1533
b43a57bb 1534rebalance:
b84a35be
NP
1535 if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
1536 && !in_interrupt()) {
1537 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
885036d3 1538nofail_alloc:
b84a35be 1539 /* go through the zonelist yet again, ignoring mins */
19770b32 1540 page = get_page_from_freelist(gfp_mask, nodemask, order,
54a6eb5c 1541 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS);
7fb1d9fc
RS
1542 if (page)
1543 goto got_pg;
885036d3 1544 if (gfp_mask & __GFP_NOFAIL) {
3fcfab16 1545 congestion_wait(WRITE, HZ/50);
885036d3
KK
1546 goto nofail_alloc;
1547 }
1da177e4
LT
1548 }
1549 goto nopage;
1550 }
1551
1552 /* Atomic allocations - we can't balance anything */
1553 if (!wait)
1554 goto nopage;
1555
1da177e4
LT
1556 cond_resched();
1557
1558 /* We now go into synchronous reclaim */
3e0d98b9 1559 cpuset_memory_pressure_bump();
1da177e4
LT
1560 p->flags |= PF_MEMALLOC;
1561 reclaim_state.reclaimed_slab = 0;
1562 p->reclaim_state = &reclaim_state;
1563
dac1d27b 1564 did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
1da177e4
LT
1565
1566 p->reclaim_state = NULL;
1567 p->flags &= ~PF_MEMALLOC;
1568
1569 cond_resched();
1570
e2c55dc8 1571 if (order != 0)
9f8f2172 1572 drain_all_pages();
e2c55dc8 1573
1da177e4 1574 if (likely(did_some_progress)) {
19770b32 1575 page = get_page_from_freelist(gfp_mask, nodemask, order,
54a6eb5c 1576 zonelist, high_zoneidx, alloc_flags);
7fb1d9fc
RS
1577 if (page)
1578 goto got_pg;
1da177e4 1579 } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
dd1a239f 1580 if (!try_set_zone_oom(zonelist, gfp_mask)) {
ff0ceb9d
DR
1581 schedule_timeout_uninterruptible(1);
1582 goto restart;
1583 }
1584
1da177e4
LT
1585 /*
1586 * Go through the zonelist yet one more time, keep
1587 * very high watermark here, this is only to catch
1588 * a parallel oom killing, we must fail if we're still
1589 * under heavy pressure.
1590 */
19770b32
MG
1591 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1592 order, zonelist, high_zoneidx,
1593 ALLOC_WMARK_HIGH|ALLOC_CPUSET);
ff0ceb9d 1594 if (page) {
dd1a239f 1595 clear_zonelist_oom(zonelist, gfp_mask);
7fb1d9fc 1596 goto got_pg;
ff0ceb9d 1597 }
1da177e4 1598
a8bbf72a 1599 /* The OOM killer will not help higher order allocs so fail */
ff0ceb9d 1600 if (order > PAGE_ALLOC_COSTLY_ORDER) {
dd1a239f 1601 clear_zonelist_oom(zonelist, gfp_mask);
a8bbf72a 1602 goto nopage;
ff0ceb9d 1603 }
a8bbf72a 1604
9b0f8b04 1605 out_of_memory(zonelist, gfp_mask, order);
dd1a239f 1606 clear_zonelist_oom(zonelist, gfp_mask);
1da177e4
LT
1607 goto restart;
1608 }
1609
1610 /*
1611 * Don't let big-order allocations loop unless the caller explicitly
1612 * requests that. Wait for some write requests to complete then retry.
1613 *
ab857d09
NA
1614 * In this implementation, either order <= PAGE_ALLOC_COSTLY_ORDER or
1615 * __GFP_REPEAT mean __GFP_NOFAIL, but that may not be true in other
1616 * implementations.
1da177e4
LT
1617 */
1618 do_retry = 0;
1619 if (!(gfp_mask & __GFP_NORETRY)) {
5ad333eb
AW
1620 if ((order <= PAGE_ALLOC_COSTLY_ORDER) ||
1621 (gfp_mask & __GFP_REPEAT))
1da177e4
LT
1622 do_retry = 1;
1623 if (gfp_mask & __GFP_NOFAIL)
1624 do_retry = 1;
1625 }
1626 if (do_retry) {
3fcfab16 1627 congestion_wait(WRITE, HZ/50);
1da177e4
LT
1628 goto rebalance;
1629 }
1630
1631nopage:
1632 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
1633 printk(KERN_WARNING "%s: page allocation failure."
1634 " order:%d, mode:0x%x\n",
1635 p->comm, order, gfp_mask);
1636 dump_stack();
578c2fd6 1637 show_mem();
1da177e4 1638 }
1da177e4 1639got_pg:
1da177e4
LT
1640 return page;
1641}
1642
19770b32
MG
1643struct page *
1644__alloc_pages(gfp_t gfp_mask, unsigned int order,
1645 struct zonelist *zonelist)
1646{
1647 return __alloc_pages_internal(gfp_mask, order, zonelist, NULL);
1648}
1649
1650struct page *
1651__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
1652 struct zonelist *zonelist, nodemask_t *nodemask)
1653{
1654 return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask);
1655}
1656
1da177e4
LT
1657EXPORT_SYMBOL(__alloc_pages);
1658
1659/*
1660 * Common helper functions.
1661 */
920c7a5d 1662unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1da177e4
LT
1663{
1664 struct page * page;
1665 page = alloc_pages(gfp_mask, order);
1666 if (!page)
1667 return 0;
1668 return (unsigned long) page_address(page);
1669}
1670
1671EXPORT_SYMBOL(__get_free_pages);
1672
920c7a5d 1673unsigned long get_zeroed_page(gfp_t gfp_mask)
1da177e4
LT
1674{
1675 struct page * page;
1676
1677 /*
 1678	 * get_zeroed_page() returns the page's kernel virtual address, which
 1679	 * cannot represent a highmem page
1680 */
725d704e 1681 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
1da177e4
LT
1682
1683 page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
1684 if (page)
1685 return (unsigned long) page_address(page);
1686 return 0;
1687}
1688
1689EXPORT_SYMBOL(get_zeroed_page);
1690
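/*
 * Usage sketch (illustrative; not part of this file): a caller that wants one
 * zero-filled lowmem page typically pairs get_zeroed_page() with free_page().
 * example_use_one_page() is a hypothetical name; the sketch assumes a normal
 * process context where GFP_KERNEL may sleep.
 */
#include <linux/errno.h>
#include <linux/gfp.h>

static int example_use_one_page(void)
{
	unsigned long addr = get_zeroed_page(GFP_KERNEL);

	if (!addr)
		return -ENOMEM;		/* allocation failed */
	/* ... use the page via the returned kernel virtual address ... */
	free_page(addr);
	return 0;
}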
1691void __pagevec_free(struct pagevec *pvec)
1692{
1693 int i = pagevec_count(pvec);
1694
1695 while (--i >= 0)
1696 free_hot_cold_page(pvec->pages[i], pvec->cold);
1697}
1698
920c7a5d 1699void __free_pages(struct page *page, unsigned int order)
1da177e4 1700{
b5810039 1701 if (put_page_testzero(page)) {
1da177e4
LT
1702 if (order == 0)
1703 free_hot_page(page);
1704 else
1705 __free_pages_ok(page, order);
1706 }
1707}
1708
1709EXPORT_SYMBOL(__free_pages);
1710
920c7a5d 1711void free_pages(unsigned long addr, unsigned int order)
1da177e4
LT
1712{
1713 if (addr != 0) {
725d704e 1714 VM_BUG_ON(!virt_addr_valid((void *)addr));
1da177e4
LT
1715 __free_pages(virt_to_page((void *)addr), order);
1716 }
1717}
1718
1719EXPORT_SYMBOL(free_pages);
1720
1da177e4
LT
1721static unsigned int nr_free_zone_pages(int offset)
1722{
dd1a239f 1723 struct zoneref *z;
54a6eb5c
MG
1724 struct zone *zone;
1725
e310fd43 1726 /* Just pick one node, since fallback list is circular */
1da177e4
LT
1727 unsigned int sum = 0;
1728
0e88460d 1729 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
1da177e4 1730
54a6eb5c 1731 for_each_zone_zonelist(zone, z, zonelist, offset) {
e310fd43
MB
1732 unsigned long size = zone->present_pages;
1733 unsigned long high = zone->pages_high;
1734 if (size > high)
1735 sum += size - high;
1da177e4
LT
1736 }
1737
1738 return sum;
1739}
1740
1741/*
1742 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
1743 */
1744unsigned int nr_free_buffer_pages(void)
1745{
af4ca457 1746 return nr_free_zone_pages(gfp_zone(GFP_USER));
1da177e4 1747}
c2f1a551 1748EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1da177e4
LT
1749
1750/*
1751 * Amount of free RAM allocatable within all zones
1752 */
1753unsigned int nr_free_pagecache_pages(void)
1754{
2a1e274a 1755 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
1da177e4 1756}
08e0f6a9
CL
1757
1758static inline void show_node(struct zone *zone)
1da177e4 1759{
08e0f6a9 1760 if (NUMA_BUILD)
25ba77c1 1761 printk("Node %d ", zone_to_nid(zone));
1da177e4 1762}
1da177e4 1763
1da177e4
LT
1764void si_meminfo(struct sysinfo *val)
1765{
1766 val->totalram = totalram_pages;
1767 val->sharedram = 0;
d23ad423 1768 val->freeram = global_page_state(NR_FREE_PAGES);
1da177e4 1769 val->bufferram = nr_blockdev_pages();
1da177e4
LT
1770 val->totalhigh = totalhigh_pages;
1771 val->freehigh = nr_free_highpages();
1da177e4
LT
1772 val->mem_unit = PAGE_SIZE;
1773}
1774
1775EXPORT_SYMBOL(si_meminfo);
1776
1777#ifdef CONFIG_NUMA
1778void si_meminfo_node(struct sysinfo *val, int nid)
1779{
1780 pg_data_t *pgdat = NODE_DATA(nid);
1781
1782 val->totalram = pgdat->node_present_pages;
d23ad423 1783 val->freeram = node_page_state(nid, NR_FREE_PAGES);
98d2b0eb 1784#ifdef CONFIG_HIGHMEM
1da177e4 1785 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
d23ad423
CL
1786 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
1787 NR_FREE_PAGES);
98d2b0eb
CL
1788#else
1789 val->totalhigh = 0;
1790 val->freehigh = 0;
1791#endif
1da177e4
LT
1792 val->mem_unit = PAGE_SIZE;
1793}
1794#endif
1795
1796#define K(x) ((x) << (PAGE_SHIFT-10))
1797
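/*
 * Arithmetic note (illustrative, standalone): K(x) converts a page count to
 * kilobytes by shifting with (PAGE_SHIFT - 10). With 4 KiB pages
 * (PAGE_SHIFT == 12) that is a shift by 2, i.e. x pages -> x * 4 kB.
 */
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12			/* assume 4 KiB pages */
#define SKETCH_K(x) ((x) << (SKETCH_PAGE_SHIFT - 10))

int main(void)
{
	printf("%lu pages = %lu kB\n", 300UL, SKETCH_K(300UL));	/* 300 pages = 1200 kB */
	return 0;
}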
1798/*
 1799 * Show the free area list (used, for example, by the SysRq memory-state dump).
1800 * We also calculate the percentage fragmentation. We do this by counting the
1801 * memory on each free list with the exception of the first item on the list.
1802 */
1803void show_free_areas(void)
1804{
c7241913 1805 int cpu;
1da177e4
LT
1806 struct zone *zone;
1807
1808 for_each_zone(zone) {
c7241913 1809 if (!populated_zone(zone))
1da177e4 1810 continue;
c7241913
JS
1811
1812 show_node(zone);
1813 printk("%s per-cpu:\n", zone->name);
1da177e4 1814
6b482c67 1815 for_each_online_cpu(cpu) {
1da177e4
LT
1816 struct per_cpu_pageset *pageset;
1817
e7c8d5c9 1818 pageset = zone_pcp(zone, cpu);
1da177e4 1819
3dfa5721
CL
1820 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
1821 cpu, pageset->pcp.high,
1822 pageset->pcp.batch, pageset->pcp.count);
1da177e4
LT
1823 }
1824 }
1825
a25700a5 1826 printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n"
d23ad423 1827 " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
65e458d4
CL
1828 global_page_state(NR_ACTIVE),
1829 global_page_state(NR_INACTIVE),
b1e7a8fd 1830 global_page_state(NR_FILE_DIRTY),
ce866b34 1831 global_page_state(NR_WRITEBACK),
fd39fc85 1832 global_page_state(NR_UNSTABLE_NFS),
d23ad423 1833 global_page_state(NR_FREE_PAGES),
972d1a7b
CL
1834 global_page_state(NR_SLAB_RECLAIMABLE) +
1835 global_page_state(NR_SLAB_UNRECLAIMABLE),
65ba55f5 1836 global_page_state(NR_FILE_MAPPED),
a25700a5
AM
1837 global_page_state(NR_PAGETABLE),
1838 global_page_state(NR_BOUNCE));
1da177e4
LT
1839
1840 for_each_zone(zone) {
1841 int i;
1842
c7241913
JS
1843 if (!populated_zone(zone))
1844 continue;
1845
1da177e4
LT
1846 show_node(zone);
1847 printk("%s"
1848 " free:%lukB"
1849 " min:%lukB"
1850 " low:%lukB"
1851 " high:%lukB"
1852 " active:%lukB"
1853 " inactive:%lukB"
1854 " present:%lukB"
1855 " pages_scanned:%lu"
1856 " all_unreclaimable? %s"
1857 "\n",
1858 zone->name,
d23ad423 1859 K(zone_page_state(zone, NR_FREE_PAGES)),
1da177e4
LT
1860 K(zone->pages_min),
1861 K(zone->pages_low),
1862 K(zone->pages_high),
c8785385
CL
1863 K(zone_page_state(zone, NR_ACTIVE)),
1864 K(zone_page_state(zone, NR_INACTIVE)),
1da177e4
LT
1865 K(zone->present_pages),
1866 zone->pages_scanned,
e815af95 1867 (zone_is_all_unreclaimable(zone) ? "yes" : "no")
1da177e4
LT
1868 );
1869 printk("lowmem_reserve[]:");
1870 for (i = 0; i < MAX_NR_ZONES; i++)
1871 printk(" %lu", zone->lowmem_reserve[i]);
1872 printk("\n");
1873 }
1874
1875 for_each_zone(zone) {
8f9de51a 1876 unsigned long nr[MAX_ORDER], flags, order, total = 0;
1da177e4 1877
c7241913
JS
1878 if (!populated_zone(zone))
1879 continue;
1880
1da177e4
LT
1881 show_node(zone);
1882 printk("%s: ", zone->name);
1da177e4
LT
1883
1884 spin_lock_irqsave(&zone->lock, flags);
1885 for (order = 0; order < MAX_ORDER; order++) {
8f9de51a
KK
1886 nr[order] = zone->free_area[order].nr_free;
1887 total += nr[order] << order;
1da177e4
LT
1888 }
1889 spin_unlock_irqrestore(&zone->lock, flags);
8f9de51a
KK
1890 for (order = 0; order < MAX_ORDER; order++)
1891 printk("%lu*%lukB ", nr[order], K(1UL) << order);
1da177e4
LT
1892 printk("= %lukB\n", K(total));
1893 }
1894
e6f3602d
LW
1895 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
1896
1da177e4
LT
1897 show_swap_cache_info();
1898}
1899
19770b32
MG
1900static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
1901{
1902 zoneref->zone = zone;
1903 zoneref->zone_idx = zone_idx(zone);
1904}
1905
1da177e4
LT
1906/*
1907 * Builds allocation fallback zone lists.
1a93205b
CL
1908 *
1909 * Add all populated zones of a node to the zonelist.
1da177e4 1910 */
f0c0b2b8
KH
1911static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
1912 int nr_zones, enum zone_type zone_type)
1da177e4 1913{
1a93205b
CL
1914 struct zone *zone;
1915
98d2b0eb 1916 BUG_ON(zone_type >= MAX_NR_ZONES);
2f6726e5 1917 zone_type++;
02a68a5e
CL
1918
1919 do {
2f6726e5 1920 zone_type--;
070f8032 1921 zone = pgdat->node_zones + zone_type;
1a93205b 1922 if (populated_zone(zone)) {
dd1a239f
MG
1923 zoneref_set_zone(zone,
1924 &zonelist->_zonerefs[nr_zones++]);
070f8032 1925 check_highest_zone(zone_type);
1da177e4 1926 }
02a68a5e 1927
2f6726e5 1928 } while (zone_type);
070f8032 1929 return nr_zones;
1da177e4
LT
1930}
1931
f0c0b2b8
KH
1932
1933/*
1934 * zonelist_order:
1935 * 0 = automatic detection of better ordering.
1936 * 1 = order by ([node] distance, -zonetype)
1937 * 2 = order by (-zonetype, [node] distance)
1938 *
1939 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
1940 * the same zonelist. So only NUMA can configure this param.
1941 */
1942#define ZONELIST_ORDER_DEFAULT 0
1943#define ZONELIST_ORDER_NODE 1
1944#define ZONELIST_ORDER_ZONE 2
1945
1946/* zonelist order in the kernel.
1947 * set_zonelist_order() will set this to NODE or ZONE.
1948 */
1949static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
1950static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
1951
1952
1da177e4 1953#ifdef CONFIG_NUMA
f0c0b2b8
KH
1954/* The ordering requested by the user (boot option or sysctl) */
1955static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
1956/* string for sysctl */
1957#define NUMA_ZONELIST_ORDER_LEN 16
1958char numa_zonelist_order[16] = "default";
1959
1960/*
 1961 * Interface for configuring zonelist ordering.
 1962 * Command line option "numa_zonelist_order":
 1963 *	= "[dD]efault"	- default, automatic configuration
 1964 *	= "[nN]ode"	- order by node locality, then by zone within node
 1965 *	= "[zZ]one"	- order by zone, then by locality within zone
1966 */
1967
1968static int __parse_numa_zonelist_order(char *s)
1969{
1970 if (*s == 'd' || *s == 'D') {
1971 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
1972 } else if (*s == 'n' || *s == 'N') {
1973 user_zonelist_order = ZONELIST_ORDER_NODE;
1974 } else if (*s == 'z' || *s == 'Z') {
1975 user_zonelist_order = ZONELIST_ORDER_ZONE;
1976 } else {
1977 printk(KERN_WARNING
1978 "Ignoring invalid numa_zonelist_order value: "
1979 "%s\n", s);
1980 return -EINVAL;
1981 }
1982 return 0;
1983}
1984
1985static __init int setup_numa_zonelist_order(char *s)
1986{
1987 if (s)
1988 return __parse_numa_zonelist_order(s);
1989 return 0;
1990}
1991early_param("numa_zonelist_order", setup_numa_zonelist_order);
1992
1993/*
1994 * sysctl handler for numa_zonelist_order
1995 */
1996int numa_zonelist_order_handler(ctl_table *table, int write,
1997 struct file *file, void __user *buffer, size_t *length,
1998 loff_t *ppos)
1999{
2000 char saved_string[NUMA_ZONELIST_ORDER_LEN];
2001 int ret;
2002
2003 if (write)
2004 strncpy(saved_string, (char*)table->data,
2005 NUMA_ZONELIST_ORDER_LEN);
2006 ret = proc_dostring(table, write, file, buffer, length, ppos);
2007 if (ret)
2008 return ret;
2009 if (write) {
2010 int oldval = user_zonelist_order;
2011 if (__parse_numa_zonelist_order((char*)table->data)) {
2012 /*
2013 * bogus value. restore saved string
2014 */
2015 strncpy((char*)table->data, saved_string,
2016 NUMA_ZONELIST_ORDER_LEN);
2017 user_zonelist_order = oldval;
2018 } else if (oldval != user_zonelist_order)
2019 build_all_zonelists();
2020 }
2021 return 0;
2022}
2023
2024
1da177e4 2025#define MAX_NODE_LOAD (num_online_nodes())
f0c0b2b8
KH
2026static int node_load[MAX_NUMNODES];
2027
1da177e4 2028/**
4dc3b16b 2029 * find_next_best_node - find the next node that should appear in a given node's fallback list
1da177e4
LT
2030 * @node: node whose fallback list we're appending
2031 * @used_node_mask: nodemask_t of already used nodes
2032 *
2033 * We use a number of factors to determine which is the next node that should
2034 * appear on a given node's fallback list. The node should not have appeared
2035 * already in @node's fallback list, and it should be the next closest node
2036 * according to the distance array (which contains arbitrary distance values
 2037 * from each node to each node in the system); we also prefer nodes
2038 * with no CPUs, since presumably they'll have very little allocation pressure
2039 * on them otherwise.
2040 * It returns -1 if no node is found.
2041 */
f0c0b2b8 2042static int find_next_best_node(int node, nodemask_t *used_node_mask)
1da177e4 2043{
4cf808eb 2044 int n, val;
1da177e4
LT
2045 int min_val = INT_MAX;
2046 int best_node = -1;
c5f59f08 2047 node_to_cpumask_ptr(tmp, 0);
1da177e4 2048
4cf808eb
LT
2049 /* Use the local node if we haven't already */
2050 if (!node_isset(node, *used_node_mask)) {
2051 node_set(node, *used_node_mask);
2052 return node;
2053 }
1da177e4 2054
37b07e41 2055 for_each_node_state(n, N_HIGH_MEMORY) {
1da177e4
LT
2056
2057 /* Don't want a node to appear more than once */
2058 if (node_isset(n, *used_node_mask))
2059 continue;
2060
1da177e4
LT
2061 /* Use the distance array to find the distance */
2062 val = node_distance(node, n);
2063
4cf808eb
LT
2064 /* Penalize nodes under us ("prefer the next node") */
2065 val += (n < node);
2066
1da177e4 2067 /* Give preference to headless and unused nodes */
c5f59f08
MT
2068 node_to_cpumask_ptr_next(tmp, n);
2069 if (!cpus_empty(*tmp))
1da177e4
LT
2070 val += PENALTY_FOR_NODE_WITH_CPUS;
2071
2072 /* Slight preference for less loaded node */
2073 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2074 val += node_load[n];
2075
2076 if (val < min_val) {
2077 min_val = val;
2078 best_node = n;
2079 }
2080 }
2081
2082 if (best_node >= 0)
2083 node_set(best_node, *used_node_mask);
2084
2085 return best_node;
2086}
2087
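/*
 * Illustrative sketch (not kernel code): the scoring used by
 * find_next_best_node() above, for a single candidate node. The identifiers
 * and constants below are stand-ins for the kernel's versions; the lowest
 * score wins.
 */
#include <stdbool.h>

#define SKETCH_MAX_NODE_LOAD	4	/* kernel: num_online_nodes() */
#define SKETCH_MAX_NUMNODES	4
#define SKETCH_CPU_PENALTY	1	/* kernel: PENALTY_FOR_NODE_WITH_CPUS */

int node_score(int distance, bool candidate_below_local,
	       bool candidate_has_cpus, int load)
{
	int val = distance;			/* closer nodes score lower */

	val += candidate_below_local ? 1 : 0;	/* prefer the next node, not a previous one */
	if (candidate_has_cpus)
		val += SKETCH_CPU_PENALTY;	/* headless nodes see less allocation pressure */
	val *= SKETCH_MAX_NODE_LOAD * SKETCH_MAX_NUMNODES;
	val += load;				/* round-robin among equal-distance nodes */
	return val;
}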
f0c0b2b8
KH
2088
2089/*
2090 * Build zonelists ordered by node and zones within node.
2091 * This results in maximum locality--normal zone overflows into local
2092 * DMA zone, if any--but risks exhausting DMA zone.
2093 */
2094static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
1da177e4 2095{
f0c0b2b8 2096 int j;
1da177e4 2097 struct zonelist *zonelist;
f0c0b2b8 2098
54a6eb5c 2099 zonelist = &pgdat->node_zonelists[0];
dd1a239f 2100 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
54a6eb5c
MG
2101 ;
2102 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2103 MAX_NR_ZONES - 1);
dd1a239f
MG
2104 zonelist->_zonerefs[j].zone = NULL;
2105 zonelist->_zonerefs[j].zone_idx = 0;
f0c0b2b8
KH
2106}
2107
523b9458
CL
2108/*
2109 * Build gfp_thisnode zonelists
2110 */
2111static void build_thisnode_zonelists(pg_data_t *pgdat)
2112{
523b9458
CL
2113 int j;
2114 struct zonelist *zonelist;
2115
54a6eb5c
MG
2116 zonelist = &pgdat->node_zonelists[1];
2117 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
dd1a239f
MG
2118 zonelist->_zonerefs[j].zone = NULL;
2119 zonelist->_zonerefs[j].zone_idx = 0;
523b9458
CL
2120}
2121
f0c0b2b8
KH
2122/*
2123 * Build zonelists ordered by zone and nodes within zones.
2124 * This results in conserving DMA zone[s] until all Normal memory is
2125 * exhausted, but results in overflowing to remote node while memory
2126 * may still exist in local DMA zone.
2127 */
2128static int node_order[MAX_NUMNODES];
2129
2130static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2131{
f0c0b2b8
KH
2132 int pos, j, node;
2133 int zone_type; /* needs to be signed */
2134 struct zone *z;
2135 struct zonelist *zonelist;
2136
54a6eb5c
MG
2137 zonelist = &pgdat->node_zonelists[0];
2138 pos = 0;
2139 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2140 for (j = 0; j < nr_nodes; j++) {
2141 node = node_order[j];
2142 z = &NODE_DATA(node)->node_zones[zone_type];
2143 if (populated_zone(z)) {
dd1a239f
MG
2144 zoneref_set_zone(z,
2145 &zonelist->_zonerefs[pos++]);
54a6eb5c 2146 check_highest_zone(zone_type);
f0c0b2b8
KH
2147 }
2148 }
f0c0b2b8 2149 }
dd1a239f
MG
2150 zonelist->_zonerefs[pos].zone = NULL;
2151 zonelist->_zonerefs[pos].zone_idx = 0;
f0c0b2b8
KH
2152}
2153
2154static int default_zonelist_order(void)
2155{
2156 int nid, zone_type;
2157 unsigned long low_kmem_size,total_size;
2158 struct zone *z;
2159 int average_size;
2160 /*
 2161	 * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
 2162	 * If they are really small and used heavily, the system can fall
 2163	 * into OOM very easily.
 2164	 * This function detects the ZONE_DMA/DMA32 size and configures the zone order.
2165 */
 2166	/* Is there a ZONE_NORMAL? (e.g. ppc has only a DMA zone.) */
2167 low_kmem_size = 0;
2168 total_size = 0;
2169 for_each_online_node(nid) {
2170 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2171 z = &NODE_DATA(nid)->node_zones[zone_type];
2172 if (populated_zone(z)) {
2173 if (zone_type < ZONE_NORMAL)
2174 low_kmem_size += z->present_pages;
2175 total_size += z->present_pages;
2176 }
2177 }
2178 }
 2179	if (!low_kmem_size ||  /* there is no DMA area. */
2180 low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2181 return ZONELIST_ORDER_NODE;
2182 /*
 2183	 * Look into each node's config.
 2184	 * If there is a node whose DMA/DMA32 memory is a very large part of its
 2185	 * local memory, NODE_ORDER may be suitable.
2186 */
37b07e41
LS
2187 average_size = total_size /
2188 (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
f0c0b2b8
KH
2189 for_each_online_node(nid) {
2190 low_kmem_size = 0;
2191 total_size = 0;
2192 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2193 z = &NODE_DATA(nid)->node_zones[zone_type];
2194 if (populated_zone(z)) {
2195 if (zone_type < ZONE_NORMAL)
2196 low_kmem_size += z->present_pages;
2197 total_size += z->present_pages;
2198 }
2199 }
2200 if (low_kmem_size &&
2201 total_size > average_size && /* ignore small node */
2202 low_kmem_size > total_size * 70/100)
2203 return ZONELIST_ORDER_NODE;
2204 }
2205 return ZONELIST_ORDER_ZONE;
2206}
2207
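/*
 * Illustrative sketch (not kernel code): the two global checks that
 * default_zonelist_order() applies before looking at individual nodes.
 * ORDER_NODE/ORDER_ZONE stand in for ZONELIST_ORDER_NODE/_ZONE.
 */
enum sketch_order { ORDER_NODE, ORDER_ZONE };

enum sketch_order pick_order(unsigned long low_kmem_pages,
			     unsigned long total_pages)
{
	/* No DMA/DMA32 at all: nothing to protect, so order by node locality. */
	if (!low_kmem_pages)
		return ORDER_NODE;
	/* DMA/DMA32 covers more than half of memory: it is not scarce. */
	if (low_kmem_pages > total_pages / 2)
		return ORDER_NODE;
	/*
	 * Otherwise the per-node 70% check in the function above may still
	 * choose node order; this sketch simply falls back to zone order.
	 */
	return ORDER_ZONE;
}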
2208static void set_zonelist_order(void)
2209{
2210 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2211 current_zonelist_order = default_zonelist_order();
2212 else
2213 current_zonelist_order = user_zonelist_order;
2214}
2215
2216static void build_zonelists(pg_data_t *pgdat)
2217{
2218 int j, node, load;
2219 enum zone_type i;
1da177e4 2220 nodemask_t used_mask;
f0c0b2b8
KH
2221 int local_node, prev_node;
2222 struct zonelist *zonelist;
2223 int order = current_zonelist_order;
1da177e4
LT
2224
2225 /* initialize zonelists */
523b9458 2226 for (i = 0; i < MAX_ZONELISTS; i++) {
1da177e4 2227 zonelist = pgdat->node_zonelists + i;
dd1a239f
MG
2228 zonelist->_zonerefs[0].zone = NULL;
2229 zonelist->_zonerefs[0].zone_idx = 0;
1da177e4
LT
2230 }
2231
2232 /* NUMA-aware ordering of nodes */
2233 local_node = pgdat->node_id;
2234 load = num_online_nodes();
2235 prev_node = local_node;
2236 nodes_clear(used_mask);
f0c0b2b8
KH
2237
2238 memset(node_load, 0, sizeof(node_load));
2239 memset(node_order, 0, sizeof(node_order));
2240 j = 0;
2241
1da177e4 2242 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
9eeff239
CL
2243 int distance = node_distance(local_node, node);
2244
2245 /*
2246 * If another node is sufficiently far away then it is better
2247 * to reclaim pages in a zone before going off node.
2248 */
2249 if (distance > RECLAIM_DISTANCE)
2250 zone_reclaim_mode = 1;
2251
1da177e4
LT
2252 /*
2253 * We don't want to pressure a particular node.
2254 * So adding penalty to the first node in same
2255 * distance group to make it round-robin.
2256 */
9eeff239 2257 if (distance != node_distance(local_node, prev_node))
f0c0b2b8
KH
2258 node_load[node] = load;
2259
1da177e4
LT
2260 prev_node = node;
2261 load--;
f0c0b2b8
KH
2262 if (order == ZONELIST_ORDER_NODE)
2263 build_zonelists_in_node_order(pgdat, node);
2264 else
2265 node_order[j++] = node; /* remember order */
2266 }
1da177e4 2267
f0c0b2b8
KH
2268 if (order == ZONELIST_ORDER_ZONE) {
2269 /* calculate node order -- i.e., DMA last! */
2270 build_zonelists_in_zone_order(pgdat, j);
1da177e4 2271 }
523b9458
CL
2272
2273 build_thisnode_zonelists(pgdat);
1da177e4
LT
2274}
2275
9276b1bc 2276/* Construct the zonelist performance cache - see further mmzone.h */
f0c0b2b8 2277static void build_zonelist_cache(pg_data_t *pgdat)
9276b1bc 2278{
54a6eb5c
MG
2279 struct zonelist *zonelist;
2280 struct zonelist_cache *zlc;
dd1a239f 2281 struct zoneref *z;
9276b1bc 2282
54a6eb5c
MG
2283 zonelist = &pgdat->node_zonelists[0];
2284 zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2285 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
dd1a239f
MG
2286 for (z = zonelist->_zonerefs; z->zone; z++)
2287 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
9276b1bc
PJ
2288}
2289
f0c0b2b8 2290
1da177e4
LT
2291#else /* CONFIG_NUMA */
2292
f0c0b2b8
KH
2293static void set_zonelist_order(void)
2294{
2295 current_zonelist_order = ZONELIST_ORDER_ZONE;
2296}
2297
2298static void build_zonelists(pg_data_t *pgdat)
1da177e4 2299{
19655d34 2300 int node, local_node;
54a6eb5c
MG
2301 enum zone_type j;
2302 struct zonelist *zonelist;
1da177e4
LT
2303
2304 local_node = pgdat->node_id;
1da177e4 2305
54a6eb5c
MG
2306 zonelist = &pgdat->node_zonelists[0];
2307 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
1da177e4 2308
54a6eb5c
MG
2309 /*
2310 * Now we build the zonelist so that it contains the zones
2311 * of all the other nodes.
2312 * We don't want to pressure a particular node, so when
2313 * building the zones for node N, we make sure that the
2314 * zones coming right after the local ones are those from
2315 * node N+1 (modulo N)
2316 */
2317 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2318 if (!node_online(node))
2319 continue;
2320 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2321 MAX_NR_ZONES - 1);
1da177e4 2322 }
54a6eb5c
MG
2323 for (node = 0; node < local_node; node++) {
2324 if (!node_online(node))
2325 continue;
2326 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2327 MAX_NR_ZONES - 1);
2328 }
2329
dd1a239f
MG
2330 zonelist->_zonerefs[j].zone = NULL;
2331 zonelist->_zonerefs[j].zone_idx = 0;
1da177e4
LT
2332}
2333
9276b1bc 2334/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
f0c0b2b8 2335static void build_zonelist_cache(pg_data_t *pgdat)
9276b1bc 2336{
54a6eb5c
MG
2337 pgdat->node_zonelists[0].zlcache_ptr = NULL;
2338 pgdat->node_zonelists[1].zlcache_ptr = NULL;
9276b1bc
PJ
2339}
2340
1da177e4
LT
2341#endif /* CONFIG_NUMA */
2342
6811378e 2343/* The return type is int just to satisfy stop_machine_run() */
f0c0b2b8 2344static int __build_all_zonelists(void *dummy)
1da177e4 2345{
6811378e 2346 int nid;
9276b1bc
PJ
2347
2348 for_each_online_node(nid) {
7ea1530a
CL
2349 pg_data_t *pgdat = NODE_DATA(nid);
2350
2351 build_zonelists(pgdat);
2352 build_zonelist_cache(pgdat);
9276b1bc 2353 }
6811378e
YG
2354 return 0;
2355}
2356
f0c0b2b8 2357void build_all_zonelists(void)
6811378e 2358{
f0c0b2b8
KH
2359 set_zonelist_order();
2360
6811378e 2361 if (system_state == SYSTEM_BOOTING) {
423b41d7 2362 __build_all_zonelists(NULL);
6811378e
YG
2363 cpuset_init_current_mems_allowed();
2364 } else {
183ff22b 2365 /* we have to stop all cpus to guarantee there is no user
6811378e
YG
2366 of zonelist */
2367 stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
2368 /* cpuset refresh routine should be here */
2369 }
bd1e22b8 2370 vm_total_pages = nr_free_pagecache_pages();
9ef9acb0
MG
2371 /*
2372 * Disable grouping by mobility if the number of pages in the
2373 * system is too low to allow the mechanism to work. It would be
2374 * more accurate, but expensive to check per-zone. This check is
 2375	 * made on memory hot-add, so a system can start with mobility grouping
 2376	 * disabled and enable it later.
2377 */
d9c23400 2378 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
9ef9acb0
MG
2379 page_group_by_mobility_disabled = 1;
2380 else
2381 page_group_by_mobility_disabled = 0;
2382
2383 printk("Built %i zonelists in %s order, mobility grouping %s. "
2384 "Total pages: %ld\n",
f0c0b2b8
KH
2385 num_online_nodes(),
2386 zonelist_order_name[current_zonelist_order],
9ef9acb0 2387 page_group_by_mobility_disabled ? "off" : "on",
f0c0b2b8
KH
2388 vm_total_pages);
2389#ifdef CONFIG_NUMA
2390 printk("Policy zone: %s\n", zone_names[policy_zone]);
2391#endif
1da177e4
LT
2392}
2393
2394/*
2395 * Helper functions to size the waitqueue hash table.
2396 * Essentially these want to choose hash table sizes sufficiently
2397 * large so that collisions trying to wait on pages are rare.
2398 * But in fact, the number of active page waitqueues on typical
2399 * systems is ridiculously low, less than 200. So this is even
2400 * conservative, even though it seems large.
2401 *
2402 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
2403 * waitqueues, i.e. the size of the waitq table given the number of pages.
2404 */
2405#define PAGES_PER_WAITQUEUE 256
2406
cca448fe 2407#ifndef CONFIG_MEMORY_HOTPLUG
02b694de 2408static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
1da177e4
LT
2409{
2410 unsigned long size = 1;
2411
2412 pages /= PAGES_PER_WAITQUEUE;
2413
2414 while (size < pages)
2415 size <<= 1;
2416
2417 /*
2418 * Once we have dozens or even hundreds of threads sleeping
2419 * on IO we've got bigger problems than wait queue collision.
2420 * Limit the size of the wait table to a reasonable size.
2421 */
2422 size = min(size, 4096UL);
2423
2424 return max(size, 4UL);
2425}
cca448fe
YG
2426#else
2427/*
2428 * A zone's size might be changed by hot-add, so it is not possible to determine
2429 * a suitable size for its wait_table. So we use the maximum size now.
2430 *
2431 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
2432 *
2433 * i386 (preemption config) : 4096 x 16 = 64Kbyte.
2434 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
2435 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
2436 *
2437 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
2438 * or more by the traditional way. (See above). It equals:
2439 *
2440 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
2441 * ia64(16K page size) : = ( 8G + 4M)byte.
2442 * powerpc (64K page size) : = (32G +16M)byte.
2443 */
2444static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2445{
2446 return 4096UL;
2447}
2448#endif
1da177e4
LT
2449
2450/*
2451 * This is an integer logarithm so that shifts can be used later
2452 * to extract the more random high bits from the multiplicative
2453 * hash function before the remainder is taken.
2454 */
2455static inline unsigned long wait_table_bits(unsigned long size)
2456{
2457 return ffz(~size);
2458}
2459
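/*
 * Worked example (illustrative, standalone): how the wait-table helpers above
 * size the hashed waitqueue array. For a 1 GiB zone of 4 KiB pages there are
 * 262144 pages; 262144 / PAGES_PER_WAITQUEUE(256) = 1024, already a power of
 * two and within the [4, 4096] clamp, so 1024 entries are used and
 * wait_table_bits() reports 10.
 */
#include <stdio.h>

unsigned long sketch_wait_entries(unsigned long pages)
{
	unsigned long size = 1;

	pages /= 256;			/* PAGES_PER_WAITQUEUE */
	while (size < pages)
		size <<= 1;		/* round up to a power of two */
	if (size > 4096UL)
		size = 4096UL;		/* cap the table size */
	return size < 4UL ? 4UL : size;	/* but never fewer than 4 entries */
}

unsigned long sketch_wait_bits(unsigned long size)
{
	unsigned long bits = 0;

	while (size > 1) {		/* size is a power of two here */
		size >>= 1;
		bits++;
	}
	return bits;			/* equals ffz(~size) for a power of two */
}

int main(void)
{
	unsigned long entries = sketch_wait_entries(262144UL);

	printf("entries=%lu bits=%lu\n", entries, sketch_wait_bits(entries));
	return 0;			/* prints: entries=1024 bits=10 */
}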
2460#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
2461
56fd56b8 2462/*
d9c23400 2463 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
56fd56b8
MG
2464 * of blocks reserved is based on zone->pages_min. The memory within the
2465 * reserve will tend to store contiguous free pages. Setting min_free_kbytes
2466 * higher will lead to a bigger reserve which will get freed as contiguous
2467 * blocks as reclaim kicks in
2468 */
2469static void setup_zone_migrate_reserve(struct zone *zone)
2470{
2471 unsigned long start_pfn, pfn, end_pfn;
2472 struct page *page;
2473 unsigned long reserve, block_migratetype;
2474
2475 /* Get the start pfn, end pfn and the number of blocks to reserve */
2476 start_pfn = zone->zone_start_pfn;
2477 end_pfn = start_pfn + zone->spanned_pages;
d9c23400
MG
2478 reserve = roundup(zone->pages_min, pageblock_nr_pages) >>
2479 pageblock_order;
56fd56b8 2480
d9c23400 2481 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
56fd56b8
MG
2482 if (!pfn_valid(pfn))
2483 continue;
2484 page = pfn_to_page(pfn);
2485
 2486		/* Blocks with reserved pages will never be freed, skip them. */
2487 if (PageReserved(page))
2488 continue;
2489
2490 block_migratetype = get_pageblock_migratetype(page);
2491
2492 /* If this block is reserved, account for it */
2493 if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
2494 reserve--;
2495 continue;
2496 }
2497
2498 /* Suitable for reserving if this block is movable */
2499 if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
2500 set_pageblock_migratetype(page, MIGRATE_RESERVE);
2501 move_freepages_block(zone, page, MIGRATE_RESERVE);
2502 reserve--;
2503 continue;
2504 }
2505
2506 /*
2507 * If the reserve is met and this is a previous reserved block,
2508 * take it back
2509 */
2510 if (block_migratetype == MIGRATE_RESERVE) {
2511 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2512 move_freepages_block(zone, page, MIGRATE_MOVABLE);
2513 }
2514 }
2515}
ac0e5b7a 2516
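/*
 * Worked example (illustrative, standalone): how many pageblocks
 * setup_zone_migrate_reserve() marks MIGRATE_RESERVE. The inputs below are
 * example values, not taken from a real system.
 */
#include <stdio.h>

int main(void)
{
	unsigned long pages_min = 1000;		/* example zone->pages_min */
	unsigned long pageblock_order = 10;	/* example: 1024-page blocks */
	unsigned long pageblock_nr_pages = 1UL << pageblock_order;
	unsigned long reserve;

	/*
	 * roundup(pages_min, pageblock_nr_pages) >> pageblock_order is the
	 * ceiling division of pages_min by the pageblock size.
	 */
	reserve = (pages_min + pageblock_nr_pages - 1) / pageblock_nr_pages;
	printf("reserve %lu pageblock(s)\n", reserve);	/* prints: reserve 1 pageblock(s) */
	return 0;
}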
1da177e4
LT
2517/*
2518 * Initially all pages are reserved - free ones are freed
2519 * up by free_all_bootmem() once the early boot process is
2520 * done. Non-atomic initialization, single-pass.
2521 */
c09b4240 2522void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
a2f3aa02 2523 unsigned long start_pfn, enum memmap_context context)
1da177e4 2524{
1da177e4 2525 struct page *page;
29751f69
AW
2526 unsigned long end_pfn = start_pfn + size;
2527 unsigned long pfn;
86051ca5 2528 struct zone *z;
1da177e4 2529
86051ca5 2530 z = &NODE_DATA(nid)->node_zones[zone];
cbe8dd4a 2531 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
a2f3aa02
DH
2532 /*
2533 * There can be holes in boot-time mem_map[]s
2534 * handed to this function. They do not
2535 * exist on hotplugged memory.
2536 */
2537 if (context == MEMMAP_EARLY) {
2538 if (!early_pfn_valid(pfn))
2539 continue;
2540 if (!early_pfn_in_nid(pfn, nid))
2541 continue;
2542 }
d41dee36
AW
2543 page = pfn_to_page(pfn);
2544 set_page_links(page, zone, nid, pfn);
7835e98b 2545 init_page_count(page);
1da177e4
LT
2546 reset_page_mapcount(page);
2547 SetPageReserved(page);
b2a0ac88
MG
2548 /*
2549 * Mark the block movable so that blocks are reserved for
2550 * movable at startup. This will force kernel allocations
2551 * to reserve their blocks rather than leaking throughout
2552 * the address space during boot when many long-lived
56fd56b8
MG
2553 * kernel allocations are made. Later some blocks near
2554 * the start are marked MIGRATE_RESERVE by
2555 * setup_zone_migrate_reserve()
86051ca5
KH
2556 *
2557 * bitmap is created for zone's valid pfn range. but memmap
2558 * can be created for invalid pages (for alignment)
2559 * check here not to call set_pageblock_migratetype() against
2560 * pfn out of zone.
b2a0ac88 2561 */
86051ca5
KH
2562 if ((z->zone_start_pfn <= pfn)
2563 && (pfn < z->zone_start_pfn + z->spanned_pages)
2564 && !(pfn & (pageblock_nr_pages - 1)))
56fd56b8 2565 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
b2a0ac88 2566
1da177e4
LT
2567 INIT_LIST_HEAD(&page->lru);
2568#ifdef WANT_PAGE_VIRTUAL
2569 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
2570 if (!is_highmem_idx(zone))
3212c6be 2571 set_page_address(page, __va(pfn << PAGE_SHIFT));
1da177e4 2572#endif
1da177e4
LT
2573 }
2574}
2575
1e548deb 2576static void __meminit zone_init_free_lists(struct zone *zone)
1da177e4 2577{
b2a0ac88
MG
2578 int order, t;
2579 for_each_migratetype_order(order, t) {
2580 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1da177e4
LT
2581 zone->free_area[order].nr_free = 0;
2582 }
2583}
2584
2585#ifndef __HAVE_ARCH_MEMMAP_INIT
2586#define memmap_init(size, nid, zone, start_pfn) \
a2f3aa02 2587 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
1da177e4
LT
2588#endif
2589
1d6f4e60 2590static int zone_batchsize(struct zone *zone)
e7c8d5c9
CL
2591{
2592 int batch;
2593
2594 /*
2595 * The per-cpu-pages pools are set to around 1000th of the
ba56e91c 2596 * size of the zone. But no more than 1/2 of a meg.
e7c8d5c9
CL
2597 *
2598 * OK, so we don't know how big the cache is. So guess.
2599 */
2600 batch = zone->present_pages / 1024;
ba56e91c
SR
2601 if (batch * PAGE_SIZE > 512 * 1024)
2602 batch = (512 * 1024) / PAGE_SIZE;
e7c8d5c9
CL
2603 batch /= 4; /* We effectively *= 4 below */
2604 if (batch < 1)
2605 batch = 1;
2606
2607 /*
0ceaacc9
NP
2608 * Clamp the batch to a 2^n - 1 value. Having a power
2609 * of 2 value was found to be more likely to have
2610 * suboptimal cache aliasing properties in some cases.
e7c8d5c9 2611 *
0ceaacc9
NP
2612 * For example if 2 tasks are alternately allocating
2613 * batches of pages, one task can end up with a lot
2614 * of pages of one half of the possible page colors
2615 * and the other with pages of the other colors.
e7c8d5c9 2616 */
0ceaacc9 2617 batch = (1 << (fls(batch + batch/2)-1)) - 1;
ba56e91c 2618
e7c8d5c9
CL
2619 return batch;
2620}
2621
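/*
 * Worked example (illustrative, standalone): zone_batchsize() for a 1 GiB
 * zone of 4 KiB pages (262144 present pages). sketch_fls() is a stand-in for
 * the kernel's fls().
 */
#include <stdio.h>

int sketch_fls(int x)
{
	int bit = 0;

	while (x) {
		x >>= 1;
		bit++;
	}
	return bit;
}

int main(void)
{
	unsigned long present_pages = 262144;	/* 1 GiB of 4 KiB pages */
	int batch = present_pages / 1024;	/* ~1/1000th of the zone: 256 */

	if (batch * 4096 > 512 * 1024)		/* cap at half a megabyte */
		batch = (512 * 1024) / 4096;	/* 128 */
	batch /= 4;				/* 32; effectively *= 4 later */
	if (batch < 1)
		batch = 1;
	/* Clamp to (2^n - 1) to avoid power-of-two cache aliasing. */
	batch = (1 << (sketch_fls(batch + batch / 2) - 1)) - 1;
	printf("batch = %d\n", batch);		/* prints: batch = 31 */
	return 0;
}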
2caaad41
CL
2622inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
2623{
2624 struct per_cpu_pages *pcp;
2625
1c6fe946
MD
2626 memset(p, 0, sizeof(*p));
2627
3dfa5721 2628 pcp = &p->pcp;
2caaad41 2629 pcp->count = 0;
2caaad41
CL
2630 pcp->high = 6 * batch;
2631 pcp->batch = max(1UL, 1 * batch);
2632 INIT_LIST_HEAD(&pcp->list);
2caaad41
CL
2633}
2634
8ad4b1fb
RS
2635/*
2636 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
2637 * to the value high for the pageset p.
2638 */
2639
2640static void setup_pagelist_highmark(struct per_cpu_pageset *p,
2641 unsigned long high)
2642{
2643 struct per_cpu_pages *pcp;
2644
3dfa5721 2645 pcp = &p->pcp;
8ad4b1fb
RS
2646 pcp->high = high;
2647 pcp->batch = max(1UL, high/4);
2648 if ((high/4) > (PAGE_SHIFT * 8))
2649 pcp->batch = PAGE_SHIFT * 8;
2650}
2651
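/*
 * Worked example (illustrative, standalone): setup_pagelist_highmark() derives
 * the pcp batch from the requested high-water mark and caps it at
 * PAGE_SHIFT * 8 (96 with 4 KiB pages). The values below are example inputs.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 12;		/* assume 4 KiB pages */
	unsigned long high = 1000;		/* requested high-water mark */
	unsigned long batch = high / 4;		/* 250 */

	if (batch < 1)
		batch = 1;			/* max(1UL, high/4) */
	if (high / 4 > page_shift * 8)
		batch = page_shift * 8;		/* capped to 96 */
	printf("high=%lu batch=%lu\n", high, batch);	/* prints: high=1000 batch=96 */
	return 0;
}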
2652
e7c8d5c9
CL
2653#ifdef CONFIG_NUMA
2654/*
2caaad41
CL
2655 * Boot pageset table. One per cpu which is going to be used for all
2656 * zones and all nodes. The parameters will be set in such a way
2657 * that an item put on a list will immediately be handed over to
2658 * the buddy list. This is safe since pageset manipulation is done
2659 * with interrupts disabled.
2660 *
2661 * Some NUMA counter updates may also be caught by the boot pagesets.
b7c84c6a
CL
2662 *
2663 * The boot_pagesets must be kept even after bootup is complete for
2664 * unused processors and/or zones. They do play a role for bootstrapping
2665 * hotplugged processors.
2666 *
2667 * zoneinfo_show() and maybe other functions do
2668 * not check if the processor is online before following the pageset pointer.
2669 * Other parts of the kernel may not check if the zone is available.
2caaad41 2670 */
88a2a4ac 2671static struct per_cpu_pageset boot_pageset[NR_CPUS];
2caaad41
CL
2672
2673/*
2674 * Dynamically allocate memory for the
e7c8d5c9
CL
2675 * per cpu pageset array in struct zone.
2676 */
6292d9aa 2677static int __cpuinit process_zones(int cpu)
e7c8d5c9
CL
2678{
2679 struct zone *zone, *dzone;
37c0708d
CL
2680 int node = cpu_to_node(cpu);
2681
2682 node_set_state(node, N_CPU); /* this node has a cpu */
e7c8d5c9
CL
2683
2684 for_each_zone(zone) {
e7c8d5c9 2685
66a55030
CL
2686 if (!populated_zone(zone))
2687 continue;
2688
23316bc8 2689 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
37c0708d 2690 GFP_KERNEL, node);
23316bc8 2691 if (!zone_pcp(zone, cpu))
e7c8d5c9 2692 goto bad;
e7c8d5c9 2693
23316bc8 2694 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
8ad4b1fb
RS
2695
2696 if (percpu_pagelist_fraction)
2697 setup_pagelist_highmark(zone_pcp(zone, cpu),
2698 (zone->present_pages / percpu_pagelist_fraction));
e7c8d5c9
CL
2699 }
2700
2701 return 0;
2702bad:
2703 for_each_zone(dzone) {
64191688
AM
2704 if (!populated_zone(dzone))
2705 continue;
e7c8d5c9
CL
2706 if (dzone == zone)
2707 break;
23316bc8
NP
2708 kfree(zone_pcp(dzone, cpu));
2709 zone_pcp(dzone, cpu) = NULL;
e7c8d5c9
CL
2710 }
2711 return -ENOMEM;
2712}
2713
2714static inline void free_zone_pagesets(int cpu)
2715{
e7c8d5c9
CL
2716 struct zone *zone;
2717
2718 for_each_zone(zone) {
2719 struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
2720
f3ef9ead
DR
2721 /* Free per_cpu_pageset if it is slab allocated */
2722 if (pset != &boot_pageset[cpu])
2723 kfree(pset);
e7c8d5c9 2724 zone_pcp(zone, cpu) = NULL;
e7c8d5c9 2725 }
e7c8d5c9
CL
2726}
2727
9c7b216d 2728static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
e7c8d5c9
CL
2729 unsigned long action,
2730 void *hcpu)
2731{
2732 int cpu = (long)hcpu;
2733 int ret = NOTIFY_OK;
2734
2735 switch (action) {
ce421c79 2736 case CPU_UP_PREPARE:
8bb78442 2737 case CPU_UP_PREPARE_FROZEN:
ce421c79
AW
2738 if (process_zones(cpu))
2739 ret = NOTIFY_BAD;
2740 break;
2741 case CPU_UP_CANCELED:
8bb78442 2742 case CPU_UP_CANCELED_FROZEN:
ce421c79 2743 case CPU_DEAD:
8bb78442 2744 case CPU_DEAD_FROZEN:
ce421c79
AW
2745 free_zone_pagesets(cpu);
2746 break;
2747 default:
2748 break;
e7c8d5c9
CL
2749 }
2750 return ret;
2751}
2752
74b85f37 2753static struct notifier_block __cpuinitdata pageset_notifier =
e7c8d5c9
CL
2754 { &pageset_cpuup_callback, NULL, 0 };
2755
78d9955b 2756void __init setup_per_cpu_pageset(void)
e7c8d5c9
CL
2757{
2758 int err;
2759
2760 /* Initialize per_cpu_pageset for cpu 0.
2761 * A cpuup callback will do this for every cpu
2762 * as it comes online
2763 */
2764 err = process_zones(smp_processor_id());
2765 BUG_ON(err);
2766 register_cpu_notifier(&pageset_notifier);
2767}
2768
2769#endif
2770
577a32f6 2771static noinline __init_refok
cca448fe 2772int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
ed8ece2e
DH
2773{
2774 int i;
2775 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe 2776 size_t alloc_size;
ed8ece2e
DH
2777
2778 /*
2779 * The per-page waitqueue mechanism uses hashed waitqueues
2780 * per zone.
2781 */
02b694de
YG
2782 zone->wait_table_hash_nr_entries =
2783 wait_table_hash_nr_entries(zone_size_pages);
2784 zone->wait_table_bits =
2785 wait_table_bits(zone->wait_table_hash_nr_entries);
cca448fe
YG
2786 alloc_size = zone->wait_table_hash_nr_entries
2787 * sizeof(wait_queue_head_t);
2788
2789 if (system_state == SYSTEM_BOOTING) {
2790 zone->wait_table = (wait_queue_head_t *)
2791 alloc_bootmem_node(pgdat, alloc_size);
2792 } else {
2793 /*
2794 * This case means that a zone whose size was 0 gets new memory
2795 * via memory hot-add.
2796 * But it may be the case that a new node was hot-added. In
2797 * this case vmalloc() will not be able to use this new node's
2798 * memory - this wait_table must be initialized to use this new
2799 * node itself as well.
2800 * To use this new node's memory, further consideration will be
2801 * necessary.
2802 */
8691f3a7 2803 zone->wait_table = vmalloc(alloc_size);
cca448fe
YG
2804 }
2805 if (!zone->wait_table)
2806 return -ENOMEM;
ed8ece2e 2807
02b694de 2808 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
ed8ece2e 2809 init_waitqueue_head(zone->wait_table + i);
cca448fe
YG
2810
2811 return 0;
ed8ece2e
DH
2812}
2813
c09b4240 2814static __meminit void zone_pcp_init(struct zone *zone)
ed8ece2e
DH
2815{
2816 int cpu;
2817 unsigned long batch = zone_batchsize(zone);
2818
2819 for (cpu = 0; cpu < NR_CPUS; cpu++) {
2820#ifdef CONFIG_NUMA
2821 /* Early boot. Slab allocator not functional yet */
23316bc8 2822 zone_pcp(zone, cpu) = &boot_pageset[cpu];
ed8ece2e
DH
2823 setup_pageset(&boot_pageset[cpu],0);
2824#else
2825 setup_pageset(zone_pcp(zone,cpu), batch);
2826#endif
2827 }
f5335c0f
AB
2828 if (zone->present_pages)
2829 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
2830 zone->name, zone->present_pages, batch);
ed8ece2e
DH
2831}
2832
718127cc
YG
2833__meminit int init_currently_empty_zone(struct zone *zone,
2834 unsigned long zone_start_pfn,
a2f3aa02
DH
2835 unsigned long size,
2836 enum memmap_context context)
ed8ece2e
DH
2837{
2838 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe
YG
2839 int ret;
2840 ret = zone_wait_table_init(zone, size);
2841 if (ret)
2842 return ret;
ed8ece2e
DH
2843 pgdat->nr_zones = zone_idx(zone) + 1;
2844
ed8ece2e
DH
2845 zone->zone_start_pfn = zone_start_pfn;
2846
2847 memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
2848
1e548deb 2849 zone_init_free_lists(zone);
718127cc
YG
2850
2851 return 0;
ed8ece2e
DH
2852}
2853
c713216d
MG
2854#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
2855/*
2856 * Basic iterator support. Return the first range of PFNs for a node
2857 * Note: nid == MAX_NUMNODES returns first region regardless of node
2858 */
a3142c8e 2859static int __meminit first_active_region_index_in_nid(int nid)
c713216d
MG
2860{
2861 int i;
2862
2863 for (i = 0; i < nr_nodemap_entries; i++)
2864 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
2865 return i;
2866
2867 return -1;
2868}
2869
2870/*
2871 * Basic iterator support. Return the next active range of PFNs for a node
183ff22b 2872 * Note: nid == MAX_NUMNODES returns next region regardless of node
c713216d 2873 */
a3142c8e 2874static int __meminit next_active_region_index_in_nid(int index, int nid)
c713216d
MG
2875{
2876 for (index = index + 1; index < nr_nodemap_entries; index++)
2877 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
2878 return index;
2879
2880 return -1;
2881}
2882
2883#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
2884/*
2885 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
2886 * Architectures may implement their own version but if add_active_range()
2887 * was used and there are no special requirements, this is a convenient
2888 * alternative
2889 */
6f076f5d 2890int __meminit early_pfn_to_nid(unsigned long pfn)
c713216d
MG
2891{
2892 int i;
2893
2894 for (i = 0; i < nr_nodemap_entries; i++) {
2895 unsigned long start_pfn = early_node_map[i].start_pfn;
2896 unsigned long end_pfn = early_node_map[i].end_pfn;
2897
2898 if (start_pfn <= pfn && pfn < end_pfn)
2899 return early_node_map[i].nid;
2900 }
2901
2902 return 0;
2903}
2904#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
2905
2906/* Basic iterator support to walk early_node_map[] */
2907#define for_each_active_range_index_in_nid(i, nid) \
2908 for (i = first_active_region_index_in_nid(nid); i != -1; \
2909 i = next_active_region_index_in_nid(i, nid))
2910
2911/**
2912 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
88ca3b94
RD
2913 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
2914 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
c713216d
MG
2915 *
2916 * If an architecture guarantees that all ranges registered with
 2917 * add_active_ranges() contain no holes and may be freed, this
 2918 * function may be used instead of calling free_bootmem() manually.
2919 */
2920void __init free_bootmem_with_active_regions(int nid,
2921 unsigned long max_low_pfn)
2922{
2923 int i;
2924
2925 for_each_active_range_index_in_nid(i, nid) {
2926 unsigned long size_pages = 0;
2927 unsigned long end_pfn = early_node_map[i].end_pfn;
2928
2929 if (early_node_map[i].start_pfn >= max_low_pfn)
2930 continue;
2931
2932 if (end_pfn > max_low_pfn)
2933 end_pfn = max_low_pfn;
2934
2935 size_pages = end_pfn - early_node_map[i].start_pfn;
2936 free_bootmem_node(NODE_DATA(early_node_map[i].nid),
2937 PFN_PHYS(early_node_map[i].start_pfn),
2938 size_pages << PAGE_SHIFT);
2939 }
2940}
2941
2942/**
2943 * sparse_memory_present_with_active_regions - Call memory_present for each active range
88ca3b94 2944 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
c713216d
MG
2945 *
2946 * If an architecture guarantees that all ranges registered with
2947 * add_active_ranges() contain no holes and may be freed, this
88ca3b94 2948 * function may be used instead of calling memory_present() manually.
c713216d
MG
2949 */
2950void __init sparse_memory_present_with_active_regions(int nid)
2951{
2952 int i;
2953
2954 for_each_active_range_index_in_nid(i, nid)
2955 memory_present(early_node_map[i].nid,
2956 early_node_map[i].start_pfn,
2957 early_node_map[i].end_pfn);
2958}
2959
fb01439c
MG
2960/**
2961 * push_node_boundaries - Push node boundaries to at least the requested boundary
2962 * @nid: The nid of the node to push the boundary for
2963 * @start_pfn: The start pfn of the node
2964 * @end_pfn: The end pfn of the node
2965 *
 2966 * In reserve-based hot-add, a mem_map is allocated that remains unused until hot-add
2967 * time. Specifically, on x86_64, SRAT will report ranges that can potentially
2968 * be hotplugged even though no physical memory exists. This function allows
2969 * an arch to push out the node boundaries so mem_map is allocated that can
2970 * be used later.
2971 */
2972#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
2973void __init push_node_boundaries(unsigned int nid,
2974 unsigned long start_pfn, unsigned long end_pfn)
2975{
2976 printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n",
2977 nid, start_pfn, end_pfn);
2978
2979 /* Initialise the boundary for this node if necessary */
2980 if (node_boundary_end_pfn[nid] == 0)
2981 node_boundary_start_pfn[nid] = -1UL;
2982
2983 /* Update the boundaries */
2984 if (node_boundary_start_pfn[nid] > start_pfn)
2985 node_boundary_start_pfn[nid] = start_pfn;
2986 if (node_boundary_end_pfn[nid] < end_pfn)
2987 node_boundary_end_pfn[nid] = end_pfn;
2988}
2989
2990/* If necessary, push the node boundary out for reserve hotadd */
98011f56 2991static void __meminit account_node_boundary(unsigned int nid,
fb01439c
MG
2992 unsigned long *start_pfn, unsigned long *end_pfn)
2993{
2994 printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n",
2995 nid, *start_pfn, *end_pfn);
2996
2997 /* Return if boundary information has not been provided */
2998 if (node_boundary_end_pfn[nid] == 0)
2999 return;
3000
3001 /* Check the boundaries and update if necessary */
3002 if (node_boundary_start_pfn[nid] < *start_pfn)
3003 *start_pfn = node_boundary_start_pfn[nid];
3004 if (node_boundary_end_pfn[nid] > *end_pfn)
3005 *end_pfn = node_boundary_end_pfn[nid];
3006}
3007#else
3008void __init push_node_boundaries(unsigned int nid,
3009 unsigned long start_pfn, unsigned long end_pfn) {}
3010
98011f56 3011static void __meminit account_node_boundary(unsigned int nid,
fb01439c
MG
3012 unsigned long *start_pfn, unsigned long *end_pfn) {}
3013#endif
3014
3015
c713216d
MG
3016/**
3017 * get_pfn_range_for_nid - Return the start and end page frames for a node
88ca3b94
RD
3018 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3019 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3020 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
c713216d
MG
3021 *
3022 * It returns the start and end page frame of a node based on information
3023 * provided by an arch calling add_active_range(). If called for a node
3024 * with no available memory, a warning is printed and the start and end
88ca3b94 3025 * PFNs will be 0.
c713216d 3026 */
a3142c8e 3027void __meminit get_pfn_range_for_nid(unsigned int nid,
c713216d
MG
3028 unsigned long *start_pfn, unsigned long *end_pfn)
3029{
3030 int i;
3031 *start_pfn = -1UL;
3032 *end_pfn = 0;
3033
3034 for_each_active_range_index_in_nid(i, nid) {
3035 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3036 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3037 }
3038
633c0666 3039 if (*start_pfn == -1UL)
c713216d 3040 *start_pfn = 0;
fb01439c
MG
3041
3042 /* Push the node boundaries out if requested */
3043 account_node_boundary(nid, start_pfn, end_pfn);
c713216d
MG
3044}
3045
2a1e274a
MG
3046/*
3047 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 3048 * assumption is made that zones within a node are ordered in monotonically
 3049 * increasing memory addresses so that the "highest" populated zone is used.
3050 */
3051void __init find_usable_zone_for_movable(void)
3052{
3053 int zone_index;
3054 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3055 if (zone_index == ZONE_MOVABLE)
3056 continue;
3057
3058 if (arch_zone_highest_possible_pfn[zone_index] >
3059 arch_zone_lowest_possible_pfn[zone_index])
3060 break;
3061 }
3062
3063 VM_BUG_ON(zone_index == -1);
3064 movable_zone = zone_index;
3065}
3066
3067/*
3068 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 3069 * because it is sized independently of the architecture. Unlike the other zones,
3070 * the starting point for ZONE_MOVABLE is not fixed. It may be different
3071 * in each node depending on the size of each node and how evenly kernelcore
3072 * is distributed. This helper function adjusts the zone ranges
3073 * provided by the architecture for a given node by using the end of the
3074 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 3075 * zones within a node are in order of monotonically increasing memory addresses.
3076 */
3077void __meminit adjust_zone_range_for_zone_movable(int nid,
3078 unsigned long zone_type,
3079 unsigned long node_start_pfn,
3080 unsigned long node_end_pfn,
3081 unsigned long *zone_start_pfn,
3082 unsigned long *zone_end_pfn)
3083{
3084 /* Only adjust if ZONE_MOVABLE is on this node */
3085 if (zone_movable_pfn[nid]) {
3086 /* Size ZONE_MOVABLE */
3087 if (zone_type == ZONE_MOVABLE) {
3088 *zone_start_pfn = zone_movable_pfn[nid];
3089 *zone_end_pfn = min(node_end_pfn,
3090 arch_zone_highest_possible_pfn[movable_zone]);
3091
3092 /* Adjust for ZONE_MOVABLE starting within this range */
3093 } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3094 *zone_end_pfn > zone_movable_pfn[nid]) {
3095 *zone_end_pfn = zone_movable_pfn[nid];
3096
3097 /* Check if this whole range is within ZONE_MOVABLE */
3098 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
3099 *zone_start_pfn = *zone_end_pfn;
3100 }
3101}
3102
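/*
 * Illustrative sketch (not kernel code): the three cases handled by
 * adjust_zone_range_for_zone_movable() above, for a node whose ZONE_MOVABLE
 * region begins at movable_start. All identifiers are stand-ins.
 */
void sketch_adjust(unsigned long movable_start, int is_movable_zone,
		   unsigned long node_end, unsigned long highest_possible,
		   unsigned long *zone_start, unsigned long *zone_end)
{
	if (!movable_start)
		return;				/* no ZONE_MOVABLE on this node */

	if (is_movable_zone) {
		/* ZONE_MOVABLE itself runs from movable_start to the node/arch end. */
		*zone_start = movable_start;
		*zone_end = node_end < highest_possible ? node_end : highest_possible;
	} else if (*zone_start < movable_start && *zone_end > movable_start) {
		*zone_end = movable_start;	/* zone is truncated where MOVABLE begins */
	} else if (*zone_start >= movable_start) {
		*zone_start = *zone_end;	/* zone lies entirely inside MOVABLE: empty it */
	}
}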
c713216d
MG
3103/*
3104 * Return the number of pages a zone spans in a node, including holes
3105 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3106 */
6ea6e688 3107static unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d
MG
3108 unsigned long zone_type,
3109 unsigned long *ignored)
3110{
3111 unsigned long node_start_pfn, node_end_pfn;
3112 unsigned long zone_start_pfn, zone_end_pfn;
3113
3114 /* Get the start and end of the node and zone */
3115 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3116 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3117 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
2a1e274a
MG
3118 adjust_zone_range_for_zone_movable(nid, zone_type,
3119 node_start_pfn, node_end_pfn,
3120 &zone_start_pfn, &zone_end_pfn);
c713216d
MG
3121
3122 /* Check that this node has pages within the zone's required range */
3123 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3124 return 0;
3125
3126 /* Move the zone boundaries inside the node if necessary */
3127 zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3128 zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3129
3130 /* Return the spanned pages */
3131 return zone_end_pfn - zone_start_pfn;
3132}
3133
3134/*
3135 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
88ca3b94 3136 * then all holes in the requested range will be accounted for.
c713216d 3137 */
a3142c8e 3138unsigned long __meminit __absent_pages_in_range(int nid,
c713216d
MG
3139 unsigned long range_start_pfn,
3140 unsigned long range_end_pfn)
3141{
3142 int i = 0;
3143 unsigned long prev_end_pfn = 0, hole_pages = 0;
3144 unsigned long start_pfn;
3145
3146 /* Find the end_pfn of the first active range of pfns in the node */
3147 i = first_active_region_index_in_nid(nid);
3148 if (i == -1)
3149 return 0;
3150
b5445f95
MG
3151 prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3152
9c7cd687
MG
3153 /* Account for ranges before physical memory on this node */
3154 if (early_node_map[i].start_pfn > range_start_pfn)
b5445f95 3155 hole_pages = prev_end_pfn - range_start_pfn;
c713216d
MG
3156
3157 /* Find all holes for the zone within the node */
3158 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3159
3160 /* No need to continue if prev_end_pfn is outside the zone */
3161 if (prev_end_pfn >= range_end_pfn)
3162 break;
3163
3164 /* Make sure the end of the zone is not within the hole */
3165 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3166 prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3167
 3168		/* Update the hole size count and move on */
3169 if (start_pfn > range_start_pfn) {
3170 BUG_ON(prev_end_pfn > start_pfn);
3171 hole_pages += start_pfn - prev_end_pfn;
3172 }
3173 prev_end_pfn = early_node_map[i].end_pfn;
3174 }
3175
9c7cd687
MG
3176 /* Account for ranges past physical memory on this node */
3177 if (range_end_pfn > prev_end_pfn)
0c6cb974 3178 hole_pages += range_end_pfn -
9c7cd687
MG
3179 max(range_start_pfn, prev_end_pfn);
3180
c713216d
MG
3181 return hole_pages;
3182}
3183
3184/**
3185 * absent_pages_in_range - Return number of page frames in holes within a range
3186 * @start_pfn: The start PFN to start searching for holes
3187 * @end_pfn: The end PFN to stop searching for holes
3188 *
88ca3b94 3189 * It returns the number of page frames in memory holes within a range.
c713216d
MG
3190 */
3191unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3192 unsigned long end_pfn)
3193{
3194 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3195}
3196
3197/* Return the number of page frames in holes in a zone on a node */
6ea6e688 3198static unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d
MG
3199 unsigned long zone_type,
3200 unsigned long *ignored)
3201{
9c7cd687
MG
3202 unsigned long node_start_pfn, node_end_pfn;
3203 unsigned long zone_start_pfn, zone_end_pfn;
3204
3205 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3206 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3207 node_start_pfn);
3208 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3209 node_end_pfn);
3210
2a1e274a
MG
3211 adjust_zone_range_for_zone_movable(nid, zone_type,
3212 node_start_pfn, node_end_pfn,
3213 &zone_start_pfn, &zone_end_pfn);
9c7cd687 3214 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
c713216d 3215}
0e0b864e 3216
c713216d 3217#else
6ea6e688 3218static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d
MG
3219 unsigned long zone_type,
3220 unsigned long *zones_size)
3221{
3222 return zones_size[zone_type];
3223}
3224
6ea6e688 3225static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d
MG
3226 unsigned long zone_type,
3227 unsigned long *zholes_size)
3228{
3229 if (!zholes_size)
3230 return 0;
3231
3232 return zholes_size[zone_type];
3233}
0e0b864e 3234
c713216d
MG
3235#endif
3236
a3142c8e 3237static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
c713216d
MG
3238 unsigned long *zones_size, unsigned long *zholes_size)
3239{
3240 unsigned long realtotalpages, totalpages = 0;
3241 enum zone_type i;
3242
3243 for (i = 0; i < MAX_NR_ZONES; i++)
3244 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3245 zones_size);
3246 pgdat->node_spanned_pages = totalpages;
3247
3248 realtotalpages = totalpages;
3249 for (i = 0; i < MAX_NR_ZONES; i++)
3250 realtotalpages -=
3251 zone_absent_pages_in_node(pgdat->node_id, i,
3252 zholes_size);
3253 pgdat->node_present_pages = realtotalpages;
3254 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3255 realtotalpages);
3256}
3257
835c134e
MG
3258#ifndef CONFIG_SPARSEMEM
3259/*
 3260 * Calculate the size of the zone->pageblock_flags bitmap, rounded to an unsigned long.
d9c23400
MG
 3261 * Start by making sure zonesize is a multiple of pageblock_nr_pages by rounding
3262 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
835c134e
MG
3263 * round what is now in bits to nearest long in bits, then return it in
3264 * bytes.
3265 */
3266static unsigned long __init usemap_size(unsigned long zonesize)
3267{
3268 unsigned long usemapsize;
3269
d9c23400
MG
3270 usemapsize = roundup(zonesize, pageblock_nr_pages);
3271 usemapsize = usemapsize >> pageblock_order;
835c134e
MG
3272 usemapsize *= NR_PAGEBLOCK_BITS;
3273 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3274
3275 return usemapsize / 8;
3276}
3277
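/*
 * Worked example (illustrative, standalone): usemap_size() for a 1 GiB zone
 * (262144 pages of 4 KiB) with pageblock_order 10, assuming 4 flag bits per
 * pageblock (a stand-in for NR_PAGEBLOCK_BITS).
 */
#include <stdio.h>

int main(void)
{
	unsigned long zonesize = 262144;	/* pages */
	unsigned long pageblock_nr_pages = 1UL << 10;
	unsigned long nr_pageblock_bits = 4;	/* assumed NR_PAGEBLOCK_BITS */
	unsigned long bits_per_long = 8 * sizeof(unsigned long);
	unsigned long usemapsize;

	/* Round the zone up to whole pageblocks, then count flag bits. */
	usemapsize = (zonesize + pageblock_nr_pages - 1) & ~(pageblock_nr_pages - 1);
	usemapsize >>= 10;			/* 256 pageblocks */
	usemapsize *= nr_pageblock_bits;	/* 1024 bits */
	/* Round up to a whole number of longs, then convert bits to bytes. */
	usemapsize = (usemapsize + bits_per_long - 1) & ~(bits_per_long - 1);
	printf("usemap = %lu bytes\n", usemapsize / 8);	/* prints: usemap = 128 bytes */
	return 0;
}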
3278static void __init setup_usemap(struct pglist_data *pgdat,
3279 struct zone *zone, unsigned long zonesize)
3280{
3281 unsigned long usemapsize = usemap_size(zonesize);
3282 zone->pageblock_flags = NULL;
3283 if (usemapsize) {
3284 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3285 memset(zone->pageblock_flags, 0, usemapsize);
3286 }
3287}
3288#else
3289static void inline setup_usemap(struct pglist_data *pgdat,
3290 struct zone *zone, unsigned long zonesize) {}
3291#endif /* CONFIG_SPARSEMEM */
3292
d9c23400 3293#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
ba72cb8c
MG
3294
3295/* Return a sensible default order for the pageblock size. */
3296static inline int pageblock_default_order(void)
3297{
3298 if (HPAGE_SHIFT > PAGE_SHIFT)
3299 return HUGETLB_PAGE_ORDER;
3300
3301 return MAX_ORDER-1;
3302}
3303
d9c23400
MG
3304/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
3305static inline void __init set_pageblock_order(unsigned int order)
3306{
3307 /* Check that pageblock_nr_pages has not already been setup */
3308 if (pageblock_order)
3309 return;
3310
3311 /*
3312 * Assume the largest contiguous order of interest is a huge page.
3313 * This value may be variable depending on boot parameters on IA64
3314 */
3315 pageblock_order = order;
3316}
3317#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3318
ba72cb8c
MG
3319/*
3320 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
3321 * and pageblock_default_order() are unused as pageblock_order is set
3322 * at compile-time. See include/linux/pageblock-flags.h for the values of
3323 * pageblock_order based on the kernel config
3324 */
3325static inline int pageblock_default_order(unsigned int order)
3326{
3327 return MAX_ORDER-1;
3328}
d9c23400
MG
3329#define set_pageblock_order(x) do {} while (0)
3330
3331#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3332
1da177e4
LT
3333/*
3334 * Set up the zone data structures:
3335 * - mark all pages reserved
3336 * - mark all memory queues empty
3337 * - clear the memory bitmaps
3338 */
b5a0e011 3339static void __paginginit free_area_init_core(struct pglist_data *pgdat,
1da177e4
LT
3340 unsigned long *zones_size, unsigned long *zholes_size)
3341{
2f1b6248 3342 enum zone_type j;
ed8ece2e 3343 int nid = pgdat->node_id;
1da177e4 3344 unsigned long zone_start_pfn = pgdat->node_start_pfn;
718127cc 3345 int ret;
1da177e4 3346
208d54e5 3347 pgdat_resize_init(pgdat);
1da177e4
LT
3348 pgdat->nr_zones = 0;
3349 init_waitqueue_head(&pgdat->kswapd_wait);
3350 pgdat->kswapd_max_order = 0;
3351
3352 for (j = 0; j < MAX_NR_ZONES; j++) {
3353 struct zone *zone = pgdat->node_zones + j;
0e0b864e 3354 unsigned long size, realsize, memmap_pages;
1da177e4 3355
c713216d
MG
3356 size = zone_spanned_pages_in_node(nid, j, zones_size);
3357 realsize = size - zone_absent_pages_in_node(nid, j,
3358 zholes_size);
1da177e4 3359
0e0b864e
MG
3360 /*
3361 * Adjust realsize so that it accounts for how much memory
3362 * is used by this zone for memmap. This affects the watermark
3363 * and per-cpu initialisations
3364 */
3365 memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT;
3366 if (realsize >= memmap_pages) {
3367 realsize -= memmap_pages;
3368 printk(KERN_DEBUG
3369 " %s zone: %lu pages used for memmap\n",
3370 zone_names[j], memmap_pages);
3371 } else
3372 printk(KERN_WARNING
3373 " %s zone: %lu pages exceeds realsize %lu\n",
3374 zone_names[j], memmap_pages, realsize);
3375
6267276f
CL
3376 /* Account for reserved pages */
3377 if (j == 0 && realsize > dma_reserve) {
0e0b864e 3378 realsize -= dma_reserve;
6267276f
CL
3379 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
3380 zone_names[0], dma_reserve);
0e0b864e
MG
3381 }
3382
98d2b0eb 3383 if (!is_highmem_idx(j))
1da177e4
LT
3384 nr_kernel_pages += realsize;
3385 nr_all_pages += realsize;
3386
3387 zone->spanned_pages = size;
3388 zone->present_pages = realsize;
9614634f 3389#ifdef CONFIG_NUMA
d5f541ed 3390 zone->node = nid;
8417bba4 3391 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
9614634f 3392 / 100;
0ff38490 3393 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
9614634f 3394#endif
1da177e4
LT
3395 zone->name = zone_names[j];
3396 spin_lock_init(&zone->lock);
3397 spin_lock_init(&zone->lru_lock);
bdc8cb98 3398 zone_seqlock_init(zone);
1da177e4 3399 zone->zone_pgdat = pgdat;
1da177e4 3400
3bb1a852 3401 zone->prev_priority = DEF_PRIORITY;
1da177e4 3402
ed8ece2e 3403 zone_pcp_init(zone);
1da177e4
LT
3404 INIT_LIST_HEAD(&zone->active_list);
3405 INIT_LIST_HEAD(&zone->inactive_list);
3406 zone->nr_scan_active = 0;
3407 zone->nr_scan_inactive = 0;
2244b95a 3408 zap_zone_vm_stats(zone);
e815af95 3409 zone->flags = 0;
1da177e4
LT
3410 if (!size)
3411 continue;
3412
ba72cb8c 3413 set_pageblock_order(pageblock_default_order());
835c134e 3414 setup_usemap(pgdat, zone, size);
a2f3aa02
DH
3415 ret = init_currently_empty_zone(zone, zone_start_pfn,
3416 size, MEMMAP_EARLY);
718127cc 3417 BUG_ON(ret);
1da177e4 3418 zone_start_pfn += size;
1da177e4
LT
3419 }
3420}
3421
577a32f6 3422static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
1da177e4 3423{
1da177e4
LT
3424 /* Skip empty nodes */
3425 if (!pgdat->node_spanned_pages)
3426 return;
3427
d41dee36 3428#ifdef CONFIG_FLAT_NODE_MEM_MAP
1da177e4
LT
3429 /* ia64 gets its own node_mem_map, before this, without bootmem */
3430 if (!pgdat->node_mem_map) {
e984bb43 3431 unsigned long size, start, end;
d41dee36
AW
3432 struct page *map;
3433
e984bb43
BP
3434 /*
3435 * The zone's endpoints aren't required to be MAX_ORDER
3436 * aligned, but the node_mem_map endpoints must be MAX_ORDER
3437 * aligned for the buddy allocator to function correctly.
3438 */
3439 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
3440 end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
3441 end = ALIGN(end, MAX_ORDER_NR_PAGES);
3442 size = (end - start) * sizeof(struct page);
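		/*
		 * Illustrative example (assuming the default MAX_ORDER of 11,
		 * i.e. MAX_ORDER_NR_PAGES == 1024): a node starting at pfn
		 * 0x1050 and spanning 0x2000 pages gives start = 0x1000 and
		 * end = ALIGN(0x3050, 0x400) = 0x3400, so the mem_map below
		 * covers 0x2400 pages even though only 0x2000 are spanned.
		 */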
6f167ec7
DH
3443 map = alloc_remap(pgdat->node_id, size);
3444 if (!map)
3445 map = alloc_bootmem_node(pgdat, size);
e984bb43 3446 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
1da177e4 3447 }
12d810c1 3448#ifndef CONFIG_NEED_MULTIPLE_NODES
1da177e4
LT
3449 /*
3450 * With no DISCONTIG, the global mem_map is just set as node 0's
3451 */
c713216d 3452 if (pgdat == NODE_DATA(0)) {
1da177e4 3453 mem_map = NODE_DATA(0)->node_mem_map;
c713216d
MG
3454#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3455 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
467bc461 3456 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
c713216d
MG
3457#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3458 }
1da177e4 3459#endif
d41dee36 3460#endif /* CONFIG_FLAT_NODE_MEM_MAP */
1da177e4
LT
3461}
3462
b5a0e011 3463void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat,
1da177e4
LT
3464 unsigned long *zones_size, unsigned long node_start_pfn,
3465 unsigned long *zholes_size)
3466{
3467 pgdat->node_id = nid;
3468 pgdat->node_start_pfn = node_start_pfn;
c713216d 3469 calculate_node_totalpages(pgdat, zones_size, zholes_size);
1da177e4
LT
3470
3471 alloc_node_mem_map(pgdat);
3472
3473 free_area_init_core(pgdat, zones_size, zholes_size);
3474}
3475
c713216d 3476#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
418508c1
MS
3477
3478#if MAX_NUMNODES > 1
3479/*
3480 * Figure out the number of possible node ids.
3481 */
3482static void __init setup_nr_node_ids(void)
3483{
3484 unsigned int node;
3485 unsigned int highest = 0;
3486
3487 for_each_node_mask(node, node_possible_map)
3488 highest = node;
3489 nr_node_ids = highest + 1;
3490}
3491#else
3492static inline void setup_nr_node_ids(void)
3493{
3494}
3495#endif
3496
c713216d
MG
3497/**
3498 * add_active_range - Register a range of PFNs backed by physical memory
3499 * @nid: The node ID the range resides on
3500 * @start_pfn: The start PFN of the available physical memory
3501 * @end_pfn: The end PFN of the available physical memory
3502 *
3503 * These ranges are stored in an early_node_map[] and later used by
3504 * free_area_init_nodes() to calculate zone sizes and holes. If the
3505 * range spans a memory hole, it is up to the architecture to ensure
3506 * the memory is not freed by the bootmem allocator. If possible
3507 * the range being registered will be merged with existing ranges.
3508 */
3509void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3510 unsigned long end_pfn)
3511{
3512 int i;
3513
3514 printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) "
3515 "%d entries of %d used\n",
3516 nid, start_pfn, end_pfn,
3517 nr_nodemap_entries, MAX_ACTIVE_REGIONS);
3518
3519 /* Merge with existing active regions if possible */
3520 for (i = 0; i < nr_nodemap_entries; i++) {
3521 if (early_node_map[i].nid != nid)
3522 continue;
3523
3524 /* Skip if an existing region covers this new one */
3525 if (start_pfn >= early_node_map[i].start_pfn &&
3526 end_pfn <= early_node_map[i].end_pfn)
3527 return;
3528
3529 /* Merge forward if suitable */
3530 if (start_pfn <= early_node_map[i].end_pfn &&
3531 end_pfn > early_node_map[i].end_pfn) {
3532 early_node_map[i].end_pfn = end_pfn;
3533 return;
3534 }
3535
3536 /* Merge backward if suitable */
3537 if (start_pfn < early_node_map[i].end_pfn &&
3538 end_pfn >= early_node_map[i].start_pfn) {
3539 early_node_map[i].start_pfn = start_pfn;
3540 return;
3541 }
3542 }
3543
3544 /* Check that early_node_map is large enough */
3545 if (i >= MAX_ACTIVE_REGIONS) {
3546 printk(KERN_CRIT "More than %d memory regions, truncating\n",
3547 MAX_ACTIVE_REGIONS);
3548 return;
3549 }
3550
3551 early_node_map[i].nid = nid;
3552 early_node_map[i].start_pfn = start_pfn;
3553 early_node_map[i].end_pfn = end_pfn;
3554 nr_nodemap_entries = i + 1;
3555}
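/*
 * Typical usage (illustrative): an architecture walks its firmware memory
 * map at early boot and registers every usable range, roughly
 *
 *	add_active_range(nid, start_pfn, end_pfn);
 *
 * once per range, before sizing the zones with free_area_init_nodes().
 * On x86, for instance, this registration is driven by the e820 code.
 */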
3556
3557/**
3558 * shrink_active_range - Shrink an existing registered range of PFNs
3559 * @nid: The node id the range is on that should be shrunk
3560 * @old_end_pfn: The old end PFN of the range
3561 * @new_end_pfn: The new end PFN of the range
3562 *
3563 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
3564 * The map is kept at the end of the physical page range that has already been
3565 * registered with add_active_range(). This function allows an arch to shrink
3566 * an existing registered range.
3567 */
3568void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
3569 unsigned long new_end_pfn)
3570{
3571 int i;
3572
3573 /* Find the old active region end and shrink */
3574 for_each_active_range_index_in_nid(i, nid)
3575 if (early_node_map[i].end_pfn == old_end_pfn) {
3576 early_node_map[i].end_pfn = new_end_pfn;
3577 break;
3578 }
3579}
3580
3581/**
3582 * remove_all_active_ranges - Remove all currently registered regions
88ca3b94 3583 *
c713216d
MG
3584 * During discovery, it may be found that a table like SRAT is invalid
3585 * and an alternative discovery method must be used. This function removes
3586 * all currently registered regions.
3587 */
88ca3b94 3588void __init remove_all_active_ranges(void)
c713216d
MG
3589{
3590 memset(early_node_map, 0, sizeof(early_node_map));
3591 nr_nodemap_entries = 0;
fb01439c
MG
3592#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
3593 memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn));
3594 memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn));
3595#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
c713216d
MG
3596}
3597
3598/* Compare two active node_active_regions */
3599static int __init cmp_node_active_region(const void *a, const void *b)
3600{
3601 struct node_active_region *arange = (struct node_active_region *)a;
3602 struct node_active_region *brange = (struct node_active_region *)b;
3603
3604 /* Done this way to avoid overflows */
3605 if (arange->start_pfn > brange->start_pfn)
3606 return 1;
3607 if (arange->start_pfn < brange->start_pfn)
3608 return -1;
3609
3610 return 0;
3611}
3612
3613/* sort the node_map by start_pfn */
3614static void __init sort_node_map(void)
3615{
3616 sort(early_node_map, (size_t)nr_nodemap_entries,
3617 sizeof(struct node_active_region),
3618 cmp_node_active_region, NULL);
3619}
3620
a6af2bc3 3621/* Find the lowest pfn for a node */
c713216d
MG
3622unsigned long __init find_min_pfn_for_node(unsigned long nid)
3623{
3624 int i;
a6af2bc3 3625 unsigned long min_pfn = ULONG_MAX;
1abbfb41 3626
c713216d
MG
3627 /* Assuming a sorted map, the first range found has the starting pfn */
3628 for_each_active_range_index_in_nid(i, nid)
a6af2bc3 3629 min_pfn = min(min_pfn, early_node_map[i].start_pfn);
c713216d 3630
a6af2bc3
MG
3631 if (min_pfn == ULONG_MAX) {
3632 printk(KERN_WARNING
3633 "Could not find start_pfn for node %lu\n", nid);
3634 return 0;
3635 }
3636
3637 return min_pfn;
c713216d
MG
3638}
3639
3640/**
3641 * find_min_pfn_with_active_regions - Find the minimum PFN registered
3642 *
3643 * It returns the minimum PFN based on information provided via
88ca3b94 3644 * add_active_range().
c713216d
MG
3645 */
3646unsigned long __init find_min_pfn_with_active_regions(void)
3647{
3648 return find_min_pfn_for_node(MAX_NUMNODES);
3649}
3650
3651/**
3652 * find_max_pfn_with_active_regions - Find the maximum PFN registered
3653 *
3654 * It returns the maximum PFN based on information provided via
88ca3b94 3655 * add_active_range().
c713216d
MG
3656 */
3657unsigned long __init find_max_pfn_with_active_regions(void)
3658{
3659 int i;
3660 unsigned long max_pfn = 0;
3661
3662 for (i = 0; i < nr_nodemap_entries; i++)
3663 max_pfn = max(max_pfn, early_node_map[i].end_pfn);
3664
3665 return max_pfn;
3666}
3667
37b07e41
LS
3668/*
3669 * early_calculate_totalpages()
3670 * Sum pages in active regions for movable zone.
3671 * Populate N_HIGH_MEMORY for calculating usable_nodes.
3672 */
484f51f8 3673static unsigned long __init early_calculate_totalpages(void)
7e63efef
MG
3674{
3675 int i;
3676 unsigned long totalpages = 0;
3677
37b07e41
LS
3678 for (i = 0; i < nr_nodemap_entries; i++) {
3679 unsigned long pages = early_node_map[i].end_pfn -
7e63efef 3680 early_node_map[i].start_pfn;
37b07e41
LS
3681 totalpages += pages;
3682 if (pages)
3683 node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
3684 }
3685 return totalpages;
7e63efef
MG
3686}
3687
2a1e274a
MG
3688/*
3689 * Find the PFN at which the Movable zone begins in each node. Kernel memory
3690 * is spread evenly between nodes as long as the nodes have enough
3691 * memory. When they don't, some nodes will have more kernelcore than
3692 * others
3693 */
3694void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
3695{
3696 int i, nid;
3697 unsigned long usable_startpfn;
3698 unsigned long kernelcore_node, kernelcore_remaining;
37b07e41
LS
3699 unsigned long totalpages = early_calculate_totalpages();
3700 int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
2a1e274a 3701
7e63efef
MG
3702 /*
3703 * If movablecore was specified, calculate the size of
3704 * kernelcore that corresponds to it so that memory usable for
3705 * any allocation type is evenly spread. If both kernelcore
3706 * and movablecore are specified, then the value of kernelcore
3707 * will be used for required_kernelcore if it's greater than
3708 * what movablecore would have allowed.
3709 */
3710 if (required_movablecore) {
7e63efef
MG
3711 unsigned long corepages;
3712
3713 /*
3714 * Round-up so that ZONE_MOVABLE is at least as large as what
3715 * was requested by the user
3716 */
3717 required_movablecore =
3718 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
3719 corepages = totalpages - required_movablecore;
3720
3721 required_kernelcore = max(required_kernelcore, corepages);
3722 }
3723
2a1e274a
MG
3724 /* If kernelcore was not specified, there is no ZONE_MOVABLE */
3725 if (!required_kernelcore)
3726 return;
3727
3728 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
3729 find_usable_zone_for_movable();
3730 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
3731
3732restart:
3733 /* Spread kernelcore memory as evenly as possible throughout nodes */
3734 kernelcore_node = required_kernelcore / usable_nodes;
37b07e41 3735 for_each_node_state(nid, N_HIGH_MEMORY) {
2a1e274a
MG
3736 /*
3737 * Recalculate kernelcore_node if the division per node
3738 * now exceeds what is necessary to satisfy the requested
3739 * amount of memory for the kernel
3740 */
3741 if (required_kernelcore < kernelcore_node)
3742 kernelcore_node = required_kernelcore / usable_nodes;
3743
3744 /*
3745 * As the map is walked, we track how much memory is usable
3746 * by the kernel using kernelcore_remaining. When it is
3747 * 0, the rest of the node is usable by ZONE_MOVABLE
3748 */
3749 kernelcore_remaining = kernelcore_node;
3750
3751 /* Go through each range of PFNs within this node */
3752 for_each_active_range_index_in_nid(i, nid) {
3753 unsigned long start_pfn, end_pfn;
3754 unsigned long size_pages;
3755
3756 start_pfn = max(early_node_map[i].start_pfn,
3757 zone_movable_pfn[nid]);
3758 end_pfn = early_node_map[i].end_pfn;
3759 if (start_pfn >= end_pfn)
3760 continue;
3761
3762 /* Account for what is only usable for kernelcore */
3763 if (start_pfn < usable_startpfn) {
3764 unsigned long kernel_pages;
3765 kernel_pages = min(end_pfn, usable_startpfn)
3766 - start_pfn;
3767
3768 kernelcore_remaining -= min(kernel_pages,
3769 kernelcore_remaining);
3770 required_kernelcore -= min(kernel_pages,
3771 required_kernelcore);
3772
3773 /* Continue if range is now fully accounted */
3774 if (end_pfn <= usable_startpfn) {
3775
3776 /*
3777 * Push zone_movable_pfn to the end so
3778 * that if we have to rebalance
3779 * kernelcore across nodes, we will
3780 * not double account here
3781 */
3782 zone_movable_pfn[nid] = end_pfn;
3783 continue;
3784 }
3785 start_pfn = usable_startpfn;
3786 }
3787
3788 /*
3789 * The usable PFN range for ZONE_MOVABLE is from
3790 * start_pfn->end_pfn. Calculate size_pages as the
3791 * number of pages used as kernelcore
3792 */
3793 size_pages = end_pfn - start_pfn;
3794 if (size_pages > kernelcore_remaining)
3795 size_pages = kernelcore_remaining;
3796 zone_movable_pfn[nid] = start_pfn + size_pages;
3797
3798 /*
3799 * Some kernelcore has been accounted for; update the counts and
3800 * break if the kernelcore for this node has been
3801 * satisfied
3802 */
3803 required_kernelcore -= min(required_kernelcore,
3804 size_pages);
3805 kernelcore_remaining -= size_pages;
3806 if (!kernelcore_remaining)
3807 break;
3808 }
3809 }
3810
3811 /*
3812 * If there is still required_kernelcore, we do another pass with one
3813 * less node in the count. This will push zone_movable_pfn[nid] further
3814 * along on the nodes that still have memory until kernelcore is
3815 * satisfied
3816 */
3817 usable_nodes--;
3818 if (usable_nodes && required_kernelcore > usable_nodes)
3819 goto restart;
3820
3821 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
3822 for (nid = 0; nid < MAX_NUMNODES; nid++)
3823 zone_movable_pfn[nid] =
3824 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
3825}
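/*
 * Worked example (illustrative, assuming 4KB pages): with kernelcore=2G and
 * two nodes that both have memory, required_kernelcore is 524288 pages and
 * kernelcore_node starts at 262144 pages per node.  If each node is a single
 * range lying entirely above usable_startpfn and larger than 262144 pages,
 * zone_movable_pfn[nid] ends up 262144 pages above each node's start, i.e.
 * everything beyond the first 1GB of each node goes to ZONE_MOVABLE.
 */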
3826
37b07e41
LS
3827/* Any regular memory on that node? */
3828static void check_for_regular_memory(pg_data_t *pgdat)
3829{
3830#ifdef CONFIG_HIGHMEM
3831 enum zone_type zone_type;
3832
3833 for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
3834 struct zone *zone = &pgdat->node_zones[zone_type];
3835 if (zone->present_pages)
3836 node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
3837 }
3838#endif
3839}
3840
c713216d
MG
3841/**
3842 * free_area_init_nodes - Initialise all pg_data_t and zone data
88ca3b94 3843 * @max_zone_pfn: an array of max PFNs for each zone
c713216d
MG
3844 *
3845 * This will call free_area_init_node() for each active node in the system.
3846 * Using the page ranges provided by add_active_range(), the size of each
3847 * zone in each node and their holes is calculated. If the maximum PFNs
3848 * of two adjacent zones match, it is assumed that the higher zone is empty.
3849 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
3850 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
3851 * starts where the previous one ended. For example, ZONE_DMA32 starts
3852 * at arch_max_dma_pfn.
3853 */
3854void __init free_area_init_nodes(unsigned long *max_zone_pfn)
3855{
3856 unsigned long nid;
3857 enum zone_type i;
3858
a6af2bc3
MG
3859 /* Sort early_node_map as initialisation assumes it is sorted */
3860 sort_node_map();
3861
c713216d
MG
3862 /* Record where the zone boundaries are */
3863 memset(arch_zone_lowest_possible_pfn, 0,
3864 sizeof(arch_zone_lowest_possible_pfn));
3865 memset(arch_zone_highest_possible_pfn, 0,
3866 sizeof(arch_zone_highest_possible_pfn));
3867 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
3868 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
3869 for (i = 1; i < MAX_NR_ZONES; i++) {
2a1e274a
MG
3870 if (i == ZONE_MOVABLE)
3871 continue;
c713216d
MG
3872 arch_zone_lowest_possible_pfn[i] =
3873 arch_zone_highest_possible_pfn[i-1];
3874 arch_zone_highest_possible_pfn[i] =
3875 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
3876 }
2a1e274a
MG
3877 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
3878 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
3879
3880 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
3881 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
3882 find_zone_movable_pfns_for_nodes(zone_movable_pfn);
c713216d 3883
c713216d
MG
3884 /* Print out the zone ranges */
3885 printk("Zone PFN ranges:\n");
2a1e274a
MG
3886 for (i = 0; i < MAX_NR_ZONES; i++) {
3887 if (i == ZONE_MOVABLE)
3888 continue;
c713216d
MG
3889 printk(" %-8s %8lu -> %8lu\n",
3890 zone_names[i],
3891 arch_zone_lowest_possible_pfn[i],
3892 arch_zone_highest_possible_pfn[i]);
2a1e274a
MG
3893 }
3894
3895 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
3896 printk("Movable zone start PFN for each node\n");
3897 for (i = 0; i < MAX_NUMNODES; i++) {
3898 if (zone_movable_pfn[i])
3899 printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
3900 }
c713216d
MG
3901
3902 /* Print out the early_node_map[] */
3903 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
3904 for (i = 0; i < nr_nodemap_entries; i++)
3905 printk(" %3d: %8lu -> %8lu\n", early_node_map[i].nid,
3906 early_node_map[i].start_pfn,
3907 early_node_map[i].end_pfn);
3908
3909 /* Initialise every node */
8ef82866 3910 setup_nr_node_ids();
c713216d
MG
3911 for_each_online_node(nid) {
3912 pg_data_t *pgdat = NODE_DATA(nid);
3913 free_area_init_node(nid, pgdat, NULL,
3914 find_min_pfn_for_node(nid), NULL);
37b07e41
LS
3915
3916 /* Any memory on that node */
3917 if (pgdat->node_present_pages)
3918 node_set_state(nid, N_HIGH_MEMORY);
3919 check_for_regular_memory(pgdat);
c713216d
MG
3920 }
3921}
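/*
 * Typical caller (an illustrative sketch of an arch paging_init(), not a
 * verbatim copy of any particular architecture):
 *
 *	unsigned long max_zone_pfns[MAX_NR_ZONES];
 *
 *	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 *	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
 *	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 *	free_area_init_nodes(max_zone_pfns);
 *
 * called after every usable range has been registered with
 * add_active_range().
 */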
2a1e274a 3922
7e63efef 3923static int __init cmdline_parse_core(char *p, unsigned long *core)
2a1e274a
MG
3924{
3925 unsigned long long coremem;
3926 if (!p)
3927 return -EINVAL;
3928
3929 coremem = memparse(p, &p);
7e63efef 3930 *core = coremem >> PAGE_SHIFT;
2a1e274a 3931
7e63efef 3932 /* Paranoid check that UL is enough for the coremem value */
2a1e274a
MG
3933 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
3934
3935 return 0;
3936}
ed7ed365 3937
7e63efef
MG
3938/*
3939 * kernelcore=size sets the amount of memory usable for allocations that
3940 * cannot be reclaimed or migrated.
3941 */
3942static int __init cmdline_parse_kernelcore(char *p)
3943{
3944 return cmdline_parse_core(p, &required_kernelcore);
3945}
3946
3947/*
3948 * movablecore=size sets the amount of memory usable for allocations that
3949 * can be reclaimed or migrated.
3950 */
3951static int __init cmdline_parse_movablecore(char *p)
3952{
3953 return cmdline_parse_core(p, &required_movablecore);
3954}
3955
ed7ed365 3956early_param("kernelcore", cmdline_parse_kernelcore);
7e63efef 3957early_param("movablecore", cmdline_parse_movablecore);
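/*
 * Example (illustrative, assuming 4KB pages): booting with "kernelcore=512M"
 * makes memparse() return 536870912, so required_kernelcore becomes
 * 536870912 >> 12 = 131072 pages; memory beyond that on each node is then
 * eligible for ZONE_MOVABLE.
 */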
ed7ed365 3958
c713216d
MG
3959#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3960
0e0b864e 3961/**
88ca3b94
RD
3962 * set_dma_reserve - set the specified number of pages reserved in the first zone
3963 * @new_dma_reserve: The number of pages to mark reserved
0e0b864e
MG
3964 *
3965 * The per-cpu batchsize and zone watermarks are determined by present_pages.
3966 * In the DMA zone, a significant percentage may be consumed by kernel image
3967 * and other unfreeable allocations which can skew the watermarks badly. This
88ca3b94
RD
3968 * function may optionally be used to account for unfreeable pages in the
3969 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
3970 * smaller per-cpu batchsize.
0e0b864e
MG
3971 */
3972void __init set_dma_reserve(unsigned long new_dma_reserve)
3973{
3974 dma_reserve = new_dma_reserve;
3975}
3976
93b7504e 3977#ifndef CONFIG_NEED_MULTIPLE_NODES
1da177e4
LT
3978static bootmem_data_t contig_bootmem_data;
3979struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
3980
3981EXPORT_SYMBOL(contig_page_data);
93b7504e 3982#endif
1da177e4
LT
3983
3984void __init free_area_init(unsigned long *zones_size)
3985{
93b7504e 3986 free_area_init_node(0, NODE_DATA(0), zones_size,
1da177e4
LT
3987 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
3988}
1da177e4 3989
1da177e4
LT
3990static int page_alloc_cpu_notify(struct notifier_block *self,
3991 unsigned long action, void *hcpu)
3992{
3993 int cpu = (unsigned long)hcpu;
1da177e4 3994
8bb78442 3995 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
9f8f2172
CL
3996 drain_pages(cpu);
3997
3998 /*
3999 * Spill the event counters of the dead processor
4000 * into the current processors event counters.
4001 * This artificially elevates the count of the current
4002 * processor.
4003 */
f8891e5e 4004 vm_events_fold_cpu(cpu);
9f8f2172
CL
4005
4006 /*
4007 * Zero the differential counters of the dead processor
4008 * so that the vm statistics are consistent.
4009 *
4010 * This is only okay since the processor is dead and cannot
4011 * race with what we are doing.
4012 */
2244b95a 4013 refresh_cpu_vm_stats(cpu);
1da177e4
LT
4014 }
4015 return NOTIFY_OK;
4016}
1da177e4
LT
4017
4018void __init page_alloc_init(void)
4019{
4020 hotcpu_notifier(page_alloc_cpu_notify, 0);
4021}
4022
cb45b0e9
HA
4023/*
4024 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
4025 * or min_free_kbytes changes.
4026 */
4027static void calculate_totalreserve_pages(void)
4028{
4029 struct pglist_data *pgdat;
4030 unsigned long reserve_pages = 0;
2f6726e5 4031 enum zone_type i, j;
cb45b0e9
HA
4032
4033 for_each_online_pgdat(pgdat) {
4034 for (i = 0; i < MAX_NR_ZONES; i++) {
4035 struct zone *zone = pgdat->node_zones + i;
4036 unsigned long max = 0;
4037
4038 /* Find valid and maximum lowmem_reserve in the zone */
4039 for (j = i; j < MAX_NR_ZONES; j++) {
4040 if (zone->lowmem_reserve[j] > max)
4041 max = zone->lowmem_reserve[j];
4042 }
4043
4044 /* we treat pages_high as reserved pages. */
4045 max += zone->pages_high;
4046
4047 if (max > zone->present_pages)
4048 max = zone->present_pages;
4049 reserve_pages += max;
4050 }
4051 }
4052 totalreserve_pages = reserve_pages;
4053}
4054
1da177e4
LT
4055/*
4056 * setup_per_zone_lowmem_reserve - called whenever
4057 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
4058 * has a correct pages reserved value, so an adequate number of
4059 * pages are left in the zone after a successful __alloc_pages().
4060 */
4061static void setup_per_zone_lowmem_reserve(void)
4062{
4063 struct pglist_data *pgdat;
2f6726e5 4064 enum zone_type j, idx;
1da177e4 4065
ec936fc5 4066 for_each_online_pgdat(pgdat) {
1da177e4
LT
4067 for (j = 0; j < MAX_NR_ZONES; j++) {
4068 struct zone *zone = pgdat->node_zones + j;
4069 unsigned long present_pages = zone->present_pages;
4070
4071 zone->lowmem_reserve[j] = 0;
4072
2f6726e5
CL
4073 idx = j;
4074 while (idx) {
1da177e4
LT
4075 struct zone *lower_zone;
4076
2f6726e5
CL
4077 idx--;
4078
1da177e4
LT
4079 if (sysctl_lowmem_reserve_ratio[idx] < 1)
4080 sysctl_lowmem_reserve_ratio[idx] = 1;
4081
4082 lower_zone = pgdat->node_zones + idx;
4083 lower_zone->lowmem_reserve[j] = present_pages /
4084 sysctl_lowmem_reserve_ratio[idx];
4085 present_pages += lower_zone->present_pages;
4086 }
4087 }
4088 }
cb45b0e9
HA
4089
4090 /* update totalreserve_pages */
4091 calculate_totalreserve_pages();
1da177e4
LT
4092}
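/*
 * Worked example (illustrative): with sysctl_lowmem_reserve_ratio[ZONE_NORMAL]
 * set to 32 and a HIGHMEM zone of 224000 pages, the loop above gives the
 * NORMAL zone lowmem_reserve[ZONE_HIGHMEM] = 224000 / 32 = 7000 pages, i.e.
 * a highmem-capable allocation falling back to ZONE_NORMAL must leave at
 * least 7000 pages free there on top of the normal watermark.
 */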
4093
88ca3b94
RD
4094/**
4095 * setup_per_zone_pages_min - called when min_free_kbytes changes.
4096 *
4097 * Ensures that the pages_{min,low,high} values for each zone are set correctly
4098 * with respect to min_free_kbytes.
1da177e4 4099 */
3947be19 4100void setup_per_zone_pages_min(void)
1da177e4
LT
4101{
4102 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4103 unsigned long lowmem_pages = 0;
4104 struct zone *zone;
4105 unsigned long flags;
4106
4107 /* Calculate total number of !ZONE_HIGHMEM pages */
4108 for_each_zone(zone) {
4109 if (!is_highmem(zone))
4110 lowmem_pages += zone->present_pages;
4111 }
4112
4113 for_each_zone(zone) {
ac924c60
AM
4114 u64 tmp;
4115
1da177e4 4116 spin_lock_irqsave(&zone->lru_lock, flags);
ac924c60
AM
4117 tmp = (u64)pages_min * zone->present_pages;
4118 do_div(tmp, lowmem_pages);
1da177e4
LT
4119 if (is_highmem(zone)) {
4120 /*
669ed175
NP
4121 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
4122 * need highmem pages, so cap pages_min to a small
4123 * value here.
4124 *
4125 * The (pages_high-pages_low) and (pages_low-pages_min)
4126 * deltas control async page reclaim, and so should
4127 * not be capped for highmem.
1da177e4
LT
4128 */
4129 int min_pages;
4130
4131 min_pages = zone->present_pages / 1024;
4132 if (min_pages < SWAP_CLUSTER_MAX)
4133 min_pages = SWAP_CLUSTER_MAX;
4134 if (min_pages > 128)
4135 min_pages = 128;
4136 zone->pages_min = min_pages;
4137 } else {
669ed175
NP
4138 /*
4139 * If it's a lowmem zone, reserve a number of pages
1da177e4
LT
4140 * proportionate to the zone's size.
4141 */
669ed175 4142 zone->pages_min = tmp;
1da177e4
LT
4143 }
4144
ac924c60
AM
4145 zone->pages_low = zone->pages_min + (tmp >> 2);
4146 zone->pages_high = zone->pages_min + (tmp >> 1);
56fd56b8 4147 setup_zone_migrate_reserve(zone);
1da177e4
LT
4148 spin_unlock_irqrestore(&zone->lru_lock, flags);
4149 }
cb45b0e9
HA
4150
4151 /* update totalreserve_pages */
4152 calculate_totalreserve_pages();
1da177e4
LT
4153}
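/*
 * Worked example (illustrative): with min_free_kbytes = 4096 and 4KB pages,
 * pages_min totals 1024 pages.  A lowmem zone holding all of lowmem gets
 * tmp = 1024, so zone->pages_min = 1024, pages_low = 1024 + 256 = 1280 and
 * pages_high = 1024 + 512 = 1536 pages.
 */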
4154
4155/*
4156 * Initialise min_free_kbytes.
4157 *
4158 * For small machines we want it small (128k min). For large machines
4159 * we want it large (64MB max). But it is not linear, because network
4160 * bandwidth does not increase linearly with machine size. We use
4161 *
4162 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
4163 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
4164 *
4165 * which yields
4166 *
4167 * 16MB: 512k
4168 * 32MB: 724k
4169 * 64MB: 1024k
4170 * 128MB: 1448k
4171 * 256MB: 2048k
4172 * 512MB: 2896k
4173 * 1024MB: 4096k
4174 * 2048MB: 5792k
4175 * 4096MB: 8192k
4176 * 8192MB: 11584k
4177 * 16384MB: 16384k
4178 */
4179static int __init init_per_zone_pages_min(void)
4180{
4181 unsigned long lowmem_kbytes;
4182
4183 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4184
4185 min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4186 if (min_free_kbytes < 128)
4187 min_free_kbytes = 128;
4188 if (min_free_kbytes > 65536)
4189 min_free_kbytes = 65536;
4190 setup_per_zone_pages_min();
4191 setup_per_zone_lowmem_reserve();
4192 return 0;
4193}
4194module_init(init_per_zone_pages_min)
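/*
 * Sanity check of the table above (illustrative): with 1024MB of lowmem,
 * lowmem_kbytes = 1048576 and int_sqrt(1048576 * 16) = int_sqrt(16777216)
 * = 4096, i.e. min_free_kbytes = 4096k as listed.
 */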
4195
4196/*
4197 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
4198 * that we can call setup_per_zone_pages_min() whenever min_free_kbytes
4199 * changes.
4200 */
4201int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
4202 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4203{
4204 proc_dointvec(table, write, file, buffer, length, ppos);
3b1d92c5
MG
4205 if (write)
4206 setup_per_zone_pages_min();
1da177e4
LT
4207 return 0;
4208}
4209
9614634f
CL
4210#ifdef CONFIG_NUMA
4211int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
4212 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4213{
4214 struct zone *zone;
4215 int rc;
4216
4217 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4218 if (rc)
4219 return rc;
4220
4221 for_each_zone(zone)
8417bba4 4222 zone->min_unmapped_pages = (zone->present_pages *
9614634f
CL
4223 sysctl_min_unmapped_ratio) / 100;
4224 return 0;
4225}
0ff38490
CL
4226
4227int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
4228 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4229{
4230 struct zone *zone;
4231 int rc;
4232
4233 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4234 if (rc)
4235 return rc;
4236
4237 for_each_zone(zone)
4238 zone->min_slab_pages = (zone->present_pages *
4239 sysctl_min_slab_ratio) / 100;
4240 return 0;
4241}
9614634f
CL
4242#endif
4243
1da177e4
LT
4244/*
4245 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
4246 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
4247 * whenever sysctl_lowmem_reserve_ratio changes.
4248 *
4249 * The reserve ratio has no relation to the pages_min watermarks;
4250 * the lowmem reserve ratio only makes sense in relation to the
4251 * boot-time zone sizes.
4252 */
4253int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
4254 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4255{
4256 proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4257 setup_per_zone_lowmem_reserve();
4258 return 0;
4259}
4260
8ad4b1fb
RS
4261/*
4262 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
4263 * cpu. It is the fraction of total pages in each zone that a hot per-cpu pagelist
4264 * can have before it gets flushed back to the buddy allocator.
4265 */
4266
4267int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4268 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4269{
4270 struct zone *zone;
4271 unsigned int cpu;
4272 int ret;
4273
4274 ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4275 if (!write || (ret == -EINVAL))
4276 return ret;
4277 for_each_zone(zone) {
4278 for_each_online_cpu(cpu) {
4279 unsigned long high;
4280 high = zone->present_pages / percpu_pagelist_fraction;
4281 setup_pagelist_highmark(zone_pcp(zone, cpu), high);
4282 }
4283 }
4284 return 0;
4285}
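/*
 * Example (illustrative): "echo 8 > /proc/sys/vm/percpu_pagelist_fraction"
 * sets pcp->high to present_pages / 8 for every zone on every online cpu,
 * so a zone of 262144 pages gets a per-cpu high watermark of 32768 pages;
 * setup_pagelist_highmark() then derives the batch size from that value.
 */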
4286
f034b5d4 4287int hashdist = HASHDIST_DEFAULT;
1da177e4
LT
4288
4289#ifdef CONFIG_NUMA
4290static int __init set_hashdist(char *str)
4291{
4292 if (!str)
4293 return 0;
4294 hashdist = simple_strtoul(str, &str, 0);
4295 return 1;
4296}
4297__setup("hashdist=", set_hashdist);
4298#endif
4299
4300/*
4301 * allocate a large system hash table from bootmem
4302 * - it is assumed that the hash table must contain an exact power-of-2
4303 * quantity of entries
4304 * - limit is the number of hash buckets, not the total allocation size
4305 */
4306void *__init alloc_large_system_hash(const char *tablename,
4307 unsigned long bucketsize,
4308 unsigned long numentries,
4309 int scale,
4310 int flags,
4311 unsigned int *_hash_shift,
4312 unsigned int *_hash_mask,
4313 unsigned long limit)
4314{
4315 unsigned long long max = limit;
4316 unsigned long log2qty, size;
4317 void *table = NULL;
4318
4319 /* allow the kernel cmdline to have a say */
4320 if (!numentries) {
4321 /* round applicable memory size up to nearest megabyte */
04903664 4322 numentries = nr_kernel_pages;
1da177e4
LT
4323 numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
4324 numentries >>= 20 - PAGE_SHIFT;
4325 numentries <<= 20 - PAGE_SHIFT;
4326
4327 /* limit to 1 bucket per 2^scale bytes of low memory */
4328 if (scale > PAGE_SHIFT)
4329 numentries >>= (scale - PAGE_SHIFT);
4330 else
4331 numentries <<= (PAGE_SHIFT - scale);
9ab37b8f
PM
4332
4333 /* Make sure we've got at least a 0-order allocation.. */
4334 if (unlikely((numentries * bucketsize) < PAGE_SIZE))
4335 numentries = PAGE_SIZE / bucketsize;
1da177e4 4336 }
6e692ed3 4337 numentries = roundup_pow_of_two(numentries);
1da177e4
LT
4338
4339 /* limit allocation size to 1/16 total memory by default */
4340 if (max == 0) {
4341 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
4342 do_div(max, bucketsize);
4343 }
4344
4345 if (numentries > max)
4346 numentries = max;
4347
f0d1b0b3 4348 log2qty = ilog2(numentries);
1da177e4
LT
4349
4350 do {
4351 size = bucketsize << log2qty;
4352 if (flags & HASH_EARLY)
4353 table = alloc_bootmem(size);
4354 else if (hashdist)
4355 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
4356 else {
2309f9e6 4357 unsigned long order = get_order(size);
1da177e4 4358 table = (void*) __get_free_pages(GFP_ATOMIC, order);
1037b83b
ED
4359 /*
4360 * If bucketsize is not a power-of-two, we may free
4361 * some pages at the end of hash table.
4362 */
4363 if (table) {
4364 unsigned long alloc_end = (unsigned long)table +
4365 (PAGE_SIZE << order);
4366 unsigned long used = (unsigned long)table +
4367 PAGE_ALIGN(size);
4368 split_page(virt_to_page(table), order);
4369 while (used < alloc_end) {
4370 free_page(used);
4371 used += PAGE_SIZE;
4372 }
4373 }
1da177e4
LT
4374 }
4375 } while (!table && size > PAGE_SIZE && --log2qty);
4376
4377 if (!table)
4378 panic("Failed to allocate %s hash table\n", tablename);
4379
b49ad484 4380 printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
1da177e4
LT
4381 tablename,
4382 (1U << log2qty),
f0d1b0b3 4383 ilog2(size) - PAGE_SHIFT,
1da177e4
LT
4384 size);
4385
4386 if (_hash_shift)
4387 *_hash_shift = log2qty;
4388 if (_hash_mask)
4389 *_hash_mask = (1 << log2qty) - 1;
4390
4391 return table;
4392}
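/*
 * Typical caller (illustrative, modelled from memory on the dentry cache
 * setup; see fs/dcache.c for the authoritative version):
 *
 *	dentry_hashtable =
 *		alloc_large_system_hash("Dentry cache",
 *					sizeof(struct hlist_head),
 *					dhash_entries,
 *					13,
 *					HASH_EARLY,
 *					&d_hash_shift,
 *					&d_hash_mask,
 *					0);
 *
 * i.e. one bucket per 2^13 bytes of low memory unless dhash_entries was set
 * on the command line, with no explicit upper limit on the bucket count.
 */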
a117e66e
KH
4393
4394#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
a117e66e
KH
4395struct page *pfn_to_page(unsigned long pfn)
4396{
67de6482 4397 return __pfn_to_page(pfn);
a117e66e
KH
4398}
4399unsigned long page_to_pfn(struct page *page)
4400{
67de6482 4401 return __page_to_pfn(page);
a117e66e 4402}
a117e66e
KH
4403EXPORT_SYMBOL(pfn_to_page);
4404EXPORT_SYMBOL(page_to_pfn);
4405#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
6220ec78 4406
835c134e
MG
4407/* Return a pointer to the bitmap storing bits affecting a block of pages */
4408static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
4409 unsigned long pfn)
4410{
4411#ifdef CONFIG_SPARSEMEM
4412 return __pfn_to_section(pfn)->pageblock_flags;
4413#else
4414 return zone->pageblock_flags;
4415#endif /* CONFIG_SPARSEMEM */
4416}
4417
4418static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4419{
4420#ifdef CONFIG_SPARSEMEM
4421 pfn &= (PAGES_PER_SECTION-1);
d9c23400 4422 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
835c134e
MG
4423#else
4424 pfn = pfn - zone->zone_start_pfn;
d9c23400 4425 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
835c134e
MG
4426#endif /* CONFIG_SPARSEMEM */
4427}
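/*
 * Illustrative example for the !SPARSEMEM case (assuming pageblock_order == 9
 * and NR_PAGEBLOCK_BITS == 3): a pfn 5120 pages into the zone lies in
 * pageblock 5120 >> 9 = 10, so its flags start at bit index 10 * 3 = 30 of
 * zone->pageblock_flags.
 */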
4428
4429/**
d9c23400 4430 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
835c134e
MG
4431 * @page: The page within the block of interest
4432 * @start_bitidx: The first bit of interest to retrieve
4433 * @end_bitidx: The last bit of interest
4434 * returns pageblock_bits flags
4435 */
4436unsigned long get_pageblock_flags_group(struct page *page,
4437 int start_bitidx, int end_bitidx)
4438{
4439 struct zone *zone;
4440 unsigned long *bitmap;
4441 unsigned long pfn, bitidx;
4442 unsigned long flags = 0;
4443 unsigned long value = 1;
4444
4445 zone = page_zone(page);
4446 pfn = page_to_pfn(page);
4447 bitmap = get_pageblock_bitmap(zone, pfn);
4448 bitidx = pfn_to_bitidx(zone, pfn);
4449
4450 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4451 if (test_bit(bitidx + start_bitidx, bitmap))
4452 flags |= value;
6220ec78 4453
835c134e
MG
4454 return flags;
4455}
4456
4457/**
d9c23400 4458 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
835c134e
MG
4459 * @page: The page within the block of interest
4460 * @start_bitidx: The first bit of interest
4461 * @end_bitidx: The last bit of interest
4462 * @flags: The flags to set
4463 */
4464void set_pageblock_flags_group(struct page *page, unsigned long flags,
4465 int start_bitidx, int end_bitidx)
4466{
4467 struct zone *zone;
4468 unsigned long *bitmap;
4469 unsigned long pfn, bitidx;
4470 unsigned long value = 1;
4471
4472 zone = page_zone(page);
4473 pfn = page_to_pfn(page);
4474 bitmap = get_pageblock_bitmap(zone, pfn);
4475 bitidx = pfn_to_bitidx(zone, pfn);
86051ca5
KH
4476 VM_BUG_ON(pfn < zone->zone_start_pfn);
4477 VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
835c134e
MG
4478
4479 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4480 if (flags & value)
4481 __set_bit(bitidx + start_bitidx, bitmap);
4482 else
4483 __clear_bit(bitidx + start_bitidx, bitmap);
4484}
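/*
 * These two are normally used through wrappers such as
 * get_pageblock_migratetype()/set_pageblock_migratetype(), which pass the
 * PB_migrate..PB_migrate_end bit range, e.g. (sketch):
 *
 *	set_pageblock_flags_group(page, MIGRATE_MOVABLE,
 *				  PB_migrate, PB_migrate_end);
 */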
a5d76b54
KH
4485
4486/*
4487 * This is designed as a helper function; please see page_isolation.c as well.
4488 * It sets/clears a pageblock's type to MIGRATE_ISOLATE.
4489 * The page allocator never allocates memory from an ISOLATE pageblock.
4490 */
4491
4492int set_migratetype_isolate(struct page *page)
4493{
4494 struct zone *zone;
4495 unsigned long flags;
4496 int ret = -EBUSY;
4497
4498 zone = page_zone(page);
4499 spin_lock_irqsave(&zone->lock, flags);
4500 /*
4501 * In the future, more migrate types will be able to be isolation targets.
4502 */
4503 if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
4504 goto out;
4505 set_pageblock_migratetype(page, MIGRATE_ISOLATE);
4506 move_freepages_block(zone, page, MIGRATE_ISOLATE);
4507 ret = 0;
4508out:
4509 spin_unlock_irqrestore(&zone->lock, flags);
4510 if (!ret)
9f8f2172 4511 drain_all_pages();
a5d76b54
KH
4512 return ret;
4513}
4514
4515void unset_migratetype_isolate(struct page *page)
4516{
4517 struct zone *zone;
4518 unsigned long flags;
4519 zone = page_zone(page);
4520 spin_lock_irqsave(&zone->lock, flags);
4521 if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
4522 goto out;
4523 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4524 move_freepages_block(zone, page, MIGRATE_MOVABLE);
4525out:
4526 spin_unlock_irqrestore(&zone->lock, flags);
4527}
0c0e6195
KH
4528
4529#ifdef CONFIG_MEMORY_HOTREMOVE
4530/*
4531 * All pages in the range must be isolated before calling this.
4532 */
4533void
4534__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
4535{
4536 struct page *page;
4537 struct zone *zone;
4538 int order, i;
4539 unsigned long pfn;
4540 unsigned long flags;
4541 /* find the first valid pfn */
4542 for (pfn = start_pfn; pfn < end_pfn; pfn++)
4543 if (pfn_valid(pfn))
4544 break;
4545 if (pfn == end_pfn)
4546 return;
4547 zone = page_zone(pfn_to_page(pfn));
4548 spin_lock_irqsave(&zone->lock, flags);
4549 pfn = start_pfn;
4550 while (pfn < end_pfn) {
4551 if (!pfn_valid(pfn)) {
4552 pfn++;
4553 continue;
4554 }
4555 page = pfn_to_page(pfn);
4556 BUG_ON(page_count(page));
4557 BUG_ON(!PageBuddy(page));
4558 order = page_order(page);
4559#ifdef CONFIG_DEBUG_VM
4560 printk(KERN_INFO "remove from free list %lx %d %lx\n",
4561 pfn, 1 << order, end_pfn);
4562#endif
4563 list_del(&page->lru);
4564 rmv_page_order(page);
4565 zone->free_area[order].nr_free--;
4566 __mod_zone_page_state(zone, NR_FREE_PAGES,
4567 - (1UL << order));
4568 for (i = 0; i < (1 << order); i++)
4569 SetPageReserved((page+i));
4570 pfn += (1 << order);
4571 }
4572 spin_unlock_irqrestore(&zone->lock, flags);
4573}
4574#endif