mm: remember what the preferred zone is for zone_statistics
mm/page_alloc.c (linux-2.6-block.git)
1da177e4
LT
1/*
2 * linux/mm/page_alloc.c
3 *
4 * Manages the free list, the system allocates free pages here.
5 * Note that kmalloc() lives in slab.c
6 *
7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
8 * Swap reorganised 29.12.95, Stephen Tweedie
9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15 */
16
1da177e4
LT
17#include <linux/stddef.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/interrupt.h>
21#include <linux/pagemap.h>
10ed273f 22#include <linux/jiffies.h>
1da177e4
LT
23#include <linux/bootmem.h>
24#include <linux/compiler.h>
9f158333 25#include <linux/kernel.h>
1da177e4
LT
26#include <linux/module.h>
27#include <linux/suspend.h>
28#include <linux/pagevec.h>
29#include <linux/blkdev.h>
30#include <linux/slab.h>
5a3135c2 31#include <linux/oom.h>
1da177e4
LT
32#include <linux/notifier.h>
33#include <linux/topology.h>
34#include <linux/sysctl.h>
35#include <linux/cpu.h>
36#include <linux/cpuset.h>
bdc8cb98 37#include <linux/memory_hotplug.h>
1da177e4
LT
38#include <linux/nodemask.h>
39#include <linux/vmalloc.h>
4be38e35 40#include <linux/mempolicy.h>
6811378e 41#include <linux/stop_machine.h>
c713216d
MG
42#include <linux/sort.h>
43#include <linux/pfn.h>
3fcfab16 44#include <linux/backing-dev.h>
933e312e 45#include <linux/fault-inject.h>
a5d76b54 46#include <linux/page-isolation.h>
8a9f3ccd 47#include <linux/memcontrol.h>
1da177e4
LT
48
49#include <asm/tlbflush.h>
ac924c60 50#include <asm/div64.h>
1da177e4
LT
51#include "internal.h"
52
53/*
13808910 54 * Array of node states.
1da177e4 55 */
13808910
CL
56nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
57 [N_POSSIBLE] = NODE_MASK_ALL,
58 [N_ONLINE] = { { [0] = 1UL } },
59#ifndef CONFIG_NUMA
60 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
61#ifdef CONFIG_HIGHMEM
62 [N_HIGH_MEMORY] = { { [0] = 1UL } },
63#endif
64 [N_CPU] = { { [0] = 1UL } },
65#endif /* NUMA */
66};
67EXPORT_SYMBOL(node_states);
68
6c231b7b 69unsigned long totalram_pages __read_mostly;
cb45b0e9 70unsigned long totalreserve_pages __read_mostly;
1da177e4 71long nr_swap_pages;
8ad4b1fb 72int percpu_pagelist_fraction;
1da177e4 73
d9c23400
MG
74#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
75int pageblock_order __read_mostly;
76#endif
77
d98c7a09 78static void __free_pages_ok(struct page *page, unsigned int order);
a226f6c8 79
1da177e4
LT
80/*
81 * results with 256, 32 in the lowmem_reserve sysctl:
82 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
83 * 1G machine -> (16M dma, 784M normal, 224M high)
84 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
85 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 86 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
a2f1b424
AK
87 *
88 * TBD: should special case ZONE_DMA32 machines here - in those we normally
89 * don't need any ZONE_NORMAL reservation
1da177e4 90 */
2f1b6248 91int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
4b51d669 92#ifdef CONFIG_ZONE_DMA
2f1b6248 93 256,
4b51d669 94#endif
fb0e7942 95#ifdef CONFIG_ZONE_DMA32
2f1b6248 96 256,
fb0e7942 97#endif
e53ef38d 98#ifdef CONFIG_HIGHMEM
2a1e274a 99 32,
e53ef38d 100#endif
2a1e274a 101 32,
2f1b6248 102};
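/*
 * Worked example (illustrative only, not part of the original file): with
 * the default ratio of 256, a 784MB ZONE_NORMAL withholds roughly
 * 784M/256 ~= 3MB of ZONE_DMA from NORMAL allocations.  The real
 * calculation lives in setup_per_zone_lowmem_reserve(); the helper name
 * below is hypothetical.
 */
static inline unsigned long example_lowmem_reserve(unsigned long higher_zone_pages,
						   int ratio)
{
	return ratio ? higher_zone_pages / ratio : 0;
}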
1da177e4
LT
103
104EXPORT_SYMBOL(totalram_pages);
1da177e4 105
15ad7cdc 106static char * const zone_names[MAX_NR_ZONES] = {
4b51d669 107#ifdef CONFIG_ZONE_DMA
2f1b6248 108 "DMA",
4b51d669 109#endif
fb0e7942 110#ifdef CONFIG_ZONE_DMA32
2f1b6248 111 "DMA32",
fb0e7942 112#endif
2f1b6248 113 "Normal",
e53ef38d 114#ifdef CONFIG_HIGHMEM
2a1e274a 115 "HighMem",
e53ef38d 116#endif
2a1e274a 117 "Movable",
2f1b6248
CL
118};
119
1da177e4
LT
120int min_free_kbytes = 1024;
121
86356ab1
YG
122unsigned long __meminitdata nr_kernel_pages;
123unsigned long __meminitdata nr_all_pages;
a3142c8e 124static unsigned long __meminitdata dma_reserve;
1da177e4 125
c713216d
MG
126#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
127 /*
183ff22b 128 * MAX_ACTIVE_REGIONS determines the maximum number of distinct
c713216d
MG
129 * ranges of memory (RAM) that may be registered with add_active_range().
130 * Ranges passed to add_active_range() will be merged if possible
131 * so the number of times add_active_range() can be called is
132 * related to the number of nodes and the number of holes
133 */
134 #ifdef CONFIG_MAX_ACTIVE_REGIONS
135 /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
136 #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
137 #else
138 #if MAX_NUMNODES >= 32
139 /* If there can be many nodes, allow up to 50 holes per node */
140 #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
141 #else
142 /* By default, allow up to 256 distinct regions */
143 #define MAX_ACTIVE_REGIONS 256
144 #endif
145 #endif
146
98011f56
JB
147 static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
148 static int __meminitdata nr_nodemap_entries;
149 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
150 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
fb01439c 151#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
98011f56
JB
152 static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
153 static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
fb01439c 154#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
2a1e274a 155 unsigned long __initdata required_kernelcore;
484f51f8 156 static unsigned long __initdata required_movablecore;
e228929b 157 unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
2a1e274a
MG
158
159 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
160 int movable_zone;
161 EXPORT_SYMBOL(movable_zone);
c713216d
MG
162#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
163
418508c1
MS
164#if MAX_NUMNODES > 1
165int nr_node_ids __read_mostly = MAX_NUMNODES;
166EXPORT_SYMBOL(nr_node_ids);
167#endif
168
9ef9acb0
MG
169int page_group_by_mobility_disabled __read_mostly;
170
b2a0ac88
MG
171static void set_pageblock_migratetype(struct page *page, int migratetype)
172{
173 set_pageblock_flags_group(page, (unsigned long)migratetype,
174 PB_migrate, PB_migrate_end);
175}
176
13e7444b 177#ifdef CONFIG_DEBUG_VM
c6a57e19 178static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
1da177e4 179{
bdc8cb98
DH
180 int ret = 0;
181 unsigned seq;
182 unsigned long pfn = page_to_pfn(page);
c6a57e19 183
bdc8cb98
DH
184 do {
185 seq = zone_span_seqbegin(zone);
186 if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
187 ret = 1;
188 else if (pfn < zone->zone_start_pfn)
189 ret = 1;
190 } while (zone_span_seqretry(zone, seq));
191
192 return ret;
c6a57e19
DH
193}
194
195static int page_is_consistent(struct zone *zone, struct page *page)
196{
14e07298 197 if (!pfn_valid_within(page_to_pfn(page)))
c6a57e19 198 return 0;
1da177e4 199 if (zone != page_zone(page))
c6a57e19
DH
200 return 0;
201
202 return 1;
203}
204/*
205 * Temporary debugging check for pages not lying within a given zone.
206 */
207static int bad_range(struct zone *zone, struct page *page)
208{
209 if (page_outside_zone_boundaries(zone, page))
1da177e4 210 return 1;
c6a57e19
DH
211 if (!page_is_consistent(zone, page))
212 return 1;
213
1da177e4
LT
214 return 0;
215}
13e7444b
NP
216#else
217static inline int bad_range(struct zone *zone, struct page *page)
218{
219 return 0;
220}
221#endif
222
224abf92 223static void bad_page(struct page *page)
1da177e4 224{
9442ec9d
HD
225 void *pc = page_get_page_cgroup(page);
226
227 printk(KERN_EMERG "Bad page state in process '%s'\n" KERN_EMERG
228 "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
224abf92
NP
229 current->comm, page, (int)(2*sizeof(unsigned long)),
230 (unsigned long)page->flags, page->mapping,
231 page_mapcount(page), page_count(page));
9442ec9d
HD
232 if (pc) {
233 printk(KERN_EMERG "cgroup:%p\n", pc);
234 page_reset_bad_cgroup(page);
235 }
236 printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
237 KERN_EMERG "Backtrace:\n");
1da177e4 238 dump_stack();
334795ec
HD
239 page->flags &= ~(1 << PG_lru |
240 1 << PG_private |
1da177e4 241 1 << PG_locked |
1da177e4
LT
242 1 << PG_active |
243 1 << PG_dirty |
334795ec
HD
244 1 << PG_reclaim |
245 1 << PG_slab |
1da177e4 246 1 << PG_swapcache |
676165a8
NP
247 1 << PG_writeback |
248 1 << PG_buddy );
1da177e4
LT
249 set_page_count(page, 0);
250 reset_page_mapcount(page);
251 page->mapping = NULL;
9f158333 252 add_taint(TAINT_BAD_PAGE);
1da177e4
LT
253}
254
1da177e4
LT
255/*
256 * Higher-order pages are called "compound pages". They are structured thusly:
257 *
258 * The first PAGE_SIZE page is called the "head page".
259 *
260 * The remaining PAGE_SIZE pages are called "tail pages".
261 *
262 * All pages have PG_compound set. All pages have their ->private pointing at
263 * the head page (even the head page has this).
264 *
41d78ba5
HD
265 * The first tail page's ->lru.next holds the address of the compound page's
266 * put_page() function. Its ->lru.prev holds the order of allocation.
267 * This usage means that zero-order pages may not be compound.
1da177e4 268 */
d98c7a09
HD
269
270static void free_compound_page(struct page *page)
271{
d85f3385 272 __free_pages_ok(page, compound_order(page));
d98c7a09
HD
273}
274
1da177e4
LT
275static void prep_compound_page(struct page *page, unsigned long order)
276{
277 int i;
278 int nr_pages = 1 << order;
279
33f2ef89 280 set_compound_page_dtor(page, free_compound_page);
d85f3385 281 set_compound_order(page, order);
6d777953 282 __SetPageHead(page);
d85f3385 283 for (i = 1; i < nr_pages; i++) {
1da177e4
LT
284 struct page *p = page + i;
285
d85f3385 286 __SetPageTail(p);
d85f3385 287 p->first_page = page;
1da177e4
LT
288 }
289}
290
291static void destroy_compound_page(struct page *page, unsigned long order)
292{
293 int i;
294 int nr_pages = 1 << order;
295
d85f3385 296 if (unlikely(compound_order(page) != order))
224abf92 297 bad_page(page);
1da177e4 298
6d777953 299 if (unlikely(!PageHead(page)))
d85f3385 300 bad_page(page);
6d777953 301 __ClearPageHead(page);
d85f3385 302 for (i = 1; i < nr_pages; i++) {
1da177e4
LT
303 struct page *p = page + i;
304
6d777953 305 if (unlikely(!PageTail(p) |
d85f3385 306 (p->first_page != page)))
224abf92 307 bad_page(page);
d85f3385 308 __ClearPageTail(p);
1da177e4
LT
309 }
310}
1da177e4 311
17cf4406
NP
312static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
313{
314 int i;
315
6626c5d5
AM
316 /*
317 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
318 * and __GFP_HIGHMEM from hard or soft interrupt context.
319 */
725d704e 320 VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
17cf4406
NP
321 for (i = 0; i < (1 << order); i++)
322 clear_highpage(page + i);
323}
324
6aa3001b
AM
325static inline void set_page_order(struct page *page, int order)
326{
4c21e2f2 327 set_page_private(page, order);
676165a8 328 __SetPageBuddy(page);
1da177e4
LT
329}
330
331static inline void rmv_page_order(struct page *page)
332{
676165a8 333 __ClearPageBuddy(page);
4c21e2f2 334 set_page_private(page, 0);
1da177e4
LT
335}
336
337/*
338 * Locate the struct page for both the matching buddy in our
339 * pair (buddy1) and the combined O(n+1) page they form (page).
340 *
341 * 1) Any buddy B1 will have an order O twin B2 which satisfies
342 * the following equation:
343 * B2 = B1 ^ (1 << O)
344 * For example, if the starting buddy (buddy2) is #8 its order
345 * 1 buddy is #10:
346 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
347 *
348 * 2) Any buddy B will have an order O+1 parent P which
349 * satisfies the following equation:
350 * P = B & ~(1 << O)
351 *
d6e05edc 352 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
1da177e4
LT
353 */
354static inline struct page *
355__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
356{
357 unsigned long buddy_idx = page_idx ^ (1 << order);
358
359 return page + (buddy_idx - page_idx);
360}
361
362static inline unsigned long
363__find_combined_index(unsigned long page_idx, unsigned int order)
364{
365 return (page_idx & ~(1 << order));
366}
367
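/*
 * Worked example (illustrative, not part of the original file): for
 * page_idx = 8 and order = 1, __page_find_buddy() computes
 * buddy_idx = 8 ^ 2 = 10, and __find_combined_index() gives
 * 8 & ~2 = 8, the start of the order-2 block the merged pair forms.
 */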
368/*
 369 * This function checks whether a page is free && is the buddy.
 370 * We can coalesce a page and its buddy if
13e7444b 371 * (a) the buddy is not in a hole &&
676165a8 372 * (b) the buddy is in the buddy system &&
cb2b95e1
AW
373 * (c) a page and its buddy have the same order &&
374 * (d) a page and its buddy are in the same zone.
676165a8
NP
375 *
376 * For recording whether a page is in the buddy system, we use PG_buddy.
377 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
1da177e4 378 *
676165a8 379 * For recording page's order, we use page_private(page).
1da177e4 380 */
cb2b95e1
AW
381static inline int page_is_buddy(struct page *page, struct page *buddy,
382 int order)
1da177e4 383{
14e07298 384 if (!pfn_valid_within(page_to_pfn(buddy)))
13e7444b 385 return 0;
13e7444b 386
cb2b95e1
AW
387 if (page_zone_id(page) != page_zone_id(buddy))
388 return 0;
389
390 if (PageBuddy(buddy) && page_order(buddy) == order) {
391 BUG_ON(page_count(buddy) != 0);
6aa3001b 392 return 1;
676165a8 393 }
6aa3001b 394 return 0;
1da177e4
LT
395}
396
397/*
398 * Freeing function for a buddy system allocator.
399 *
400 * The concept of a buddy system is to maintain direct-mapped table
401 * (containing bit values) for memory blocks of various "orders".
402 * The bottom level table contains the map for the smallest allocatable
403 * units of memory (here, pages), and each level above it describes
404 * pairs of units from the levels below, hence, "buddies".
405 * At a high level, all that happens here is marking the table entry
406 * at the bottom level available, and propagating the changes upward
407 * as necessary, plus some accounting needed to play nicely with other
408 * parts of the VM system.
 409 * At each level, we keep a list of pages, which are heads of contiguous
676165a8 410 * free pages of length (1 << order) and marked with PG_buddy. Page's
4c21e2f2 411 * order is recorded in page_private(page) field.
1da177e4
LT
412 * So when we are allocating or freeing one, we can derive the state of the
413 * other. That is, if we allocate a small block, and both were
414 * free, the remainder of the region must be split into blocks.
415 * If a block is freed, and its buddy is also free, then this
416 * triggers coalescing into a block of larger size.
417 *
418 * -- wli
419 */
420
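/*
 * Illustrative sketch (not part of the original file): the coalescing
 * loop in __free_one_page() below reduces to this idea, shown with bare
 * indices instead of struct page.  All names here are hypothetical.
 */
static inline unsigned long coalesce_example(unsigned long page_idx,
		unsigned int *order,
		int (*buddy_is_free)(unsigned long idx, unsigned int order))
{
	while (*order < MAX_ORDER - 1) {
		unsigned long buddy_idx = page_idx ^ (1UL << *order);

		if (!buddy_is_free(buddy_idx, *order))
			break;
		/* the merged block starts at the lower of the two buddies */
		page_idx &= ~(1UL << *order);
		(*order)++;
	}
	return page_idx;
}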
48db57f8 421static inline void __free_one_page(struct page *page,
1da177e4
LT
422 struct zone *zone, unsigned int order)
423{
424 unsigned long page_idx;
425 int order_size = 1 << order;
b2a0ac88 426 int migratetype = get_pageblock_migratetype(page);
1da177e4 427
224abf92 428 if (unlikely(PageCompound(page)))
1da177e4
LT
429 destroy_compound_page(page, order);
430
431 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
432
725d704e
NP
433 VM_BUG_ON(page_idx & (order_size - 1));
434 VM_BUG_ON(bad_range(zone, page));
1da177e4 435
d23ad423 436 __mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
1da177e4
LT
437 while (order < MAX_ORDER-1) {
438 unsigned long combined_idx;
1da177e4
LT
439 struct page *buddy;
440
1da177e4 441 buddy = __page_find_buddy(page, page_idx, order);
cb2b95e1 442 if (!page_is_buddy(page, buddy, order))
1da177e4 443 break; /* Move the buddy up one level. */
13e7444b 444
1da177e4 445 list_del(&buddy->lru);
b2a0ac88 446 zone->free_area[order].nr_free--;
1da177e4 447 rmv_page_order(buddy);
13e7444b 448 combined_idx = __find_combined_index(page_idx, order);
1da177e4
LT
449 page = page + (combined_idx - page_idx);
450 page_idx = combined_idx;
451 order++;
452 }
453 set_page_order(page, order);
b2a0ac88
MG
454 list_add(&page->lru,
455 &zone->free_area[order].free_list[migratetype]);
1da177e4
LT
456 zone->free_area[order].nr_free++;
457}
458
224abf92 459static inline int free_pages_check(struct page *page)
1da177e4 460{
92be2e33
NP
461 if (unlikely(page_mapcount(page) |
462 (page->mapping != NULL) |
9442ec9d 463 (page_get_page_cgroup(page) != NULL) |
92be2e33 464 (page_count(page) != 0) |
1da177e4
LT
465 (page->flags & (
466 1 << PG_lru |
467 1 << PG_private |
468 1 << PG_locked |
469 1 << PG_active |
1da177e4
LT
470 1 << PG_slab |
471 1 << PG_swapcache |
b5810039 472 1 << PG_writeback |
676165a8
NP
473 1 << PG_reserved |
474 1 << PG_buddy ))))
224abf92 475 bad_page(page);
1da177e4 476 if (PageDirty(page))
242e5468 477 __ClearPageDirty(page);
689bcebf
HD
478 /*
479 * For now, we report if PG_reserved was found set, but do not
480 * clear it, and do not free the page. But we shall soon need
481 * to do more, for when the ZERO_PAGE count wraps negative.
482 */
483 return PageReserved(page);
1da177e4
LT
484}
485
486/*
487 * Frees a list of pages.
488 * Assumes all pages on list are in same zone, and of same order.
207f36ee 489 * count is the number of pages to free.
1da177e4
LT
490 *
491 * If the zone was previously in an "all pages pinned" state then look to
492 * see if this freeing clears that state.
493 *
494 * And clear the zone's pages_scanned counter, to hold off the "all pages are
495 * pinned" detection logic.
496 */
48db57f8
NP
497static void free_pages_bulk(struct zone *zone, int count,
498 struct list_head *list, int order)
1da177e4 499{
c54ad30c 500 spin_lock(&zone->lock);
e815af95 501 zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
1da177e4 502 zone->pages_scanned = 0;
48db57f8
NP
503 while (count--) {
504 struct page *page;
505
725d704e 506 VM_BUG_ON(list_empty(list));
1da177e4 507 page = list_entry(list->prev, struct page, lru);
48db57f8 508 /* have to delete it as __free_one_page list manipulates */
1da177e4 509 list_del(&page->lru);
48db57f8 510 __free_one_page(page, zone, order);
1da177e4 511 }
c54ad30c 512 spin_unlock(&zone->lock);
1da177e4
LT
513}
514
48db57f8 515static void free_one_page(struct zone *zone, struct page *page, int order)
1da177e4 516{
006d22d9 517 spin_lock(&zone->lock);
e815af95 518 zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
006d22d9 519 zone->pages_scanned = 0;
0798e519 520 __free_one_page(page, zone, order);
006d22d9 521 spin_unlock(&zone->lock);
48db57f8
NP
522}
523
524static void __free_pages_ok(struct page *page, unsigned int order)
525{
526 unsigned long flags;
1da177e4 527 int i;
689bcebf 528 int reserved = 0;
1da177e4 529
1da177e4 530 for (i = 0 ; i < (1 << order) ; ++i)
224abf92 531 reserved += free_pages_check(page + i);
689bcebf
HD
532 if (reserved)
533 return;
534
9858db50
NP
535 if (!PageHighMem(page))
536 debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
dafb1367 537 arch_free_page(page, order);
48db57f8 538 kernel_map_pages(page, 1 << order, 0);
dafb1367 539
c54ad30c 540 local_irq_save(flags);
f8891e5e 541 __count_vm_events(PGFREE, 1 << order);
48db57f8 542 free_one_page(page_zone(page), page, order);
c54ad30c 543 local_irq_restore(flags);
1da177e4
LT
544}
545
a226f6c8
DH
546/*
547 * permit the bootmem allocator to evade page validation on high-order frees
548 */
920c7a5d 549void __init __free_pages_bootmem(struct page *page, unsigned int order)
a226f6c8
DH
550{
551 if (order == 0) {
552 __ClearPageReserved(page);
553 set_page_count(page, 0);
7835e98b 554 set_page_refcounted(page);
545b1ea9 555 __free_page(page);
a226f6c8 556 } else {
a226f6c8
DH
557 int loop;
558
545b1ea9 559 prefetchw(page);
a226f6c8
DH
560 for (loop = 0; loop < BITS_PER_LONG; loop++) {
561 struct page *p = &page[loop];
562
545b1ea9
NP
563 if (loop + 1 < BITS_PER_LONG)
564 prefetchw(p + 1);
a226f6c8
DH
565 __ClearPageReserved(p);
566 set_page_count(p, 0);
567 }
568
7835e98b 569 set_page_refcounted(page);
545b1ea9 570 __free_pages(page, order);
a226f6c8
DH
571 }
572}
573
1da177e4
LT
574
575/*
576 * The order of subdivision here is critical for the IO subsystem.
577 * Please do not alter this order without good reasons and regression
578 * testing. Specifically, as large blocks of memory are subdivided,
579 * the order in which smaller blocks are delivered depends on the order
580 * they're subdivided in this function. This is the primary factor
581 * influencing the order in which pages are delivered to the IO
582 * subsystem according to empirical testing, and this is also justified
583 * by considering the behavior of a buddy system containing a single
584 * large block of memory acted on by a series of small allocations.
585 * This behavior is a critical factor in sglist merging's success.
586 *
587 * -- wli
588 */
085cc7d5 589static inline void expand(struct zone *zone, struct page *page,
b2a0ac88
MG
590 int low, int high, struct free_area *area,
591 int migratetype)
1da177e4
LT
592{
593 unsigned long size = 1 << high;
594
595 while (high > low) {
596 area--;
597 high--;
598 size >>= 1;
725d704e 599 VM_BUG_ON(bad_range(zone, &page[size]));
b2a0ac88 600 list_add(&page[size].lru, &area->free_list[migratetype]);
1da177e4
LT
601 area->nr_free++;
602 set_page_order(&page[size], high);
603 }
1da177e4
LT
604}
605
1da177e4
LT
606/*
607 * This page is about to be returned from the page allocator
608 */
17cf4406 609static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
1da177e4 610{
92be2e33
NP
611 if (unlikely(page_mapcount(page) |
612 (page->mapping != NULL) |
9442ec9d 613 (page_get_page_cgroup(page) != NULL) |
92be2e33 614 (page_count(page) != 0) |
334795ec
HD
615 (page->flags & (
616 1 << PG_lru |
1da177e4
LT
617 1 << PG_private |
618 1 << PG_locked |
1da177e4
LT
619 1 << PG_active |
620 1 << PG_dirty |
334795ec 621 1 << PG_slab |
1da177e4 622 1 << PG_swapcache |
b5810039 623 1 << PG_writeback |
676165a8
NP
624 1 << PG_reserved |
625 1 << PG_buddy ))))
224abf92 626 bad_page(page);
1da177e4 627
689bcebf
HD
628 /*
629 * For now, we report if PG_reserved was found set, but do not
630 * clear it, and do not allocate the page: as a safety net.
631 */
632 if (PageReserved(page))
633 return 1;
634
d77c2d7c 635 page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_readahead |
1da177e4 636 1 << PG_referenced | 1 << PG_arch_1 |
5409bae0 637 1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
4c21e2f2 638 set_page_private(page, 0);
7835e98b 639 set_page_refcounted(page);
cc102509
NP
640
641 arch_alloc_page(page, order);
1da177e4 642 kernel_map_pages(page, 1 << order, 1);
17cf4406
NP
643
644 if (gfp_flags & __GFP_ZERO)
645 prep_zero_page(page, order, gfp_flags);
646
647 if (order && (gfp_flags & __GFP_COMP))
648 prep_compound_page(page, order);
649
689bcebf 650 return 0;
1da177e4
LT
651}
652
56fd56b8
MG
653/*
654 * Go through the free lists for the given migratetype and remove
655 * the smallest available page from the freelists
656 */
657static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
658 int migratetype)
659{
660 unsigned int current_order;
661 struct free_area * area;
662 struct page *page;
663
664 /* Find a page of the appropriate size in the preferred list */
665 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
666 area = &(zone->free_area[current_order]);
667 if (list_empty(&area->free_list[migratetype]))
668 continue;
669
670 page = list_entry(area->free_list[migratetype].next,
671 struct page, lru);
672 list_del(&page->lru);
673 rmv_page_order(page);
674 area->nr_free--;
675 __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
676 expand(zone, page, order, current_order, area, migratetype);
677 return page;
678 }
679
680 return NULL;
681}
682
683
b2a0ac88
MG
684/*
685 * This array describes the order lists are fallen back to when
686 * the free lists for the desirable migrate type are depleted
687 */
688static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
64c5e135
MG
689 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
690 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
691 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
692 [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */
b2a0ac88
MG
693};
694
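/*
 * Example (illustrative, not part of the original file): when the
 * MIGRATE_UNMOVABLE free lists are empty, __rmqueue_fallback() below
 * tries MIGRATE_RECLAIMABLE, then MIGRATE_MOVABLE, and only as a last
 * resort takes pages from MIGRATE_RESERVE via __rmqueue_smallest().
 */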
c361be55
MG
695/*
696 * Move the free pages in a range to the free lists of the requested type.
d9c23400 697 * Note that start_page and end_page are not aligned on a pageblock
c361be55
MG
698 * boundary. If alignment is required, use move_freepages_block()
699 */
700int move_freepages(struct zone *zone,
701 struct page *start_page, struct page *end_page,
702 int migratetype)
703{
704 struct page *page;
705 unsigned long order;
d100313f 706 int pages_moved = 0;
c361be55
MG
707
708#ifndef CONFIG_HOLES_IN_ZONE
709 /*
710 * page_zone is not safe to call in this context when
711 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
712 * anyway as we check zone boundaries in move_freepages_block().
713 * Remove at a later date when no bug reports exist related to
ac0e5b7a 714 * grouping pages by mobility
c361be55
MG
715 */
716 BUG_ON(page_zone(start_page) != page_zone(end_page));
717#endif
718
719 for (page = start_page; page <= end_page;) {
720 if (!pfn_valid_within(page_to_pfn(page))) {
721 page++;
722 continue;
723 }
724
725 if (!PageBuddy(page)) {
726 page++;
727 continue;
728 }
729
730 order = page_order(page);
731 list_del(&page->lru);
732 list_add(&page->lru,
733 &zone->free_area[order].free_list[migratetype]);
734 page += 1 << order;
d100313f 735 pages_moved += 1 << order;
c361be55
MG
736 }
737
d100313f 738 return pages_moved;
c361be55
MG
739}
740
741int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
742{
743 unsigned long start_pfn, end_pfn;
744 struct page *start_page, *end_page;
745
746 start_pfn = page_to_pfn(page);
d9c23400 747 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
c361be55 748 start_page = pfn_to_page(start_pfn);
d9c23400
MG
749 end_page = start_page + pageblock_nr_pages - 1;
750 end_pfn = start_pfn + pageblock_nr_pages - 1;
c361be55
MG
751
752 /* Do not cross zone boundaries */
753 if (start_pfn < zone->zone_start_pfn)
754 start_page = page;
755 if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
756 return 0;
757
758 return move_freepages(zone, start_page, end_page, migratetype);
759}
760
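/*
 * Illustrative sketch (not part of the original file): how a pfn is
 * rounded down to its pageblock boundary, mirroring the masking done in
 * move_freepages_block() above.  The helper name is hypothetical.
 */
static inline unsigned long example_pageblock_start_pfn(unsigned long pfn)
{
	return pfn & ~(pageblock_nr_pages - 1);
}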
b2a0ac88
MG
761/* Remove an element from the buddy allocator from the fallback list */
762static struct page *__rmqueue_fallback(struct zone *zone, int order,
763 int start_migratetype)
764{
765 struct free_area * area;
766 int current_order;
767 struct page *page;
768 int migratetype, i;
769
770 /* Find the largest possible block of pages in the other list */
771 for (current_order = MAX_ORDER-1; current_order >= order;
772 --current_order) {
773 for (i = 0; i < MIGRATE_TYPES - 1; i++) {
774 migratetype = fallbacks[start_migratetype][i];
775
56fd56b8
MG
776 /* MIGRATE_RESERVE handled later if necessary */
777 if (migratetype == MIGRATE_RESERVE)
778 continue;
e010487d 779
b2a0ac88
MG
780 area = &(zone->free_area[current_order]);
781 if (list_empty(&area->free_list[migratetype]))
782 continue;
783
784 page = list_entry(area->free_list[migratetype].next,
785 struct page, lru);
786 area->nr_free--;
787
788 /*
c361be55 789 * If breaking a large block of pages, move all free
46dafbca
MG
790 * pages to the preferred allocation list. If falling
791 * back for a reclaimable kernel allocation, be more
 792 * aggressive about taking ownership of free pages
b2a0ac88 793 */
d9c23400 794 if (unlikely(current_order >= (pageblock_order >> 1)) ||
46dafbca
MG
795 start_migratetype == MIGRATE_RECLAIMABLE) {
796 unsigned long pages;
797 pages = move_freepages_block(zone, page,
798 start_migratetype);
799
800 /* Claim the whole block if over half of it is free */
d9c23400 801 if (pages >= (1 << (pageblock_order-1)))
46dafbca
MG
802 set_pageblock_migratetype(page,
803 start_migratetype);
804
b2a0ac88 805 migratetype = start_migratetype;
c361be55 806 }
b2a0ac88
MG
807
808 /* Remove the page from the freelists */
809 list_del(&page->lru);
810 rmv_page_order(page);
811 __mod_zone_page_state(zone, NR_FREE_PAGES,
812 -(1UL << order));
813
d9c23400 814 if (current_order == pageblock_order)
b2a0ac88
MG
815 set_pageblock_migratetype(page,
816 start_migratetype);
817
818 expand(zone, page, order, current_order, area, migratetype);
819 return page;
820 }
821 }
822
56fd56b8
MG
823 /* Use MIGRATE_RESERVE rather than fail an allocation */
824 return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
b2a0ac88
MG
825}
826
56fd56b8 827/*
1da177e4
LT
828 * Do the hard work of removing an element from the buddy allocator.
829 * Call me with the zone->lock already held.
830 */
b2a0ac88
MG
831static struct page *__rmqueue(struct zone *zone, unsigned int order,
832 int migratetype)
1da177e4 833{
1da177e4
LT
834 struct page *page;
835
56fd56b8 836 page = __rmqueue_smallest(zone, order, migratetype);
b2a0ac88 837
56fd56b8
MG
838 if (unlikely(!page))
839 page = __rmqueue_fallback(zone, order, migratetype);
b2a0ac88
MG
840
841 return page;
1da177e4
LT
842}
843
844/*
845 * Obtain a specified number of elements from the buddy allocator, all under
846 * a single hold of the lock, for efficiency. Add them to the supplied list.
847 * Returns the number of new pages which were placed at *list.
848 */
849static int rmqueue_bulk(struct zone *zone, unsigned int order,
b2a0ac88
MG
850 unsigned long count, struct list_head *list,
851 int migratetype)
1da177e4 852{
1da177e4 853 int i;
1da177e4 854
c54ad30c 855 spin_lock(&zone->lock);
1da177e4 856 for (i = 0; i < count; ++i) {
b2a0ac88 857 struct page *page = __rmqueue(zone, order, migratetype);
085cc7d5 858 if (unlikely(page == NULL))
1da177e4 859 break;
81eabcbe
MG
860
861 /*
862 * Split buddy pages returned by expand() are received here
 863 * in physical page order. The page is added to the caller's list
 864 * and the list head then moves forward. From the caller's
865 * perspective, the linked list is ordered by page number in
866 * some conditions. This is useful for IO devices that can
867 * merge IO requests if the physical pages are ordered
868 * properly.
869 */
535131e6
MG
870 list_add(&page->lru, list);
871 set_page_private(page, migratetype);
81eabcbe 872 list = &page->lru;
1da177e4 873 }
c54ad30c 874 spin_unlock(&zone->lock);
085cc7d5 875 return i;
1da177e4
LT
876}
877
4ae7c039 878#ifdef CONFIG_NUMA
8fce4d8e 879/*
4037d452
CL
880 * Called from the vmstat counter updater to drain pagesets of this
881 * currently executing processor on remote nodes after they have
882 * expired.
883 *
879336c3
CL
884 * Note that this function must be called with the thread pinned to
885 * a single processor.
8fce4d8e 886 */
4037d452 887void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
4ae7c039 888{
4ae7c039 889 unsigned long flags;
4037d452 890 int to_drain;
4ae7c039 891
4037d452
CL
892 local_irq_save(flags);
893 if (pcp->count >= pcp->batch)
894 to_drain = pcp->batch;
895 else
896 to_drain = pcp->count;
897 free_pages_bulk(zone, to_drain, &pcp->list, 0);
898 pcp->count -= to_drain;
899 local_irq_restore(flags);
4ae7c039
CL
900}
901#endif
902
9f8f2172
CL
903/*
904 * Drain pages of the indicated processor.
905 *
906 * The processor must either be the current processor and the
907 * thread pinned to the current processor or a processor that
908 * is not online.
909 */
910static void drain_pages(unsigned int cpu)
1da177e4 911{
c54ad30c 912 unsigned long flags;
1da177e4 913 struct zone *zone;
1da177e4
LT
914
915 for_each_zone(zone) {
916 struct per_cpu_pageset *pset;
3dfa5721 917 struct per_cpu_pages *pcp;
1da177e4 918
f2e12bb2
CL
919 if (!populated_zone(zone))
920 continue;
921
e7c8d5c9 922 pset = zone_pcp(zone, cpu);
3dfa5721
CL
923
924 pcp = &pset->pcp;
925 local_irq_save(flags);
926 free_pages_bulk(zone, pcp->count, &pcp->list, 0);
927 pcp->count = 0;
928 local_irq_restore(flags);
1da177e4
LT
929 }
930}
1da177e4 931
9f8f2172
CL
932/*
933 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
934 */
935void drain_local_pages(void *arg)
936{
937 drain_pages(smp_processor_id());
938}
939
940/*
941 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
942 */
943void drain_all_pages(void)
944{
945 on_each_cpu(drain_local_pages, NULL, 0, 1);
946}
947
296699de 948#ifdef CONFIG_HIBERNATION
1da177e4
LT
949
950void mark_free_pages(struct zone *zone)
951{
f623f0db
RW
952 unsigned long pfn, max_zone_pfn;
953 unsigned long flags;
b2a0ac88 954 int order, t;
1da177e4
LT
955 struct list_head *curr;
956
957 if (!zone->spanned_pages)
958 return;
959
960 spin_lock_irqsave(&zone->lock, flags);
f623f0db
RW
961
962 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
963 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
964 if (pfn_valid(pfn)) {
965 struct page *page = pfn_to_page(pfn);
966
7be98234
RW
967 if (!swsusp_page_is_forbidden(page))
968 swsusp_unset_page_free(page);
f623f0db 969 }
1da177e4 970
b2a0ac88
MG
971 for_each_migratetype_order(order, t) {
972 list_for_each(curr, &zone->free_area[order].free_list[t]) {
f623f0db 973 unsigned long i;
1da177e4 974
f623f0db
RW
975 pfn = page_to_pfn(list_entry(curr, struct page, lru));
976 for (i = 0; i < (1UL << order); i++)
7be98234 977 swsusp_set_page_free(pfn_to_page(pfn + i));
f623f0db 978 }
b2a0ac88 979 }
1da177e4
LT
980 spin_unlock_irqrestore(&zone->lock, flags);
981}
e2c55dc8 982#endif /* CONFIG_HIBERNATION */
1da177e4 983
1da177e4
LT
984/*
985 * Free a 0-order page
986 */
920c7a5d 987static void free_hot_cold_page(struct page *page, int cold)
1da177e4
LT
988{
989 struct zone *zone = page_zone(page);
990 struct per_cpu_pages *pcp;
991 unsigned long flags;
992
1da177e4
LT
993 if (PageAnon(page))
994 page->mapping = NULL;
224abf92 995 if (free_pages_check(page))
689bcebf
HD
996 return;
997
9858db50
NP
998 if (!PageHighMem(page))
999 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
dafb1367 1000 arch_free_page(page, 0);
689bcebf
HD
1001 kernel_map_pages(page, 1, 0);
1002
3dfa5721 1003 pcp = &zone_pcp(zone, get_cpu())->pcp;
1da177e4 1004 local_irq_save(flags);
f8891e5e 1005 __count_vm_event(PGFREE);
3dfa5721
CL
1006 if (cold)
1007 list_add_tail(&page->lru, &pcp->list);
1008 else
1009 list_add(&page->lru, &pcp->list);
535131e6 1010 set_page_private(page, get_pageblock_migratetype(page));
1da177e4 1011 pcp->count++;
48db57f8
NP
1012 if (pcp->count >= pcp->high) {
1013 free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
1014 pcp->count -= pcp->batch;
1015 }
1da177e4
LT
1016 local_irq_restore(flags);
1017 put_cpu();
1018}
1019
920c7a5d 1020void free_hot_page(struct page *page)
1da177e4
LT
1021{
1022 free_hot_cold_page(page, 0);
1023}
1024
920c7a5d 1025void free_cold_page(struct page *page)
1da177e4
LT
1026{
1027 free_hot_cold_page(page, 1);
1028}
1029
8dfcc9ba
NP
1030/*
1031 * split_page takes a non-compound higher-order page, and splits it into
1032 * n (1<<order) sub-pages: page[0..n]
1033 * Each sub-page must be freed individually.
1034 *
1035 * Note: this is probably too low level an operation for use in drivers.
1036 * Please consult with lkml before using this in your driver.
1037 */
1038void split_page(struct page *page, unsigned int order)
1039{
1040 int i;
1041
725d704e
NP
1042 VM_BUG_ON(PageCompound(page));
1043 VM_BUG_ON(!page_count(page));
7835e98b
NP
1044 for (i = 1; i < (1 << order); i++)
1045 set_page_refcounted(page + i);
8dfcc9ba 1046}
8dfcc9ba 1047
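/*
 * Usage sketch (hypothetical caller, not part of this file): break an
 * order-2 allocation into four order-0 pages that can be freed
 * independently.
 */
static inline void split_page_example(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);	/* 4 contiguous pages */

	if (!page)
		return;
	split_page(page, 2);
	__free_page(page + 3);	/* each sub-page is now freed on its own */
}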
1da177e4
LT
1048/*
1049 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
1050 * we cheat by calling it from here, in the order > 0 path. Saves a branch
1051 * or two.
1052 */
18ea7e71 1053static struct page *buffered_rmqueue(struct zone *preferred_zone,
a74609fa 1054 struct zone *zone, int order, gfp_t gfp_flags)
1da177e4
LT
1055{
1056 unsigned long flags;
689bcebf 1057 struct page *page;
1da177e4 1058 int cold = !!(gfp_flags & __GFP_COLD);
a74609fa 1059 int cpu;
64c5e135 1060 int migratetype = allocflags_to_migratetype(gfp_flags);
1da177e4 1061
689bcebf 1062again:
a74609fa 1063 cpu = get_cpu();
48db57f8 1064 if (likely(order == 0)) {
1da177e4
LT
1065 struct per_cpu_pages *pcp;
1066
3dfa5721 1067 pcp = &zone_pcp(zone, cpu)->pcp;
1da177e4 1068 local_irq_save(flags);
a74609fa 1069 if (!pcp->count) {
941c7105 1070 pcp->count = rmqueue_bulk(zone, 0,
b2a0ac88 1071 pcp->batch, &pcp->list, migratetype);
a74609fa
NP
1072 if (unlikely(!pcp->count))
1073 goto failed;
1da177e4 1074 }
b92a6edd 1075
535131e6 1076 /* Find a page of the appropriate migrate type */
3dfa5721
CL
1077 if (cold) {
1078 list_for_each_entry_reverse(page, &pcp->list, lru)
1079 if (page_private(page) == migratetype)
1080 break;
1081 } else {
1082 list_for_each_entry(page, &pcp->list, lru)
1083 if (page_private(page) == migratetype)
1084 break;
1085 }
535131e6 1086
b92a6edd
MG
1087 /* Allocate more to the pcp list if necessary */
1088 if (unlikely(&page->lru == &pcp->list)) {
535131e6
MG
1089 pcp->count += rmqueue_bulk(zone, 0,
1090 pcp->batch, &pcp->list, migratetype);
1091 page = list_entry(pcp->list.next, struct page, lru);
535131e6 1092 }
b92a6edd
MG
1093
1094 list_del(&page->lru);
1095 pcp->count--;
7fb1d9fc 1096 } else {
1da177e4 1097 spin_lock_irqsave(&zone->lock, flags);
b2a0ac88 1098 page = __rmqueue(zone, order, migratetype);
a74609fa
NP
1099 spin_unlock(&zone->lock);
1100 if (!page)
1101 goto failed;
1da177e4
LT
1102 }
1103
f8891e5e 1104 __count_zone_vm_events(PGALLOC, zone, 1 << order);
18ea7e71 1105 zone_statistics(preferred_zone, zone);
a74609fa
NP
1106 local_irq_restore(flags);
1107 put_cpu();
1da177e4 1108
725d704e 1109 VM_BUG_ON(bad_range(zone, page));
17cf4406 1110 if (prep_new_page(page, order, gfp_flags))
a74609fa 1111 goto again;
1da177e4 1112 return page;
a74609fa
NP
1113
1114failed:
1115 local_irq_restore(flags);
1116 put_cpu();
1117 return NULL;
1da177e4
LT
1118}
1119
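/*
 * The zone_statistics(preferred_zone, zone) call above is the point of
 * this commit: the NUMA hit/miss counters are accounted against the zone
 * the caller actually preferred, not just the zone that happened to
 * satisfy the request.  A simplified sketch of that accounting follows
 * (the real implementation lives in mm/vmstat.c; treat this as an
 * illustration, not the exact code):
 */
#if 0	/* illustrative only */
static void zone_statistics_sketch(struct zone *preferred_zone, struct zone *z)
{
	if (z->zone_pgdat == preferred_zone->zone_pgdat)
		__inc_zone_state(z, NUMA_HIT);
	else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
	}
}
#endif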
7fb1d9fc 1120#define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */
3148890b
NP
1121#define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */
1122#define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */
1123#define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */
1124#define ALLOC_HARDER 0x10 /* try to alloc harder */
1125#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
1126#define ALLOC_CPUSET 0x40 /* check for correct cpuset */
7fb1d9fc 1127
933e312e
AM
1128#ifdef CONFIG_FAIL_PAGE_ALLOC
1129
1130static struct fail_page_alloc_attr {
1131 struct fault_attr attr;
1132
1133 u32 ignore_gfp_highmem;
1134 u32 ignore_gfp_wait;
54114994 1135 u32 min_order;
933e312e
AM
1136
1137#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1138
1139 struct dentry *ignore_gfp_highmem_file;
1140 struct dentry *ignore_gfp_wait_file;
54114994 1141 struct dentry *min_order_file;
933e312e
AM
1142
1143#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1144
1145} fail_page_alloc = {
1146 .attr = FAULT_ATTR_INITIALIZER,
6b1b60f4
DM
1147 .ignore_gfp_wait = 1,
1148 .ignore_gfp_highmem = 1,
54114994 1149 .min_order = 1,
933e312e
AM
1150};
1151
1152static int __init setup_fail_page_alloc(char *str)
1153{
1154 return setup_fault_attr(&fail_page_alloc.attr, str);
1155}
1156__setup("fail_page_alloc=", setup_fail_page_alloc);
1157
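/*
 * Usage note (not part of the original file): like other fault-injection
 * attributes, this is normally driven from the boot command line as
 * fail_page_alloc=<interval>,<probability>,<space>,<times> and tuned at
 * run time through the debugfs entries created below; see
 * Documentation/fault-injection/fault-injection.txt.
 */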
1158static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1159{
54114994
AM
1160 if (order < fail_page_alloc.min_order)
1161 return 0;
933e312e
AM
1162 if (gfp_mask & __GFP_NOFAIL)
1163 return 0;
1164 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1165 return 0;
1166 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1167 return 0;
1168
1169 return should_fail(&fail_page_alloc.attr, 1 << order);
1170}
1171
1172#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1173
1174static int __init fail_page_alloc_debugfs(void)
1175{
1176 mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1177 struct dentry *dir;
1178 int err;
1179
1180 err = init_fault_attr_dentries(&fail_page_alloc.attr,
1181 "fail_page_alloc");
1182 if (err)
1183 return err;
1184 dir = fail_page_alloc.attr.dentries.dir;
1185
1186 fail_page_alloc.ignore_gfp_wait_file =
1187 debugfs_create_bool("ignore-gfp-wait", mode, dir,
1188 &fail_page_alloc.ignore_gfp_wait);
1189
1190 fail_page_alloc.ignore_gfp_highmem_file =
1191 debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1192 &fail_page_alloc.ignore_gfp_highmem);
54114994
AM
1193 fail_page_alloc.min_order_file =
1194 debugfs_create_u32("min-order", mode, dir,
1195 &fail_page_alloc.min_order);
933e312e
AM
1196
1197 if (!fail_page_alloc.ignore_gfp_wait_file ||
54114994
AM
1198 !fail_page_alloc.ignore_gfp_highmem_file ||
1199 !fail_page_alloc.min_order_file) {
933e312e
AM
1200 err = -ENOMEM;
1201 debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
1202 debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
54114994 1203 debugfs_remove(fail_page_alloc.min_order_file);
933e312e
AM
1204 cleanup_fault_attr_dentries(&fail_page_alloc.attr);
1205 }
1206
1207 return err;
1208}
1209
1210late_initcall(fail_page_alloc_debugfs);
1211
1212#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1213
1214#else /* CONFIG_FAIL_PAGE_ALLOC */
1215
1216static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1217{
1218 return 0;
1219}
1220
1221#endif /* CONFIG_FAIL_PAGE_ALLOC */
1222
1da177e4
LT
1223/*
1224 * Return 1 if free pages are above 'mark'. This takes into account the order
1225 * of the allocation.
1226 */
1227int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
7fb1d9fc 1228 int classzone_idx, int alloc_flags)
1da177e4
LT
1229{
 1230 /* free_pages may go negative - that's OK */
d23ad423
CL
1231 long min = mark;
1232 long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
1da177e4
LT
1233 int o;
1234
7fb1d9fc 1235 if (alloc_flags & ALLOC_HIGH)
1da177e4 1236 min -= min / 2;
7fb1d9fc 1237 if (alloc_flags & ALLOC_HARDER)
1da177e4
LT
1238 min -= min / 4;
1239
1240 if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1241 return 0;
1242 for (o = 0; o < order; o++) {
1243 /* At the next order, this order's pages become unavailable */
1244 free_pages -= z->free_area[o].nr_free << o;
1245
1246 /* Require fewer higher order pages to be free */
1247 min >>= 1;
1248
1249 if (free_pages <= min)
1250 return 0;
1251 }
1252 return 1;
1253}
1254
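/*
 * Worked example (illustrative, not part of the original file): an
 * order-2 request against mark = 128 first needs free_pages to exceed
 * 128 plus the lowmem reserve, then (after subtracting the order-0 free
 * pages) to exceed 64, then (after also subtracting the order-1 free
 * pages) to exceed 32; the order of the request itself is not checked.
 * ALLOC_HIGH halves the mark up front and ALLOC_HARDER removes another
 * quarter, letting atomic/realtime allocations dip deeper into reserves.
 */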
9276b1bc
PJ
1255#ifdef CONFIG_NUMA
1256/*
1257 * zlc_setup - Setup for "zonelist cache". Uses cached zone data to
1258 * skip over zones that are not allowed by the cpuset, or that have
1259 * been recently (in last second) found to be nearly full. See further
1260 * comments in mmzone.h. Reduces cache footprint of zonelist scans
183ff22b 1261 * that have to skip over a lot of full or unallowed zones.
9276b1bc
PJ
1262 *
1263 * If the zonelist cache is present in the passed in zonelist, then
1264 * returns a pointer to the allowed node mask (either the current
37b07e41 1265 * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
9276b1bc
PJ
1266 *
1267 * If the zonelist cache is not available for this zonelist, does
1268 * nothing and returns NULL.
1269 *
1270 * If the fullzones BITMAP in the zonelist cache is stale (more than
1271 * a second since last zap'd) then we zap it out (clear its bits.)
1272 *
1273 * We hold off even calling zlc_setup, until after we've checked the
1274 * first zone in the zonelist, on the theory that most allocations will
1275 * be satisfied from that first zone, so best to examine that zone as
1276 * quickly as we can.
1277 */
1278static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1279{
1280 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1281 nodemask_t *allowednodes; /* zonelist_cache approximation */
1282
1283 zlc = zonelist->zlcache_ptr;
1284 if (!zlc)
1285 return NULL;
1286
10ed273f 1287 if (time_after(jiffies, zlc->last_full_zap + HZ)) {
9276b1bc
PJ
1288 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1289 zlc->last_full_zap = jiffies;
1290 }
1291
1292 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1293 &cpuset_current_mems_allowed :
37b07e41 1294 &node_states[N_HIGH_MEMORY];
9276b1bc
PJ
1295 return allowednodes;
1296}
1297
1298/*
1299 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1300 * if it is worth looking at further for free memory:
1301 * 1) Check that the zone isn't thought to be full (doesn't have its
1302 * bit set in the zonelist_cache fullzones BITMAP).
1303 * 2) Check that the zones node (obtained from the zonelist_cache
1304 * z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1305 * Return true (non-zero) if zone is worth looking at further, or
1306 * else return false (zero) if it is not.
1307 *
1308 * This check -ignores- the distinction between various watermarks,
1309 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is
1310 * found to be full for any variation of these watermarks, it will
1311 * be considered full for up to one second by all requests, unless
1312 * we are so low on memory on all allowed nodes that we are forced
1313 * into the second scan of the zonelist.
1314 *
1315 * In the second scan we ignore this zonelist cache and exactly
 1316 * apply the watermarks to all zones, even if it is slower to do so.
1317 * We are low on memory in the second scan, and should leave no stone
1318 * unturned looking for a free page.
1319 */
1320static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
1321 nodemask_t *allowednodes)
1322{
1323 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1324 int i; /* index of *z in zonelist zones */
1325 int n; /* node that zone *z is on */
1326
1327 zlc = zonelist->zlcache_ptr;
1328 if (!zlc)
1329 return 1;
1330
1331 i = z - zonelist->zones;
1332 n = zlc->z_to_n[i];
1333
1334 /* This zone is worth trying if it is allowed but not full */
1335 return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1336}
1337
1338/*
1339 * Given 'z' scanning a zonelist, set the corresponding bit in
1340 * zlc->fullzones, so that subsequent attempts to allocate a page
1341 * from that zone don't waste time re-examining it.
1342 */
1343static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
1344{
1345 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1346 int i; /* index of *z in zonelist zones */
1347
1348 zlc = zonelist->zlcache_ptr;
1349 if (!zlc)
1350 return;
1351
1352 i = z - zonelist->zones;
1353
1354 set_bit(i, zlc->fullzones);
1355}
1356
1357#else /* CONFIG_NUMA */
1358
1359static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1360{
1361 return NULL;
1362}
1363
1364static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
1365 nodemask_t *allowednodes)
1366{
1367 return 1;
1368}
1369
1370static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
1371{
1372}
1373#endif /* CONFIG_NUMA */
1374
7fb1d9fc 1375/*
0798e519 1376 * get_page_from_freelist goes through the zonelist trying to allocate
7fb1d9fc
RS
1377 * a page.
1378 */
1379static struct page *
1380get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
1381 struct zonelist *zonelist, int alloc_flags)
753ee728 1382{
9276b1bc 1383 struct zone **z;
7fb1d9fc 1384 struct page *page = NULL;
9276b1bc 1385 int classzone_idx = zone_idx(zonelist->zones[0]);
18ea7e71 1386 struct zone *zone, *preferred_zone;
9276b1bc
PJ
1387 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1388 int zlc_active = 0; /* set if using zonelist_cache */
1389 int did_zlc_setup = 0; /* just call zlc_setup() one time */
b377fd39 1390 enum zone_type highest_zoneidx = -1; /* Gets set for policy zonelists */
7fb1d9fc 1391
9276b1bc 1392zonelist_scan:
7fb1d9fc 1393 /*
9276b1bc 1394 * Scan zonelist, looking for a zone with enough free.
7fb1d9fc
RS
1395 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1396 */
9276b1bc 1397 z = zonelist->zones;
18ea7e71 1398 preferred_zone = *z;
9276b1bc 1399
7fb1d9fc 1400 do {
b377fd39
MG
1401 /*
1402 * In NUMA, this could be a policy zonelist which contains
1403 * zones that may not be allowed by the current gfp_mask.
1404 * Check the zone is allowed by the current flags
1405 */
1406 if (unlikely(alloc_should_filter_zonelist(zonelist))) {
1407 if (highest_zoneidx == -1)
1408 highest_zoneidx = gfp_zone(gfp_mask);
1409 if (zone_idx(*z) > highest_zoneidx)
1410 continue;
1411 }
1412
9276b1bc
PJ
1413 if (NUMA_BUILD && zlc_active &&
1414 !zlc_zone_worth_trying(zonelist, z, allowednodes))
1415 continue;
1192d526 1416 zone = *z;
7fb1d9fc 1417 if ((alloc_flags & ALLOC_CPUSET) &&
02a0e53d 1418 !cpuset_zone_allowed_softwall(zone, gfp_mask))
9276b1bc 1419 goto try_next_zone;
7fb1d9fc
RS
1420
1421 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
3148890b
NP
1422 unsigned long mark;
1423 if (alloc_flags & ALLOC_WMARK_MIN)
1192d526 1424 mark = zone->pages_min;
3148890b 1425 else if (alloc_flags & ALLOC_WMARK_LOW)
1192d526 1426 mark = zone->pages_low;
3148890b 1427 else
1192d526 1428 mark = zone->pages_high;
0798e519
PJ
1429 if (!zone_watermark_ok(zone, order, mark,
1430 classzone_idx, alloc_flags)) {
9eeff239 1431 if (!zone_reclaim_mode ||
1192d526 1432 !zone_reclaim(zone, gfp_mask, order))
9276b1bc 1433 goto this_zone_full;
0798e519 1434 }
7fb1d9fc
RS
1435 }
1436
18ea7e71 1437 page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask);
0798e519 1438 if (page)
7fb1d9fc 1439 break;
9276b1bc
PJ
1440this_zone_full:
1441 if (NUMA_BUILD)
1442 zlc_mark_zone_full(zonelist, z);
1443try_next_zone:
1444 if (NUMA_BUILD && !did_zlc_setup) {
1445 /* we do zlc_setup after the first zone is tried */
1446 allowednodes = zlc_setup(zonelist, alloc_flags);
1447 zlc_active = 1;
1448 did_zlc_setup = 1;
1449 }
7fb1d9fc 1450 } while (*(++z) != NULL);
9276b1bc
PJ
1451
1452 if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1453 /* Disable zlc cache for second zonelist scan */
1454 zlc_active = 0;
1455 goto zonelist_scan;
1456 }
7fb1d9fc 1457 return page;
753ee728
MH
1458}
1459
1da177e4
LT
1460/*
1461 * This is the 'heart' of the zoned buddy allocator.
1462 */
edde08f2 1463struct page *
dd0fc66f 1464__alloc_pages(gfp_t gfp_mask, unsigned int order,
1da177e4
LT
1465 struct zonelist *zonelist)
1466{
260b2367 1467 const gfp_t wait = gfp_mask & __GFP_WAIT;
7fb1d9fc 1468 struct zone **z;
1da177e4
LT
1469 struct page *page;
1470 struct reclaim_state reclaim_state;
1471 struct task_struct *p = current;
1da177e4 1472 int do_retry;
7fb1d9fc 1473 int alloc_flags;
1da177e4
LT
1474 int did_some_progress;
1475
1476 might_sleep_if(wait);
1477
933e312e
AM
1478 if (should_fail_alloc_page(gfp_mask, order))
1479 return NULL;
1480
6b1de916 1481restart:
7fb1d9fc 1482 z = zonelist->zones; /* the list of zones suitable for gfp_mask */
1da177e4 1483
7fb1d9fc 1484 if (unlikely(*z == NULL)) {
523b9458
CL
1485 /*
1486 * Happens if we have an empty zonelist as a result of
1487 * GFP_THISNODE being used on a memoryless node
1488 */
1da177e4
LT
1489 return NULL;
1490 }
6b1de916 1491
7fb1d9fc 1492 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
3148890b 1493 zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
7fb1d9fc
RS
1494 if (page)
1495 goto got_pg;
1da177e4 1496
952f3b51
CL
1497 /*
1498 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
1499 * __GFP_NOWARN set) should not cause reclaim since the subsystem
1500 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
1501 * using a larger set of nodes after it has established that the
1502 * allowed per node queues are empty and that nodes are
1503 * over allocated.
1504 */
1505 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1506 goto nopage;
1507
0798e519 1508 for (z = zonelist->zones; *z; z++)
43b0bc00 1509 wakeup_kswapd(*z, order);
1da177e4 1510
9bf2229f 1511 /*
7fb1d9fc
RS
1512 * OK, we're below the kswapd watermark and have kicked background
1513 * reclaim. Now things get more complex, so set up alloc_flags according
1514 * to how we want to proceed.
1515 *
1516 * The caller may dip into page reserves a bit more if the caller
1517 * cannot run direct reclaim, or if the caller has realtime scheduling
4eac915d
PJ
1518 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
1519 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
9bf2229f 1520 */
3148890b 1521 alloc_flags = ALLOC_WMARK_MIN;
7fb1d9fc
RS
1522 if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
1523 alloc_flags |= ALLOC_HARDER;
1524 if (gfp_mask & __GFP_HIGH)
1525 alloc_flags |= ALLOC_HIGH;
bdd804f4
PJ
1526 if (wait)
1527 alloc_flags |= ALLOC_CPUSET;
1da177e4
LT
1528
1529 /*
1530 * Go through the zonelist again. Let __GFP_HIGH and allocations
7fb1d9fc 1531 * coming from realtime tasks go deeper into reserves.
1da177e4
LT
1532 *
1533 * This is the last chance, in general, before the goto nopage.
1534 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
9bf2229f 1535 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1da177e4 1536 */
7fb1d9fc
RS
1537 page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
1538 if (page)
1539 goto got_pg;
1da177e4
LT
1540
1541 /* This allocation should allow future memory freeing. */
b84a35be 1542
b43a57bb 1543rebalance:
b84a35be
NP
1544 if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
1545 && !in_interrupt()) {
1546 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
885036d3 1547nofail_alloc:
b84a35be 1548 /* go through the zonelist yet again, ignoring mins */
7fb1d9fc 1549 page = get_page_from_freelist(gfp_mask, order,
47f3a867 1550 zonelist, ALLOC_NO_WATERMARKS);
7fb1d9fc
RS
1551 if (page)
1552 goto got_pg;
885036d3 1553 if (gfp_mask & __GFP_NOFAIL) {
3fcfab16 1554 congestion_wait(WRITE, HZ/50);
885036d3
KK
1555 goto nofail_alloc;
1556 }
1da177e4
LT
1557 }
1558 goto nopage;
1559 }
1560
1561 /* Atomic allocations - we can't balance anything */
1562 if (!wait)
1563 goto nopage;
1564
1da177e4
LT
1565 cond_resched();
1566
1567 /* We now go into synchronous reclaim */
3e0d98b9 1568 cpuset_memory_pressure_bump();
1da177e4
LT
1569 p->flags |= PF_MEMALLOC;
1570 reclaim_state.reclaimed_slab = 0;
1571 p->reclaim_state = &reclaim_state;
1572
dac1d27b 1573 did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
1da177e4
LT
1574
1575 p->reclaim_state = NULL;
1576 p->flags &= ~PF_MEMALLOC;
1577
1578 cond_resched();
1579
e2c55dc8 1580 if (order != 0)
9f8f2172 1581 drain_all_pages();
e2c55dc8 1582
1da177e4 1583 if (likely(did_some_progress)) {
7fb1d9fc
RS
1584 page = get_page_from_freelist(gfp_mask, order,
1585 zonelist, alloc_flags);
1586 if (page)
1587 goto got_pg;
1da177e4 1588 } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
ff0ceb9d
DR
1589 if (!try_set_zone_oom(zonelist)) {
1590 schedule_timeout_uninterruptible(1);
1591 goto restart;
1592 }
1593
1da177e4
LT
1594 /*
1595 * Go through the zonelist yet one more time, keep
1596 * very high watermark here, this is only to catch
1597 * a parallel oom killing, we must fail if we're still
1598 * under heavy pressure.
1599 */
7fb1d9fc 1600 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
3148890b 1601 zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
ff0ceb9d
DR
1602 if (page) {
1603 clear_zonelist_oom(zonelist);
7fb1d9fc 1604 goto got_pg;
ff0ceb9d 1605 }
1da177e4 1606
a8bbf72a 1607 /* The OOM killer will not help higher order allocs so fail */
ff0ceb9d
DR
1608 if (order > PAGE_ALLOC_COSTLY_ORDER) {
1609 clear_zonelist_oom(zonelist);
a8bbf72a 1610 goto nopage;
ff0ceb9d 1611 }
a8bbf72a 1612
9b0f8b04 1613 out_of_memory(zonelist, gfp_mask, order);
ff0ceb9d 1614 clear_zonelist_oom(zonelist);
1da177e4
LT
1615 goto restart;
1616 }
1617
1618 /*
1619 * Don't let big-order allocations loop unless the caller explicitly
1620 * requests that. Wait for some write requests to complete then retry.
1621 *
1622 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
1623 * <= 3, but that may not be true in other implementations.
1624 */
1625 do_retry = 0;
1626 if (!(gfp_mask & __GFP_NORETRY)) {
5ad333eb
AW
1627 if ((order <= PAGE_ALLOC_COSTLY_ORDER) ||
1628 (gfp_mask & __GFP_REPEAT))
1da177e4
LT
1629 do_retry = 1;
1630 if (gfp_mask & __GFP_NOFAIL)
1631 do_retry = 1;
1632 }
1633 if (do_retry) {
3fcfab16 1634 congestion_wait(WRITE, HZ/50);
1da177e4
LT
1635 goto rebalance;
1636 }
1637
1638nopage:
1639 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
1640 printk(KERN_WARNING "%s: page allocation failure."
1641 " order:%d, mode:0x%x\n",
1642 p->comm, order, gfp_mask);
1643 dump_stack();
578c2fd6 1644 show_mem();
1da177e4 1645 }
1da177e4 1646got_pg:
1da177e4
LT
1647 return page;
1648}
1649
1650EXPORT_SYMBOL(__alloc_pages);
1651
1652/*
1653 * Common helper functions.
1654 */
920c7a5d 1655unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1da177e4
LT
1656{
1657 struct page * page;
1658 page = alloc_pages(gfp_mask, order);
1659 if (!page)
1660 return 0;
1661 return (unsigned long) page_address(page);
1662}
1663
1664EXPORT_SYMBOL(__get_free_pages);
1665
920c7a5d 1666unsigned long get_zeroed_page(gfp_t gfp_mask)
1667{
1668 struct page * page;
1669
1670 /*
 1671 * get_zeroed_page() returns a directly mapped kernel virtual address, so it
 1672 * cannot be used for a highmem page
1673 */
725d704e 1674 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
1675
1676 page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
1677 if (page)
1678 return (unsigned long) page_address(page);
1679 return 0;
1680}
1681
1682EXPORT_SYMBOL(get_zeroed_page);
1683
1684void __pagevec_free(struct pagevec *pvec)
1685{
1686 int i = pagevec_count(pvec);
1687
1688 while (--i >= 0)
1689 free_hot_cold_page(pvec->pages[i], pvec->cold);
1690}
1691
920c7a5d 1692void __free_pages(struct page *page, unsigned int order)
1da177e4 1693{
b5810039 1694 if (put_page_testzero(page)) {
1695 if (order == 0)
1696 free_hot_page(page);
1697 else
1698 __free_pages_ok(page, order);
1699 }
1700}
1701
1702EXPORT_SYMBOL(__free_pages);
1703
920c7a5d 1704void free_pages(unsigned long addr, unsigned int order)
1705{
1706 if (addr != 0) {
725d704e 1707 VM_BUG_ON(!virt_addr_valid((void *)addr));
1708 __free_pages(virt_to_page((void *)addr), order);
1709 }
1710}
1711
1712EXPORT_SYMBOL(free_pages);
1713
1714static unsigned int nr_free_zone_pages(int offset)
1715{
e310fd43 1716 /* Just pick one node, since fallback list is circular */
1717 unsigned int sum = 0;
1718
0e88460d 1719 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
1720 struct zone **zonep = zonelist->zones;
1721 struct zone *zone;
1da177e4 1722
1723 for (zone = *zonep++; zone; zone = *zonep++) {
1724 unsigned long size = zone->present_pages;
1725 unsigned long high = zone->pages_high;
1726 if (size > high)
1727 sum += size - high;
1728 }
1729
1730 return sum;
1731}
1732
1733/*
1734 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
1735 */
1736unsigned int nr_free_buffer_pages(void)
1737{
af4ca457 1738 return nr_free_zone_pages(gfp_zone(GFP_USER));
1da177e4 1739}
c2f1a551 1740EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1741
1742/*
1743 * Amount of free RAM allocatable within all zones
1744 */
1745unsigned int nr_free_pagecache_pages(void)
1746{
2a1e274a 1747 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
1da177e4 1748}
1749
1750static inline void show_node(struct zone *zone)
1da177e4 1751{
08e0f6a9 1752 if (NUMA_BUILD)
25ba77c1 1753 printk("Node %d ", zone_to_nid(zone));
1da177e4 1754}
1da177e4 1755
1756void si_meminfo(struct sysinfo *val)
1757{
1758 val->totalram = totalram_pages;
1759 val->sharedram = 0;
d23ad423 1760 val->freeram = global_page_state(NR_FREE_PAGES);
1da177e4 1761 val->bufferram = nr_blockdev_pages();
1762 val->totalhigh = totalhigh_pages;
1763 val->freehigh = nr_free_highpages();
1764 val->mem_unit = PAGE_SIZE;
1765}
1766
1767EXPORT_SYMBOL(si_meminfo);
1768
1769#ifdef CONFIG_NUMA
1770void si_meminfo_node(struct sysinfo *val, int nid)
1771{
1772 pg_data_t *pgdat = NODE_DATA(nid);
1773
1774 val->totalram = pgdat->node_present_pages;
d23ad423 1775 val->freeram = node_page_state(nid, NR_FREE_PAGES);
98d2b0eb 1776#ifdef CONFIG_HIGHMEM
1da177e4 1777 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
1778 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
1779 NR_FREE_PAGES);
1780#else
1781 val->totalhigh = 0;
1782 val->freehigh = 0;
1783#endif
1784 val->mem_unit = PAGE_SIZE;
1785}
1786#endif
1787
1788#define K(x) ((x) << (PAGE_SHIFT-10))
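/*
 * K() converts page counts to kilobytes: with 4KB pages (PAGE_SHIFT == 12,
 * assumed here for illustration) K(x) == x * 4, so K(256) == 1024kB; the
 * shift adapts automatically to other page sizes.
 */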
1789
1790/*
1791 * Show free area list (used inside shift_scroll-lock stuff)
1792 * We also calculate the percentage fragmentation. We do this by counting the
1793 * memory on each free list with the exception of the first item on the list.
1794 */
1795void show_free_areas(void)
1796{
c7241913 1797 int cpu;
1798 struct zone *zone;
1799
1800 for_each_zone(zone) {
c7241913 1801 if (!populated_zone(zone))
1da177e4 1802 continue;
1803
1804 show_node(zone);
1805 printk("%s per-cpu:\n", zone->name);
1da177e4 1806
6b482c67 1807 for_each_online_cpu(cpu) {
1808 struct per_cpu_pageset *pageset;
1809
e7c8d5c9 1810 pageset = zone_pcp(zone, cpu);
1da177e4 1811
1812 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
1813 cpu, pageset->pcp.high,
1814 pageset->pcp.batch, pageset->pcp.count);
1815 }
1816 }
1817
a25700a5 1818 printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n"
d23ad423 1819 " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
1820 global_page_state(NR_ACTIVE),
1821 global_page_state(NR_INACTIVE),
b1e7a8fd 1822 global_page_state(NR_FILE_DIRTY),
ce866b34 1823 global_page_state(NR_WRITEBACK),
fd39fc85 1824 global_page_state(NR_UNSTABLE_NFS),
d23ad423 1825 global_page_state(NR_FREE_PAGES),
1826 global_page_state(NR_SLAB_RECLAIMABLE) +
1827 global_page_state(NR_SLAB_UNRECLAIMABLE),
65ba55f5 1828 global_page_state(NR_FILE_MAPPED),
1829 global_page_state(NR_PAGETABLE),
1830 global_page_state(NR_BOUNCE));
1831
1832 for_each_zone(zone) {
1833 int i;
1834
1835 if (!populated_zone(zone))
1836 continue;
1837
1838 show_node(zone);
1839 printk("%s"
1840 " free:%lukB"
1841 " min:%lukB"
1842 " low:%lukB"
1843 " high:%lukB"
1844 " active:%lukB"
1845 " inactive:%lukB"
1846 " present:%lukB"
1847 " pages_scanned:%lu"
1848 " all_unreclaimable? %s"
1849 "\n",
1850 zone->name,
d23ad423 1851 K(zone_page_state(zone, NR_FREE_PAGES)),
1852 K(zone->pages_min),
1853 K(zone->pages_low),
1854 K(zone->pages_high),
1855 K(zone_page_state(zone, NR_ACTIVE)),
1856 K(zone_page_state(zone, NR_INACTIVE)),
1857 K(zone->present_pages),
1858 zone->pages_scanned,
e815af95 1859 (zone_is_all_unreclaimable(zone) ? "yes" : "no")
1860 );
1861 printk("lowmem_reserve[]:");
1862 for (i = 0; i < MAX_NR_ZONES; i++)
1863 printk(" %lu", zone->lowmem_reserve[i]);
1864 printk("\n");
1865 }
1866
1867 for_each_zone(zone) {
8f9de51a 1868 unsigned long nr[MAX_ORDER], flags, order, total = 0;
1da177e4 1869
1870 if (!populated_zone(zone))
1871 continue;
1872
1873 show_node(zone);
1874 printk("%s: ", zone->name);
1875
1876 spin_lock_irqsave(&zone->lock, flags);
1877 for (order = 0; order < MAX_ORDER; order++) {
1878 nr[order] = zone->free_area[order].nr_free;
1879 total += nr[order] << order;
1880 }
1881 spin_unlock_irqrestore(&zone->lock, flags);
1882 for (order = 0; order < MAX_ORDER; order++)
1883 printk("%lu*%lukB ", nr[order], K(1UL) << order);
1884 printk("= %lukB\n", K(total));
1885 }
1886
1887 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
1888
1889 show_swap_cache_info();
1890}
1891
1892/*
1893 * Builds allocation fallback zone lists.
1894 *
1895 * Add all populated zones of a node to the zonelist.
1da177e4 1896 */
1897static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
1898 int nr_zones, enum zone_type zone_type)
1da177e4 1899{
1900 struct zone *zone;
1901
98d2b0eb 1902 BUG_ON(zone_type >= MAX_NR_ZONES);
2f6726e5 1903 zone_type++;
1904
1905 do {
2f6726e5 1906 zone_type--;
070f8032 1907 zone = pgdat->node_zones + zone_type;
1a93205b 1908 if (populated_zone(zone)) {
1909 zonelist->zones[nr_zones++] = zone;
1910 check_highest_zone(zone_type);
1da177e4 1911 }
02a68a5e 1912
2f6726e5 1913 } while (zone_type);
070f8032 1914 return nr_zones;
1915}
1916
1917
1918/*
1919 * zonelist_order:
1920 * 0 = automatic detection of better ordering.
1921 * 1 = order by ([node] distance, -zonetype)
1922 * 2 = order by (-zonetype, [node] distance)
1923 *
1924 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
1925 * the same zonelist. So only NUMA can configure this param.
1926 */
1927#define ZONELIST_ORDER_DEFAULT 0
1928#define ZONELIST_ORDER_NODE 1
1929#define ZONELIST_ORDER_ZONE 2
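/*
 * Rough illustration of the two orderings: on a hypothetical two-node box
 * where node 0 has ZONE_DMA + ZONE_NORMAL and node 1 has only ZONE_NORMAL,
 * node 0's GFP_KERNEL zonelist comes out approximately as
 *   node order: Normal(0), DMA(0), Normal(1)   (keep allocations local)
 *   zone order: Normal(0), Normal(1), DMA(0)   (protect the small DMA zone)
 */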
1930
1931/* zonelist order in the kernel.
1932 * set_zonelist_order() will set this to NODE or ZONE.
1933 */
1934static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
1935static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
1936
1937
1da177e4 1938#ifdef CONFIG_NUMA
1939/* The value user specified ....changed by config */
1940static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
1941/* string for sysctl */
1942#define NUMA_ZONELIST_ORDER_LEN 16
1943char numa_zonelist_order[16] = "default";
1944
1945/*
 1946 * interface to configure zonelist ordering.
1947 * command line option "numa_zonelist_order"
 1948 * = "[dD]efault" - default, automatic configuration.
 1949 * = "[nN]ode" - order by node locality, then by zone within node
 1950 * = "[zZ]one" - order by zone, then by locality within zone
1951 */
1952
1953static int __parse_numa_zonelist_order(char *s)
1954{
1955 if (*s == 'd' || *s == 'D') {
1956 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
1957 } else if (*s == 'n' || *s == 'N') {
1958 user_zonelist_order = ZONELIST_ORDER_NODE;
1959 } else if (*s == 'z' || *s == 'Z') {
1960 user_zonelist_order = ZONELIST_ORDER_ZONE;
1961 } else {
1962 printk(KERN_WARNING
1963 "Ignoring invalid numa_zonelist_order value: "
1964 "%s\n", s);
1965 return -EINVAL;
1966 }
1967 return 0;
1968}
1969
1970static __init int setup_numa_zonelist_order(char *s)
1971{
1972 if (s)
1973 return __parse_numa_zonelist_order(s);
1974 return 0;
1975}
1976early_param("numa_zonelist_order", setup_numa_zonelist_order);
1977
1978/*
1979 * sysctl handler for numa_zonelist_order
1980 */
1981int numa_zonelist_order_handler(ctl_table *table, int write,
1982 struct file *file, void __user *buffer, size_t *length,
1983 loff_t *ppos)
1984{
1985 char saved_string[NUMA_ZONELIST_ORDER_LEN];
1986 int ret;
1987
1988 if (write)
1989 strncpy(saved_string, (char*)table->data,
1990 NUMA_ZONELIST_ORDER_LEN);
1991 ret = proc_dostring(table, write, file, buffer, length, ppos);
1992 if (ret)
1993 return ret;
1994 if (write) {
1995 int oldval = user_zonelist_order;
1996 if (__parse_numa_zonelist_order((char*)table->data)) {
1997 /*
1998 * bogus value. restore saved string
1999 */
2000 strncpy((char*)table->data, saved_string,
2001 NUMA_ZONELIST_ORDER_LEN);
2002 user_zonelist_order = oldval;
2003 } else if (oldval != user_zonelist_order)
2004 build_all_zonelists();
2005 }
2006 return 0;
2007}
2008
2009
1da177e4 2010#define MAX_NODE_LOAD (num_online_nodes())
2011static int node_load[MAX_NUMNODES];
2012
1da177e4 2013/**
4dc3b16b 2014 * find_next_best_node - find the next node that should appear in a given node's fallback list
2015 * @node: node whose fallback list we're appending
2016 * @used_node_mask: nodemask_t of already used nodes
2017 *
2018 * We use a number of factors to determine which is the next node that should
2019 * appear on a given node's fallback list. The node should not have appeared
2020 * already in @node's fallback list, and it should be the next closest node
2021 * according to the distance array (which contains arbitrary distance values
2022 * from each node to each node in the system), and should also prefer nodes
2023 * with no CPUs, since presumably they'll have very little allocation pressure
2024 * on them otherwise.
2025 * It returns -1 if no node is found.
2026 */
f0c0b2b8 2027static int find_next_best_node(int node, nodemask_t *used_node_mask)
1da177e4 2028{
4cf808eb 2029 int n, val;
2030 int min_val = INT_MAX;
2031 int best_node = -1;
c5f59f08 2032 node_to_cpumask_ptr(tmp, 0);
1da177e4 2033
2034 /* Use the local node if we haven't already */
2035 if (!node_isset(node, *used_node_mask)) {
2036 node_set(node, *used_node_mask);
2037 return node;
2038 }
1da177e4 2039
37b07e41 2040 for_each_node_state(n, N_HIGH_MEMORY) {
2041
2042 /* Don't want a node to appear more than once */
2043 if (node_isset(n, *used_node_mask))
2044 continue;
2045
2046 /* Use the distance array to find the distance */
2047 val = node_distance(node, n);
2048
2049 /* Penalize nodes under us ("prefer the next node") */
2050 val += (n < node);
2051
1da177e4 2052 /* Give preference to headless and unused nodes */
2053 node_to_cpumask_ptr_next(tmp, n);
2054 if (!cpus_empty(*tmp))
2055 val += PENALTY_FOR_NODE_WITH_CPUS;
2056
2057 /* Slight preference for less loaded node */
2058 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2059 val += node_load[n];
2060
2061 if (val < min_val) {
2062 min_val = val;
2063 best_node = n;
2064 }
2065 }
2066
2067 if (best_node >= 0)
2068 node_set(best_node, *used_node_mask);
2069
2070 return best_node;
2071}
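/*
 * Rough reading of the scoring above: the base score is the node distance,
 * nudged up by one for nodes numbered below us and by
 * PENALTY_FOR_NODE_WITH_CPUS for nodes that have CPUs; multiplying by
 * MAX_NODE_LOAD * MAX_NUMNODES before adding node_load[] makes the load a
 * tie-breaker only.  So of two candidates at equal distance, a memory-only
 * node is preferred over one with CPUs, and among otherwise equal nodes the
 * one with the smallest accumulated node_load[] wins.
 */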
2072
2073
2074/*
2075 * Build zonelists ordered by node and zones within node.
2076 * This results in maximum locality--normal zone overflows into local
2077 * DMA zone, if any--but risks exhausting DMA zone.
2078 */
2079static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
1da177e4 2080{
19655d34 2081 enum zone_type i;
f0c0b2b8 2082 int j;
1da177e4 2083 struct zonelist *zonelist;
2084
2085 for (i = 0; i < MAX_NR_ZONES; i++) {
2086 zonelist = pgdat->node_zonelists + i;
2087 for (j = 0; zonelist->zones[j] != NULL; j++)
2088 ;
2089 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
2090 zonelist->zones[j] = NULL;
2091 }
2092}
2093
2094/*
2095 * Build gfp_thisnode zonelists
2096 */
2097static void build_thisnode_zonelists(pg_data_t *pgdat)
2098{
2099 enum zone_type i;
2100 int j;
2101 struct zonelist *zonelist;
2102
2103 for (i = 0; i < MAX_NR_ZONES; i++) {
2104 zonelist = pgdat->node_zonelists + MAX_NR_ZONES + i;
2105 j = build_zonelists_node(pgdat, zonelist, 0, i);
2106 zonelist->zones[j] = NULL;
2107 }
2108}
2109
2110/*
2111 * Build zonelists ordered by zone and nodes within zones.
2112 * This results in conserving DMA zone[s] until all Normal memory is
2113 * exhausted, but results in overflowing to remote node while memory
2114 * may still exist in local DMA zone.
2115 */
2116static int node_order[MAX_NUMNODES];
2117
2118static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2119{
2120 enum zone_type i;
2121 int pos, j, node;
2122 int zone_type; /* needs to be signed */
2123 struct zone *z;
2124 struct zonelist *zonelist;
2125
2126 for (i = 0; i < MAX_NR_ZONES; i++) {
2127 zonelist = pgdat->node_zonelists + i;
2128 pos = 0;
2129 for (zone_type = i; zone_type >= 0; zone_type--) {
2130 for (j = 0; j < nr_nodes; j++) {
2131 node = node_order[j];
2132 z = &NODE_DATA(node)->node_zones[zone_type];
2133 if (populated_zone(z)) {
2134 zonelist->zones[pos++] = z;
2135 check_highest_zone(zone_type);
2136 }
2137 }
2138 }
2139 zonelist->zones[pos] = NULL;
2140 }
2141}
2142
2143static int default_zonelist_order(void)
2144{
2145 int nid, zone_type;
2146 unsigned long low_kmem_size,total_size;
2147 struct zone *z;
2148 int average_size;
2149 /*
 2150 * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
2151 * If they are really small and used heavily, the system can fall
2152 * into OOM very easily.
 2153 * This function detects the ZONE_DMA/DMA32 size and configures the zone order.
2154 */
2155 /* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */
2156 low_kmem_size = 0;
2157 total_size = 0;
2158 for_each_online_node(nid) {
2159 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2160 z = &NODE_DATA(nid)->node_zones[zone_type];
2161 if (populated_zone(z)) {
2162 if (zone_type < ZONE_NORMAL)
2163 low_kmem_size += z->present_pages;
2164 total_size += z->present_pages;
2165 }
2166 }
2167 }
2168 if (!low_kmem_size || /* there are no DMA area. */
2169 low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2170 return ZONELIST_ORDER_NODE;
2171 /*
2172 * look into each node's config.
 2173 * If there is a node whose DMA/DMA32 memory covers a very large share of
 2174 * its local memory, NODE_ORDER may be suitable.
2175 */
2176 average_size = total_size /
2177 (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2178 for_each_online_node(nid) {
2179 low_kmem_size = 0;
2180 total_size = 0;
2181 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2182 z = &NODE_DATA(nid)->node_zones[zone_type];
2183 if (populated_zone(z)) {
2184 if (zone_type < ZONE_NORMAL)
2185 low_kmem_size += z->present_pages;
2186 total_size += z->present_pages;
2187 }
2188 }
2189 if (low_kmem_size &&
2190 total_size > average_size && /* ignore small node */
2191 low_kmem_size > total_size * 70/100)
2192 return ZONELIST_ORDER_NODE;
2193 }
2194 return ZONELIST_ORDER_ZONE;
2195}
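/*
 * Worked example of the heuristic above (illustrative numbers): if DMA/DMA32
 * pages make up more than half of all memory, or there is no DMA/DMA32 at
 * all, node ordering is chosen outright.  Otherwise each node larger than
 * average is checked: a node with, say, 4GB of which 3GB is DMA32 (75% > 70%)
 * forces node ordering; if no such node exists, zone ordering is used so the
 * small DMA/DMA32 zones are consumed last.
 */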
2196
2197static void set_zonelist_order(void)
2198{
2199 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2200 current_zonelist_order = default_zonelist_order();
2201 else
2202 current_zonelist_order = user_zonelist_order;
2203}
2204
2205static void build_zonelists(pg_data_t *pgdat)
2206{
2207 int j, node, load;
2208 enum zone_type i;
1da177e4 2209 nodemask_t used_mask;
2210 int local_node, prev_node;
2211 struct zonelist *zonelist;
2212 int order = current_zonelist_order;
2213
2214 /* initialize zonelists */
523b9458 2215 for (i = 0; i < MAX_ZONELISTS; i++) {
2216 zonelist = pgdat->node_zonelists + i;
2217 zonelist->zones[0] = NULL;
2218 }
2219
2220 /* NUMA-aware ordering of nodes */
2221 local_node = pgdat->node_id;
2222 load = num_online_nodes();
2223 prev_node = local_node;
2224 nodes_clear(used_mask);
2225
2226 memset(node_load, 0, sizeof(node_load));
2227 memset(node_order, 0, sizeof(node_order));
2228 j = 0;
2229
1da177e4 2230 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
2231 int distance = node_distance(local_node, node);
2232
2233 /*
2234 * If another node is sufficiently far away then it is better
2235 * to reclaim pages in a zone before going off node.
2236 */
2237 if (distance > RECLAIM_DISTANCE)
2238 zone_reclaim_mode = 1;
2239
2240 /*
2241 * We don't want to pressure a particular node.
2242 * So adding penalty to the first node in same
2243 * distance group to make it round-robin.
2244 */
9eeff239 2245 if (distance != node_distance(local_node, prev_node))
2246 node_load[node] = load;
2247
2248 prev_node = node;
2249 load--;
2250 if (order == ZONELIST_ORDER_NODE)
2251 build_zonelists_in_node_order(pgdat, node);
2252 else
2253 node_order[j++] = node; /* remember order */
2254 }
1da177e4 2255
2256 if (order == ZONELIST_ORDER_ZONE) {
2257 /* calculate node order -- i.e., DMA last! */
2258 build_zonelists_in_zone_order(pgdat, j);
1da177e4 2259 }
2260
2261 build_thisnode_zonelists(pgdat);
2262}
2263
9276b1bc 2264/* Construct the zonelist performance cache - see further mmzone.h */
f0c0b2b8 2265static void build_zonelist_cache(pg_data_t *pgdat)
2266{
2267 int i;
2268
2269 for (i = 0; i < MAX_NR_ZONES; i++) {
2270 struct zonelist *zonelist;
2271 struct zonelist_cache *zlc;
2272 struct zone **z;
2273
2274 zonelist = pgdat->node_zonelists + i;
2275 zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2276 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2277 for (z = zonelist->zones; *z; z++)
2278 zlc->z_to_n[z - zonelist->zones] = zone_to_nid(*z);
2279 }
2280}
2281
f0c0b2b8 2282
2283#else /* CONFIG_NUMA */
2284
2285static void set_zonelist_order(void)
2286{
2287 current_zonelist_order = ZONELIST_ORDER_ZONE;
2288}
2289
2290static void build_zonelists(pg_data_t *pgdat)
1da177e4 2291{
2292 int node, local_node;
2293 enum zone_type i,j;
2294
2295 local_node = pgdat->node_id;
19655d34 2296 for (i = 0; i < MAX_NR_ZONES; i++) {
2297 struct zonelist *zonelist;
2298
2299 zonelist = pgdat->node_zonelists + i;
2300
19655d34 2301 j = build_zonelists_node(pgdat, zonelist, 0, i);
2302 /*
2303 * Now we build the zonelist so that it contains the zones
2304 * of all the other nodes.
2305 * We don't want to pressure a particular node, so when
2306 * building the zones for node N, we make sure that the
2307 * zones coming right after the local ones are those from
2308 * node N+1 (modulo N)
2309 */
2310 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2311 if (!node_online(node))
2312 continue;
19655d34 2313 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
2314 }
2315 for (node = 0; node < local_node; node++) {
2316 if (!node_online(node))
2317 continue;
19655d34 2318 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
2319 }
2320
2321 zonelist->zones[j] = NULL;
2322 }
2323}
2324
9276b1bc 2325/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
f0c0b2b8 2326static void build_zonelist_cache(pg_data_t *pgdat)
2327{
2328 int i;
2329
2330 for (i = 0; i < MAX_NR_ZONES; i++)
2331 pgdat->node_zonelists[i].zlcache_ptr = NULL;
2332}
2333
2334#endif /* CONFIG_NUMA */
2335
6811378e 2336/* the return value is int just for stop_machine_run() */
f0c0b2b8 2337static int __build_all_zonelists(void *dummy)
1da177e4 2338{
6811378e 2339 int nid;
2340
2341 for_each_online_node(nid) {
2342 pg_data_t *pgdat = NODE_DATA(nid);
2343
2344 build_zonelists(pgdat);
2345 build_zonelist_cache(pgdat);
9276b1bc 2346 }
2347 return 0;
2348}
2349
f0c0b2b8 2350void build_all_zonelists(void)
6811378e 2351{
2352 set_zonelist_order();
2353
6811378e 2354 if (system_state == SYSTEM_BOOTING) {
423b41d7 2355 __build_all_zonelists(NULL);
2356 cpuset_init_current_mems_allowed();
2357 } else {
183ff22b 2358 /* we have to stop all cpus to guarantee there is no user
2359 of zonelist */
2360 stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
2361 /* cpuset refresh routine should be here */
2362 }
bd1e22b8 2363 vm_total_pages = nr_free_pagecache_pages();
2364 /*
2365 * Disable grouping by mobility if the number of pages in the
2366 * system is too low to allow the mechanism to work. It would be
2367 * more accurate, but expensive to check per-zone. This check is
2368 * made on memory-hotadd so a system can start with mobility
2369 * disabled and enable it later
2370 */
d9c23400 2371 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
2372 page_group_by_mobility_disabled = 1;
2373 else
2374 page_group_by_mobility_disabled = 0;
2375
2376 printk("Built %i zonelists in %s order, mobility grouping %s. "
2377 "Total pages: %ld\n",
2378 num_online_nodes(),
2379 zonelist_order_name[current_zonelist_order],
9ef9acb0 2380 page_group_by_mobility_disabled ? "off" : "on",
2381 vm_total_pages);
2382#ifdef CONFIG_NUMA
2383 printk("Policy zone: %s\n", zone_names[policy_zone]);
2384#endif
2385}
2386
2387/*
2388 * Helper functions to size the waitqueue hash table.
2389 * Essentially these want to choose hash table sizes sufficiently
2390 * large so that collisions trying to wait on pages are rare.
2391 * But in fact, the number of active page waitqueues on typical
2392 * systems is ridiculously low, less than 200. So this is even
2393 * conservative, even though it seems large.
2394 *
2395 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
2396 * waitqueues, i.e. the size of the waitq table given the number of pages.
2397 */
2398#define PAGES_PER_WAITQUEUE 256
2399
cca448fe 2400#ifndef CONFIG_MEMORY_HOTPLUG
02b694de 2401static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2402{
2403 unsigned long size = 1;
2404
2405 pages /= PAGES_PER_WAITQUEUE;
2406
2407 while (size < pages)
2408 size <<= 1;
2409
2410 /*
2411 * Once we have dozens or even hundreds of threads sleeping
2412 * on IO we've got bigger problems than wait queue collision.
2413 * Limit the size of the wait table to a reasonable size.
2414 */
2415 size = min(size, 4096UL);
2416
2417 return max(size, 4UL);
2418}
2419#else
2420/*
2421 * A zone's size might be changed by hot-add, so it is not possible to determine
2422 * a suitable size for its wait_table. So we use the maximum size now.
2423 *
2424 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
2425 *
2426 * i386 (preemption config) : 4096 x 16 = 64Kbyte.
2427 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
2428 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
2429 *
2430 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
2431 * or more by the traditional way. (See above). It equals:
2432 *
2433 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
2434 * ia64(16K page size) : = ( 8G + 4M)byte.
2435 * powerpc (64K page size) : = (32G +16M)byte.
2436 */
2437static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2438{
2439 return 4096UL;
2440}
2441#endif
2442
2443/*
2444 * This is an integer logarithm so that shifts can be used later
2445 * to extract the more random high bits from the multiplicative
2446 * hash function before the remainder is taken.
2447 */
2448static inline unsigned long wait_table_bits(unsigned long size)
2449{
2450 return ffz(~size);
2451}
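/*
 * Worked example (illustrative, assuming 4KB pages): a 1GB zone spans
 * 262144 pages; 262144 / PAGES_PER_WAITQUEUE(256) = 1024, already a power
 * of two, so the table gets 1024 entries and wait_table_bits() returns 10.
 * A 16GB zone would want 16384 entries but is clamped to the 4096 maximum
 * (12 bits).
 */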
2452
2453#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
2454
56fd56b8 2455/*
d9c23400 2456 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
2457 * of blocks reserved is based on zone->pages_min. The memory within the
2458 * reserve will tend to store contiguous free pages. Setting min_free_kbytes
2459 * higher will lead to a bigger reserve which will get freed as contiguous
2460 * blocks as reclaim kicks in
2461 */
2462static void setup_zone_migrate_reserve(struct zone *zone)
2463{
2464 unsigned long start_pfn, pfn, end_pfn;
2465 struct page *page;
2466 unsigned long reserve, block_migratetype;
2467
2468 /* Get the start pfn, end pfn and the number of blocks to reserve */
2469 start_pfn = zone->zone_start_pfn;
2470 end_pfn = start_pfn + zone->spanned_pages;
2471 reserve = roundup(zone->pages_min, pageblock_nr_pages) >>
2472 pageblock_order;
56fd56b8 2473
d9c23400 2474 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
2475 if (!pfn_valid(pfn))
2476 continue;
2477 page = pfn_to_page(pfn);
2478
2479 /* Blocks with reserved pages will never free, skip them. */
2480 if (PageReserved(page))
2481 continue;
2482
2483 block_migratetype = get_pageblock_migratetype(page);
2484
2485 /* If this block is reserved, account for it */
2486 if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
2487 reserve--;
2488 continue;
2489 }
2490
2491 /* Suitable for reserving if this block is movable */
2492 if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
2493 set_pageblock_migratetype(page, MIGRATE_RESERVE);
2494 move_freepages_block(zone, page, MIGRATE_RESERVE);
2495 reserve--;
2496 continue;
2497 }
2498
2499 /*
2500 * If the reserve is met and this is a previous reserved block,
2501 * take it back
2502 */
2503 if (block_migratetype == MIGRATE_RESERVE) {
2504 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2505 move_freepages_block(zone, page, MIGRATE_MOVABLE);
2506 }
2507 }
2508}
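/*
 * Worked example of the reserve calculation above (illustrative, assuming
 * pageblock_order == 9, i.e. pageblock_nr_pages == 512): with
 * zone->pages_min == 1000, roundup(1000, 512) == 1024 and 1024 >> 9 == 2,
 * so the first two suitable movable pageblocks are flipped to
 * MIGRATE_RESERVE; raising min_free_kbytes grows this count.
 */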
ac0e5b7a 2509
2510/*
2511 * Initially all pages are reserved - free ones are freed
2512 * up by free_all_bootmem() once the early boot process is
2513 * done. Non-atomic initialization, single-pass.
2514 */
c09b4240 2515void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
a2f3aa02 2516 unsigned long start_pfn, enum memmap_context context)
1da177e4 2517{
1da177e4 2518 struct page *page;
2519 unsigned long end_pfn = start_pfn + size;
2520 unsigned long pfn;
1da177e4 2521
cbe8dd4a 2522 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
2523 /*
2524 * There can be holes in boot-time mem_map[]s
2525 * handed to this function. They do not
2526 * exist on hotplugged memory.
2527 */
2528 if (context == MEMMAP_EARLY) {
2529 if (!early_pfn_valid(pfn))
2530 continue;
2531 if (!early_pfn_in_nid(pfn, nid))
2532 continue;
2533 }
2534 page = pfn_to_page(pfn);
2535 set_page_links(page, zone, nid, pfn);
7835e98b 2536 init_page_count(page);
2537 reset_page_mapcount(page);
2538 SetPageReserved(page);
2539
2540 /*
2541 * Mark the block movable so that blocks are reserved for
2542 * movable at startup. This will force kernel allocations
2543 * to reserve their blocks rather than leaking throughout
2544 * the address space during boot when many long-lived
2545 * kernel allocations are made. Later some blocks near
2546 * the start are marked MIGRATE_RESERVE by
2547 * setup_zone_migrate_reserve()
b2a0ac88 2548 */
d9c23400 2549 if ((pfn & (pageblock_nr_pages-1)))
56fd56b8 2550 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
b2a0ac88 2551
2552 INIT_LIST_HEAD(&page->lru);
2553#ifdef WANT_PAGE_VIRTUAL
2554 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
2555 if (!is_highmem_idx(zone))
3212c6be 2556 set_page_address(page, __va(pfn << PAGE_SHIFT));
1da177e4 2557#endif
2558 }
2559}
2560
1e548deb 2561static void __meminit zone_init_free_lists(struct zone *zone)
1da177e4 2562{
2563 int order, t;
2564 for_each_migratetype_order(order, t) {
2565 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
2566 zone->free_area[order].nr_free = 0;
2567 }
2568}
2569
2570#ifndef __HAVE_ARCH_MEMMAP_INIT
2571#define memmap_init(size, nid, zone, start_pfn) \
a2f3aa02 2572 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
2573#endif
2574
1d6f4e60 2575static int zone_batchsize(struct zone *zone)
2576{
2577 int batch;
2578
2579 /*
2580 * The per-cpu-pages pools are set to around 1000th of the
ba56e91c 2581 * size of the zone. But no more than 1/2 of a meg.
2582 *
2583 * OK, so we don't know how big the cache is. So guess.
2584 */
2585 batch = zone->present_pages / 1024;
2586 if (batch * PAGE_SIZE > 512 * 1024)
2587 batch = (512 * 1024) / PAGE_SIZE;
2588 batch /= 4; /* We effectively *= 4 below */
2589 if (batch < 1)
2590 batch = 1;
2591
2592 /*
2593 * Clamp the batch to a 2^n - 1 value. Having a power
2594 * of 2 value was found to be more likely to have
2595 * suboptimal cache aliasing properties in some cases.
e7c8d5c9 2596 *
2597 * For example if 2 tasks are alternately allocating
2598 * batches of pages, one task can end up with a lot
2599 * of pages of one half of the possible page colors
2600 * and the other with pages of the other colors.
e7c8d5c9 2601 */
0ceaacc9 2602 batch = (1 << (fls(batch + batch/2)-1)) - 1;
ba56e91c 2603
2604 return batch;
2605}
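/*
 * Worked example (illustrative, assuming 4KB pages): a 1GB zone has 262144
 * present pages, so batch starts at 256; 256 pages is 1MB, which exceeds the
 * 512KB cap, so batch becomes 128, then 32 after the /= 4, and the final
 * 2^n - 1 clamp gives fls(48) == 6 -> (1 << 5) - 1 == 31.
 */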
2606
2607inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
2608{
2609 struct per_cpu_pages *pcp;
2610
2611 memset(p, 0, sizeof(*p));
2612
3dfa5721 2613 pcp = &p->pcp;
2caaad41 2614 pcp->count = 0;
2615 pcp->high = 6 * batch;
2616 pcp->batch = max(1UL, 1 * batch);
2617 INIT_LIST_HEAD(&pcp->list);
2618}
2619
2620/*
2621 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
2622 * to the value high for the pageset p.
2623 */
2624
2625static void setup_pagelist_highmark(struct per_cpu_pageset *p,
2626 unsigned long high)
2627{
2628 struct per_cpu_pages *pcp;
2629
3dfa5721 2630 pcp = &p->pcp;
2631 pcp->high = high;
2632 pcp->batch = max(1UL, high/4);
2633 if ((high/4) > (PAGE_SHIFT * 8))
2634 pcp->batch = PAGE_SHIFT * 8;
2635}
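/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 12): a fraction that
 * yields high == 1000 gives batch == max(1, 1000/4) == 250, which the cap
 * then reduces to PAGE_SHIFT * 8 == 96.
 */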
2636
2637
2638#ifdef CONFIG_NUMA
2639/*
2640 * Boot pageset table. One per cpu which is going to be used for all
2641 * zones and all nodes. The parameters will be set in such a way
2642 * that an item put on a list will immediately be handed over to
2643 * the buddy list. This is safe since pageset manipulation is done
2644 * with interrupts disabled.
2645 *
2646 * Some NUMA counter updates may also be caught by the boot pagesets.
2647 *
2648 * The boot_pagesets must be kept even after bootup is complete for
2649 * unused processors and/or zones. They do play a role for bootstrapping
2650 * hotplugged processors.
2651 *
2652 * zoneinfo_show() and maybe other functions do
2653 * not check if the processor is online before following the pageset pointer.
2654 * Other parts of the kernel may not check if the zone is available.
2caaad41 2655 */
88a2a4ac 2656static struct per_cpu_pageset boot_pageset[NR_CPUS];
2657
2658/*
2659 * Dynamically allocate memory for the
2660 * per cpu pageset array in struct zone.
2661 */
6292d9aa 2662static int __cpuinit process_zones(int cpu)
2663{
2664 struct zone *zone, *dzone;
2665 int node = cpu_to_node(cpu);
2666
2667 node_set_state(node, N_CPU); /* this node has a cpu */
2668
2669 for_each_zone(zone) {
e7c8d5c9 2670
2671 if (!populated_zone(zone))
2672 continue;
2673
23316bc8 2674 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
37c0708d 2675 GFP_KERNEL, node);
23316bc8 2676 if (!zone_pcp(zone, cpu))
e7c8d5c9 2677 goto bad;
e7c8d5c9 2678
23316bc8 2679 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
8ad4b1fb
RS
2680
2681 if (percpu_pagelist_fraction)
2682 setup_pagelist_highmark(zone_pcp(zone, cpu),
2683 (zone->present_pages / percpu_pagelist_fraction));
2684 }
2685
2686 return 0;
2687bad:
2688 for_each_zone(dzone) {
2689 if (!populated_zone(dzone))
2690 continue;
2691 if (dzone == zone)
2692 break;
2693 kfree(zone_pcp(dzone, cpu));
2694 zone_pcp(dzone, cpu) = NULL;
2695 }
2696 return -ENOMEM;
2697}
2698
2699static inline void free_zone_pagesets(int cpu)
2700{
2701 struct zone *zone;
2702
2703 for_each_zone(zone) {
2704 struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
2705
2706 /* Free per_cpu_pageset if it is slab allocated */
2707 if (pset != &boot_pageset[cpu])
2708 kfree(pset);
e7c8d5c9 2709 zone_pcp(zone, cpu) = NULL;
e7c8d5c9 2710 }
2711}
2712
9c7b216d 2713static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
2714 unsigned long action,
2715 void *hcpu)
2716{
2717 int cpu = (long)hcpu;
2718 int ret = NOTIFY_OK;
2719
2720 switch (action) {
ce421c79 2721 case CPU_UP_PREPARE:
8bb78442 2722 case CPU_UP_PREPARE_FROZEN:
2723 if (process_zones(cpu))
2724 ret = NOTIFY_BAD;
2725 break;
2726 case CPU_UP_CANCELED:
8bb78442 2727 case CPU_UP_CANCELED_FROZEN:
ce421c79 2728 case CPU_DEAD:
8bb78442 2729 case CPU_DEAD_FROZEN:
2730 free_zone_pagesets(cpu);
2731 break;
2732 default:
2733 break;
2734 }
2735 return ret;
2736}
2737
74b85f37 2738static struct notifier_block __cpuinitdata pageset_notifier =
2739 { &pageset_cpuup_callback, NULL, 0 };
2740
78d9955b 2741void __init setup_per_cpu_pageset(void)
2742{
2743 int err;
2744
2745 /* Initialize per_cpu_pageset for cpu 0.
2746 * A cpuup callback will do this for every cpu
2747 * as it comes online
2748 */
2749 err = process_zones(smp_processor_id());
2750 BUG_ON(err);
2751 register_cpu_notifier(&pageset_notifier);
2752}
2753
2754#endif
2755
577a32f6 2756static noinline __init_refok
cca448fe 2757int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
2758{
2759 int i;
2760 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe 2761 size_t alloc_size;
2762
2763 /*
2764 * The per-page waitqueue mechanism uses hashed waitqueues
2765 * per zone.
2766 */
2767 zone->wait_table_hash_nr_entries =
2768 wait_table_hash_nr_entries(zone_size_pages);
2769 zone->wait_table_bits =
2770 wait_table_bits(zone->wait_table_hash_nr_entries);
2771 alloc_size = zone->wait_table_hash_nr_entries
2772 * sizeof(wait_queue_head_t);
2773
2774 if (system_state == SYSTEM_BOOTING) {
2775 zone->wait_table = (wait_queue_head_t *)
2776 alloc_bootmem_node(pgdat, alloc_size);
2777 } else {
2778 /*
2779 * This case means that a zone whose size was 0 gets new memory
2780 * via memory hot-add.
2781 * But it may be the case that a new node was hot-added. In
2782 * this case vmalloc() will not be able to use this new node's
2783 * memory - this wait_table must be initialized to use this new
2784 * node itself as well.
2785 * To use this new node's memory, further consideration will be
2786 * necessary.
2787 */
8691f3a7 2788 zone->wait_table = vmalloc(alloc_size);
2789 }
2790 if (!zone->wait_table)
2791 return -ENOMEM;
ed8ece2e 2792
02b694de 2793 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
ed8ece2e 2794 init_waitqueue_head(zone->wait_table + i);
2795
2796 return 0;
2797}
2798
c09b4240 2799static __meminit void zone_pcp_init(struct zone *zone)
2800{
2801 int cpu;
2802 unsigned long batch = zone_batchsize(zone);
2803
2804 for (cpu = 0; cpu < NR_CPUS; cpu++) {
2805#ifdef CONFIG_NUMA
2806 /* Early boot. Slab allocator not functional yet */
23316bc8 2807 zone_pcp(zone, cpu) = &boot_pageset[cpu];
2808 setup_pageset(&boot_pageset[cpu],0);
2809#else
2810 setup_pageset(zone_pcp(zone,cpu), batch);
2811#endif
2812 }
2813 if (zone->present_pages)
2814 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
2815 zone->name, zone->present_pages, batch);
2816}
2817
2818__meminit int init_currently_empty_zone(struct zone *zone,
2819 unsigned long zone_start_pfn,
2820 unsigned long size,
2821 enum memmap_context context)
2822{
2823 struct pglist_data *pgdat = zone->zone_pgdat;
2824 int ret;
2825 ret = zone_wait_table_init(zone, size);
2826 if (ret)
2827 return ret;
2828 pgdat->nr_zones = zone_idx(zone) + 1;
2829
2830 zone->zone_start_pfn = zone_start_pfn;
2831
2832 memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
2833
1e548deb 2834 zone_init_free_lists(zone);
2835
2836 return 0;
2837}
2838
2839#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
2840/*
2841 * Basic iterator support. Return the first range of PFNs for a node
2842 * Note: nid == MAX_NUMNODES returns first region regardless of node
2843 */
a3142c8e 2844static int __meminit first_active_region_index_in_nid(int nid)
2845{
2846 int i;
2847
2848 for (i = 0; i < nr_nodemap_entries; i++)
2849 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
2850 return i;
2851
2852 return -1;
2853}
2854
2855/*
2856 * Basic iterator support. Return the next active range of PFNs for a node
183ff22b 2857 * Note: nid == MAX_NUMNODES returns next region regardless of node
c713216d 2858 */
a3142c8e 2859static int __meminit next_active_region_index_in_nid(int index, int nid)
2860{
2861 for (index = index + 1; index < nr_nodemap_entries; index++)
2862 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
2863 return index;
2864
2865 return -1;
2866}
2867
2868#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
2869/*
2870 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
2871 * Architectures may implement their own version but if add_active_range()
2872 * was used and there are no special requirements, this is a convenient
2873 * alternative
2874 */
6f076f5d 2875int __meminit early_pfn_to_nid(unsigned long pfn)
2876{
2877 int i;
2878
2879 for (i = 0; i < nr_nodemap_entries; i++) {
2880 unsigned long start_pfn = early_node_map[i].start_pfn;
2881 unsigned long end_pfn = early_node_map[i].end_pfn;
2882
2883 if (start_pfn <= pfn && pfn < end_pfn)
2884 return early_node_map[i].nid;
2885 }
2886
2887 return 0;
2888}
2889#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
2890
2891/* Basic iterator support to walk early_node_map[] */
2892#define for_each_active_range_index_in_nid(i, nid) \
2893 for (i = first_active_region_index_in_nid(nid); i != -1; \
2894 i = next_active_region_index_in_nid(i, nid))
2895
2896/**
2897 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
2898 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
2899 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
2900 *
2901 * If an architecture guarantees that all ranges registered with
2902 * add_active_ranges() contain no holes and may be freed, this
 2903 * function may be used instead of calling free_bootmem() manually.
2904 */
2905void __init free_bootmem_with_active_regions(int nid,
2906 unsigned long max_low_pfn)
2907{
2908 int i;
2909
2910 for_each_active_range_index_in_nid(i, nid) {
2911 unsigned long size_pages = 0;
2912 unsigned long end_pfn = early_node_map[i].end_pfn;
2913
2914 if (early_node_map[i].start_pfn >= max_low_pfn)
2915 continue;
2916
2917 if (end_pfn > max_low_pfn)
2918 end_pfn = max_low_pfn;
2919
2920 size_pages = end_pfn - early_node_map[i].start_pfn;
2921 free_bootmem_node(NODE_DATA(early_node_map[i].nid),
2922 PFN_PHYS(early_node_map[i].start_pfn),
2923 size_pages << PAGE_SHIFT);
2924 }
2925}
2926
2927/**
2928 * sparse_memory_present_with_active_regions - Call memory_present for each active range
88ca3b94 2929 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
2930 *
2931 * If an architecture guarantees that all ranges registered with
2932 * add_active_ranges() contain no holes and may be freed, this
88ca3b94 2933 * function may be used instead of calling memory_present() manually.
2934 */
2935void __init sparse_memory_present_with_active_regions(int nid)
2936{
2937 int i;
2938
2939 for_each_active_range_index_in_nid(i, nid)
2940 memory_present(early_node_map[i].nid,
2941 early_node_map[i].start_pfn,
2942 early_node_map[i].end_pfn);
2943}
2944
2945/**
2946 * push_node_boundaries - Push node boundaries to at least the requested boundary
2947 * @nid: The nid of the node to push the boundary for
2948 * @start_pfn: The start pfn of the node
2949 * @end_pfn: The end pfn of the node
2950 *
 2951 * In reserve-based hot-add, mem_map is allocated up front but stays unused
 2952 * until hot-add time. Specifically, on x86_64, SRAT will report ranges that
 2953 * can potentially be hotplugged even though no physical memory exists. This
 2954 * function allows an arch to push out the node boundaries so that enough
 2955 * mem_map is allocated to be usable later.
2956 */
2957#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
2958void __init push_node_boundaries(unsigned int nid,
2959 unsigned long start_pfn, unsigned long end_pfn)
2960{
2961 printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n",
2962 nid, start_pfn, end_pfn);
2963
2964 /* Initialise the boundary for this node if necessary */
2965 if (node_boundary_end_pfn[nid] == 0)
2966 node_boundary_start_pfn[nid] = -1UL;
2967
2968 /* Update the boundaries */
2969 if (node_boundary_start_pfn[nid] > start_pfn)
2970 node_boundary_start_pfn[nid] = start_pfn;
2971 if (node_boundary_end_pfn[nid] < end_pfn)
2972 node_boundary_end_pfn[nid] = end_pfn;
2973}
2974
2975/* If necessary, push the node boundary out for reserve hotadd */
98011f56 2976static void __meminit account_node_boundary(unsigned int nid,
2977 unsigned long *start_pfn, unsigned long *end_pfn)
2978{
2979 printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n",
2980 nid, *start_pfn, *end_pfn);
2981
2982 /* Return if boundary information has not been provided */
2983 if (node_boundary_end_pfn[nid] == 0)
2984 return;
2985
2986 /* Check the boundaries and update if necessary */
2987 if (node_boundary_start_pfn[nid] < *start_pfn)
2988 *start_pfn = node_boundary_start_pfn[nid];
2989 if (node_boundary_end_pfn[nid] > *end_pfn)
2990 *end_pfn = node_boundary_end_pfn[nid];
2991}
2992#else
2993void __init push_node_boundaries(unsigned int nid,
2994 unsigned long start_pfn, unsigned long end_pfn) {}
2995
98011f56 2996static void __meminit account_node_boundary(unsigned int nid,
2997 unsigned long *start_pfn, unsigned long *end_pfn) {}
2998#endif
2999
3000
3001/**
3002 * get_pfn_range_for_nid - Return the start and end page frames for a node
3003 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3004 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3005 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3006 *
3007 * It returns the start and end page frame of a node based on information
3008 * provided by an arch calling add_active_range(). If called for a node
3009 * with no available memory, a warning is printed and the start and end
88ca3b94 3010 * PFNs will be 0.
c713216d 3011 */
a3142c8e 3012void __meminit get_pfn_range_for_nid(unsigned int nid,
3013 unsigned long *start_pfn, unsigned long *end_pfn)
3014{
3015 int i;
3016 *start_pfn = -1UL;
3017 *end_pfn = 0;
3018
3019 for_each_active_range_index_in_nid(i, nid) {
3020 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3021 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3022 }
3023
633c0666 3024 if (*start_pfn == -1UL)
c713216d 3025 *start_pfn = 0;
3026
3027 /* Push the node boundaries out if requested */
3028 account_node_boundary(nid, start_pfn, end_pfn);
3029}
3030
3031/*
3032 * This finds a zone that can be used for ZONE_MOVABLE pages. The
3033 * assumption is made that zones within a node are ordered in monotonic
3034 * increasing memory addresses so that the "highest" populated zone is used
3035 */
3036void __init find_usable_zone_for_movable(void)
3037{
3038 int zone_index;
3039 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3040 if (zone_index == ZONE_MOVABLE)
3041 continue;
3042
3043 if (arch_zone_highest_possible_pfn[zone_index] >
3044 arch_zone_lowest_possible_pfn[zone_index])
3045 break;
3046 }
3047
3048 VM_BUG_ON(zone_index == -1);
3049 movable_zone = zone_index;
3050}
3051
3052/*
3053 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 3054 * because it is sized independently of the architecture. Unlike the other zones,
3055 * the starting point for ZONE_MOVABLE is not fixed. It may be different
3056 * in each node depending on the size of each node and how evenly kernelcore
3057 * is distributed. This helper function adjusts the zone ranges
3058 * provided by the architecture for a given node by using the end of the
3059 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 3060 * zones within a node are in order of monotonically increasing memory addresses
3061 */
3062void __meminit adjust_zone_range_for_zone_movable(int nid,
3063 unsigned long zone_type,
3064 unsigned long node_start_pfn,
3065 unsigned long node_end_pfn,
3066 unsigned long *zone_start_pfn,
3067 unsigned long *zone_end_pfn)
3068{
3069 /* Only adjust if ZONE_MOVABLE is on this node */
3070 if (zone_movable_pfn[nid]) {
3071 /* Size ZONE_MOVABLE */
3072 if (zone_type == ZONE_MOVABLE) {
3073 *zone_start_pfn = zone_movable_pfn[nid];
3074 *zone_end_pfn = min(node_end_pfn,
3075 arch_zone_highest_possible_pfn[movable_zone]);
3076
3077 /* Adjust for ZONE_MOVABLE starting within this range */
3078 } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3079 *zone_end_pfn > zone_movable_pfn[nid]) {
3080 *zone_end_pfn = zone_movable_pfn[nid];
3081
3082 /* Check if this whole range is within ZONE_MOVABLE */
3083 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
3084 *zone_start_pfn = *zone_end_pfn;
3085 }
3086}
3087
3088/*
3089 * Return the number of pages a zone spans in a node, including holes
3090 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3091 */
6ea6e688 3092static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3093 unsigned long zone_type,
3094 unsigned long *ignored)
3095{
3096 unsigned long node_start_pfn, node_end_pfn;
3097 unsigned long zone_start_pfn, zone_end_pfn;
3098
3099 /* Get the start and end of the node and zone */
3100 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3101 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3102 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3103 adjust_zone_range_for_zone_movable(nid, zone_type,
3104 node_start_pfn, node_end_pfn,
3105 &zone_start_pfn, &zone_end_pfn);
3106
3107 /* Check that this node has pages within the zone's required range */
3108 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3109 return 0;
3110
3111 /* Move the zone boundaries inside the node if necessary */
3112 zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3113 zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3114
3115 /* Return the spanned pages */
3116 return zone_end_pfn - zone_start_pfn;
3117}
3118
3119/*
3120 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
88ca3b94 3121 * then all holes in the requested range will be accounted for.
c713216d 3122 */
a3142c8e 3123unsigned long __meminit __absent_pages_in_range(int nid,
3124 unsigned long range_start_pfn,
3125 unsigned long range_end_pfn)
3126{
3127 int i = 0;
3128 unsigned long prev_end_pfn = 0, hole_pages = 0;
3129 unsigned long start_pfn;
3130
3131 /* Find the end_pfn of the first active range of pfns in the node */
3132 i = first_active_region_index_in_nid(nid);
3133 if (i == -1)
3134 return 0;
3135
3136 prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3137
3138 /* Account for ranges before physical memory on this node */
3139 if (early_node_map[i].start_pfn > range_start_pfn)
b5445f95 3140 hole_pages = prev_end_pfn - range_start_pfn;
3141
3142 /* Find all holes for the zone within the node */
3143 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3144
3145 /* No need to continue if prev_end_pfn is outside the zone */
3146 if (prev_end_pfn >= range_end_pfn)
3147 break;
3148
3149 /* Make sure the end of the zone is not within the hole */
3150 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3151 prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3152
 3153 /* Update the hole size count and move on */
3154 if (start_pfn > range_start_pfn) {
3155 BUG_ON(prev_end_pfn > start_pfn);
3156 hole_pages += start_pfn - prev_end_pfn;
3157 }
3158 prev_end_pfn = early_node_map[i].end_pfn;
3159 }
3160
3161 /* Account for ranges past physical memory on this node */
3162 if (range_end_pfn > prev_end_pfn)
0c6cb974 3163 hole_pages += range_end_pfn -
3164 max(range_start_pfn, prev_end_pfn);
3165
3166 return hole_pages;
3167}
3168
3169/**
3170 * absent_pages_in_range - Return number of page frames in holes within a range
3171 * @start_pfn: The start PFN to start searching for holes
3172 * @end_pfn: The end PFN to stop searching for holes
3173 *
88ca3b94 3174 * It returns the number of page frames in memory holes within a range.
3175 */
3176unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3177 unsigned long end_pfn)
3178{
3179 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3180}
3181
3182/* Return the number of page frames in holes in a zone on a node */
6ea6e688 3183static unsigned long __meminit zone_absent_pages_in_node(int nid,
3184 unsigned long zone_type,
3185 unsigned long *ignored)
3186{
3187 unsigned long node_start_pfn, node_end_pfn;
3188 unsigned long zone_start_pfn, zone_end_pfn;
3189
3190 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3191 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3192 node_start_pfn);
3193 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3194 node_end_pfn);
3195
3196 adjust_zone_range_for_zone_movable(nid, zone_type,
3197 node_start_pfn, node_end_pfn,
3198 &zone_start_pfn, &zone_end_pfn);
9c7cd687 3199 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
c713216d 3200}
0e0b864e 3201
c713216d 3202#else
6ea6e688 3203static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
3204 unsigned long zone_type,
3205 unsigned long *zones_size)
3206{
3207 return zones_size[zone_type];
3208}
3209
6ea6e688 3210static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
3211 unsigned long zone_type,
3212 unsigned long *zholes_size)
3213{
3214 if (!zholes_size)
3215 return 0;
3216
3217 return zholes_size[zone_type];
3218}
0e0b864e 3219
3220#endif
3221
a3142c8e 3222static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
3223 unsigned long *zones_size, unsigned long *zholes_size)
3224{
3225 unsigned long realtotalpages, totalpages = 0;
3226 enum zone_type i;
3227
3228 for (i = 0; i < MAX_NR_ZONES; i++)
3229 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3230 zones_size);
3231 pgdat->node_spanned_pages = totalpages;
3232
3233 realtotalpages = totalpages;
3234 for (i = 0; i < MAX_NR_ZONES; i++)
3235 realtotalpages -=
3236 zone_absent_pages_in_node(pgdat->node_id, i,
3237 zholes_size);
3238 pgdat->node_present_pages = realtotalpages;
3239 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3240 realtotalpages);
3241}
3242
3243#ifndef CONFIG_SPARSEMEM
3244/*
3245 * Calculate the size of the zone->blockflags rounded to an unsigned long
 3246 * Start by making sure zonesize is a multiple of pageblock_nr_pages by rounding
3247 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
3248 * round what is now in bits to nearest long in bits, then return it in
3249 * bytes.
3250 */
3251static unsigned long __init usemap_size(unsigned long zonesize)
3252{
3253 unsigned long usemapsize;
3254
3255 usemapsize = roundup(zonesize, pageblock_nr_pages);
3256 usemapsize = usemapsize >> pageblock_order;
3257 usemapsize *= NR_PAGEBLOCK_BITS;
3258 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3259
3260 return usemapsize / 8;
3261}
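/*
 * Worked example (illustrative; pageblock_order and NR_PAGEBLOCK_BITS are
 * config-dependent, assume 512 pages per block and 4 bits per block): a 1GB
 * zone of 262144 4KB pages covers 512 pageblocks, needing 512 * 4 == 2048
 * bits, already a multiple of the long size, so usemap_size() returns 256
 * bytes.
 */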
3262
3263static void __init setup_usemap(struct pglist_data *pgdat,
3264 struct zone *zone, unsigned long zonesize)
3265{
3266 unsigned long usemapsize = usemap_size(zonesize);
3267 zone->pageblock_flags = NULL;
3268 if (usemapsize) {
3269 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3270 memset(zone->pageblock_flags, 0, usemapsize);
3271 }
3272}
3273#else
3274static void inline setup_usemap(struct pglist_data *pgdat,
3275 struct zone *zone, unsigned long zonesize) {}
3276#endif /* CONFIG_SPARSEMEM */
3277
d9c23400 3278#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
3279
3280/* Return a sensible default order for the pageblock size. */
3281static inline int pageblock_default_order(void)
3282{
3283 if (HPAGE_SHIFT > PAGE_SHIFT)
3284 return HUGETLB_PAGE_ORDER;
3285
3286 return MAX_ORDER-1;
3287}
3288
3289/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
3290static inline void __init set_pageblock_order(unsigned int order)
3291{
3292 /* Check that pageblock_nr_pages has not already been setup */
3293 if (pageblock_order)
3294 return;
3295
3296 /*
3297 * Assume the largest contiguous order of interest is a huge page.
3298 * This value may be variable depending on boot parameters on IA64
3299 */
3300 pageblock_order = order;
3301}
3302#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3303
3304/*
3305 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
3306 * and pageblock_default_order() are unused as pageblock_order is set
3307 * at compile-time. See include/linux/pageblock-flags.h for the values of
3308 * pageblock_order based on the kernel config
3309 */
3310static inline int pageblock_default_order(unsigned int order)
3311{
3312 return MAX_ORDER-1;
3313}
3314#define set_pageblock_order(x) do {} while (0)
3315
3316#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3317
3318/*
3319 * Set up the zone data structures:
3320 * - mark all pages reserved
3321 * - mark all memory queues empty
3322 * - clear the memory bitmaps
3323 */
b5a0e011 3324static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3325 unsigned long *zones_size, unsigned long *zholes_size)
3326{
2f1b6248 3327 enum zone_type j;
ed8ece2e 3328 int nid = pgdat->node_id;
1da177e4 3329 unsigned long zone_start_pfn = pgdat->node_start_pfn;
718127cc 3330 int ret;
1da177e4 3331
208d54e5 3332 pgdat_resize_init(pgdat);
1da177e4
LT
3333 pgdat->nr_zones = 0;
3334 init_waitqueue_head(&pgdat->kswapd_wait);
3335 pgdat->kswapd_max_order = 0;
3336
3337 for (j = 0; j < MAX_NR_ZONES; j++) {
3338 struct zone *zone = pgdat->node_zones + j;
0e0b864e 3339 unsigned long size, realsize, memmap_pages;
1da177e4 3340
c713216d
MG
3341 size = zone_spanned_pages_in_node(nid, j, zones_size);
3342 realsize = size - zone_absent_pages_in_node(nid, j,
3343 zholes_size);
1da177e4 3344
0e0b864e
MG
3345 /*
3346 * Adjust realsize so that it accounts for how much memory
3347 * is used by this zone for memmap. This affects the watermark
3348 * and per-cpu initialisations
3349 */
3350 memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT;
3351 if (realsize >= memmap_pages) {
3352 realsize -= memmap_pages;
3353 printk(KERN_DEBUG
3354 " %s zone: %lu pages used for memmap\n",
3355 zone_names[j], memmap_pages);
3356 } else
3357 printk(KERN_WARNING
3358 " %s zone: %lu pages exceeds realsize %lu\n",
3359 zone_names[j], memmap_pages, realsize);
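		/*
		 * Rough example of the adjustment above (a sketch, assuming
		 * 4K pages and sizeof(struct page) == 64, which vary by
		 * config and architecture): a zone spanning 262144 pages
		 * needs 262144 * 64 bytes of memmap, i.e. 16MB, which is
		 * 4096 pages, so realsize is reduced by 4096.
		 */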
3360
6267276f
CL
3361 /* Account for reserved pages */
3362 if (j == 0 && realsize > dma_reserve) {
0e0b864e 3363 realsize -= dma_reserve;
6267276f
CL
3364 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
3365 zone_names[0], dma_reserve);
0e0b864e
MG
3366 }
3367
98d2b0eb 3368 if (!is_highmem_idx(j))
1da177e4
LT
3369 nr_kernel_pages += realsize;
3370 nr_all_pages += realsize;
3371
3372 zone->spanned_pages = size;
3373 zone->present_pages = realsize;
9614634f 3374#ifdef CONFIG_NUMA
d5f541ed 3375 zone->node = nid;
8417bba4 3376 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
9614634f 3377 / 100;
0ff38490 3378 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
9614634f 3379#endif
1da177e4
LT
3380 zone->name = zone_names[j];
3381 spin_lock_init(&zone->lock);
3382 spin_lock_init(&zone->lru_lock);
bdc8cb98 3383 zone_seqlock_init(zone);
1da177e4 3384 zone->zone_pgdat = pgdat;
1da177e4 3385
3bb1a852 3386 zone->prev_priority = DEF_PRIORITY;
1da177e4 3387
ed8ece2e 3388 zone_pcp_init(zone);
1da177e4
LT
3389 INIT_LIST_HEAD(&zone->active_list);
3390 INIT_LIST_HEAD(&zone->inactive_list);
3391 zone->nr_scan_active = 0;
3392 zone->nr_scan_inactive = 0;
2244b95a 3393 zap_zone_vm_stats(zone);
e815af95 3394 zone->flags = 0;
1da177e4
LT
3395 if (!size)
3396 continue;
3397
ba72cb8c 3398 set_pageblock_order(pageblock_default_order());
835c134e 3399 setup_usemap(pgdat, zone, size);
a2f3aa02
DH
3400 ret = init_currently_empty_zone(zone, zone_start_pfn,
3401 size, MEMMAP_EARLY);
718127cc 3402 BUG_ON(ret);
1da177e4 3403 zone_start_pfn += size;
1da177e4
LT
3404 }
3405}
3406
577a32f6 3407static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
1da177e4 3408{
1da177e4
LT
3409 /* Skip empty nodes */
3410 if (!pgdat->node_spanned_pages)
3411 return;
3412
d41dee36 3413#ifdef CONFIG_FLAT_NODE_MEM_MAP
1da177e4
LT
3414 /* ia64 gets its own node_mem_map, before this, without bootmem */
3415 if (!pgdat->node_mem_map) {
e984bb43 3416 unsigned long size, start, end;
d41dee36
AW
3417 struct page *map;
3418
e984bb43
BP
3419 /*
3420 * The zone's endpoints aren't required to be MAX_ORDER
3421 * aligned, but the node_mem_map endpoints must be MAX_ORDER
3422 * aligned in order for the buddy allocator to function correctly.
3423 */
3424 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
3425 end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
3426 end = ALIGN(end, MAX_ORDER_NR_PAGES);
3427 size = (end - start) * sizeof(struct page);
6f167ec7
DH
3428 map = alloc_remap(pgdat->node_id, size);
3429 if (!map)
3430 map = alloc_bootmem_node(pgdat, size);
e984bb43 3431 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
1da177e4 3432 }
12d810c1 3433#ifndef CONFIG_NEED_MULTIPLE_NODES
1da177e4
LT
3434 /*
3435 * With no DISCONTIG, the global mem_map is just set as node 0's
3436 */
c713216d 3437 if (pgdat == NODE_DATA(0)) {
1da177e4 3438 mem_map = NODE_DATA(0)->node_mem_map;
c713216d
MG
3439#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3440 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
467bc461 3441 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
c713216d
MG
3442#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3443 }
1da177e4 3444#endif
d41dee36 3445#endif /* CONFIG_FLAT_NODE_MEM_MAP */
1da177e4
LT
3446}
3447
b5a0e011 3448void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat,
1da177e4
LT
3449 unsigned long *zones_size, unsigned long node_start_pfn,
3450 unsigned long *zholes_size)
3451{
3452 pgdat->node_id = nid;
3453 pgdat->node_start_pfn = node_start_pfn;
c713216d 3454 calculate_node_totalpages(pgdat, zones_size, zholes_size);
1da177e4
LT
3455
3456 alloc_node_mem_map(pgdat);
3457
3458 free_area_init_core(pgdat, zones_size, zholes_size);
3459}
3460
c713216d 3461#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
418508c1
MS
3462
3463#if MAX_NUMNODES > 1
3464/*
3465 * Figure out the number of possible node ids.
3466 */
3467static void __init setup_nr_node_ids(void)
3468{
3469 unsigned int node;
3470 unsigned int highest = 0;
3471
3472 for_each_node_mask(node, node_possible_map)
3473 highest = node;
3474 nr_node_ids = highest + 1;
3475}
3476#else
3477static inline void setup_nr_node_ids(void)
3478{
3479}
3480#endif
3481
c713216d
MG
3482/**
3483 * add_active_range - Register a range of PFNs backed by physical memory
3484 * @nid: The node ID the range resides on
3485 * @start_pfn: The start PFN of the available physical memory
3486 * @end_pfn: The end PFN of the available physical memory
3487 *
3488 * These ranges are stored in an early_node_map[] and later used by
3489 * free_area_init_nodes() to calculate zone sizes and holes. If the
3490 * range spans a memory hole, it is up to the architecture to ensure
3491 * the memory is not freed by the bootmem allocator. If possible
3492 * the range being registered will be merged with existing ranges.
3493 */
3494void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3495 unsigned long end_pfn)
3496{
3497 int i;
3498
3499 printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) "
3500 "%d entries of %d used\n",
3501 nid, start_pfn, end_pfn,
3502 nr_nodemap_entries, MAX_ACTIVE_REGIONS);
3503
3504 /* Merge with existing active regions if possible */
3505 for (i = 0; i < nr_nodemap_entries; i++) {
3506 if (early_node_map[i].nid != nid)
3507 continue;
3508
3509 /* Skip if an existing region covers this new one */
3510 if (start_pfn >= early_node_map[i].start_pfn &&
3511 end_pfn <= early_node_map[i].end_pfn)
3512 return;
3513
3514 /* Merge forward if suitable */
3515 if (start_pfn <= early_node_map[i].end_pfn &&
3516 end_pfn > early_node_map[i].end_pfn) {
3517 early_node_map[i].end_pfn = end_pfn;
3518 return;
3519 }
3520
3521 /* Merge backward if suitable */
3522 if (start_pfn < early_node_map[i].end_pfn &&
3523 end_pfn >= early_node_map[i].start_pfn) {
3524 early_node_map[i].start_pfn = start_pfn;
3525 return;
3526 }
3527 }
3528
3529 /* Check that early_node_map is large enough */
3530 if (i >= MAX_ACTIVE_REGIONS) {
3531 printk(KERN_CRIT "More than %d memory regions, truncating\n",
3532 MAX_ACTIVE_REGIONS);
3533 return;
3534 }
3535
3536 early_node_map[i].nid = nid;
3537 early_node_map[i].start_pfn = start_pfn;
3538 early_node_map[i].end_pfn = end_pfn;
3539 nr_nodemap_entries = i + 1;
3540}
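/*
 * Example of the merge behaviour above (a sketch with made-up PFNs): if
 * node 0 already has the active range [0, 4096) registered, then
 * add_active_range(0, 4096, 8192) extends it forward to [0, 8192),
 * add_active_range(0, 1024, 2048) is covered entirely and returns early,
 * and add_active_range(0, 10000, 12000) starts a new early_node_map[] entry.
 */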
3541
3542/**
3543 * shrink_active_range - Shrink an existing registered range of PFNs
3544 * @nid: The node id the range is on that should be shrunk
3545 * @old_end_pfn: The old end PFN of the range
3546 * @new_end_pfn: The new end PFN of the range
3547 *
3548 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
3549 * The map is kept at the end of the physical page range that has already been
3550 * registered with add_active_range(). This function allows an arch to shrink
3551 * an existing registered range.
3552 */
3553void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
3554 unsigned long new_end_pfn)
3555{
3556 int i;
3557
3558 /* Find the old active region end and shrink */
3559 for_each_active_range_index_in_nid(i, nid)
3560 if (early_node_map[i].end_pfn == old_end_pfn) {
3561 early_node_map[i].end_pfn = new_end_pfn;
3562 break;
3563 }
3564}
3565
3566/**
3567 * remove_all_active_ranges - Remove all currently registered regions
88ca3b94 3568 *
c713216d
MG
3569 * During discovery, it may be found that a table like SRAT is invalid
3570 * and an alternative discovery method must be used. This function removes
3571 * all currently registered regions.
3572 */
88ca3b94 3573void __init remove_all_active_ranges(void)
c713216d
MG
3574{
3575 memset(early_node_map, 0, sizeof(early_node_map));
3576 nr_nodemap_entries = 0;
fb01439c
MG
3577#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
3578 memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn));
3579 memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn));
3580#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
c713216d
MG
3581}
3582
3583/* Compare two active node_active_regions */
3584static int __init cmp_node_active_region(const void *a, const void *b)
3585{
3586 struct node_active_region *arange = (struct node_active_region *)a;
3587 struct node_active_region *brange = (struct node_active_region *)b;
3588
3589 /* Done this way to avoid overflows */
3590 if (arange->start_pfn > brange->start_pfn)
3591 return 1;
3592 if (arange->start_pfn < brange->start_pfn)
3593 return -1;
3594
3595 return 0;
3596}
3597
3598/* sort the node_map by start_pfn */
3599static void __init sort_node_map(void)
3600{
3601 sort(early_node_map, (size_t)nr_nodemap_entries,
3602 sizeof(struct node_active_region),
3603 cmp_node_active_region, NULL);
3604}
3605
a6af2bc3 3606/* Find the lowest pfn for a node */
c713216d
MG
3607unsigned long __init find_min_pfn_for_node(unsigned long nid)
3608{
3609 int i;
a6af2bc3 3610 unsigned long min_pfn = ULONG_MAX;
1abbfb41 3611
c713216d
MG
3612 /* Assuming a sorted map, the first range found has the starting pfn */
3613 for_each_active_range_index_in_nid(i, nid)
a6af2bc3 3614 min_pfn = min(min_pfn, early_node_map[i].start_pfn);
c713216d 3615
a6af2bc3
MG
3616 if (min_pfn == ULONG_MAX) {
3617 printk(KERN_WARNING
3618 "Could not find start_pfn for node %lu\n", nid);
3619 return 0;
3620 }
3621
3622 return min_pfn;
c713216d
MG
3623}
3624
3625/**
3626 * find_min_pfn_with_active_regions - Find the minimum PFN registered
3627 *
3628 * It returns the minimum PFN based on information provided via
88ca3b94 3629 * add_active_range().
c713216d
MG
3630 */
3631unsigned long __init find_min_pfn_with_active_regions(void)
3632{
3633 return find_min_pfn_for_node(MAX_NUMNODES);
3634}
3635
3636/**
3637 * find_max_pfn_with_active_regions - Find the maximum PFN registered
3638 *
3639 * It returns the maximum PFN based on information provided via
88ca3b94 3640 * add_active_range().
c713216d
MG
3641 */
3642unsigned long __init find_max_pfn_with_active_regions(void)
3643{
3644 int i;
3645 unsigned long max_pfn = 0;
3646
3647 for (i = 0; i < nr_nodemap_entries; i++)
3648 max_pfn = max(max_pfn, early_node_map[i].end_pfn);
3649
3650 return max_pfn;
3651}
3652
37b07e41
LS
3653/*
3654 * early_calculate_totalpages()
3655 * Sum pages in active regions for movable zone.
3656 * Populate N_HIGH_MEMORY for calculating usable_nodes.
3657 */
484f51f8 3658static unsigned long __init early_calculate_totalpages(void)
7e63efef
MG
3659{
3660 int i;
3661 unsigned long totalpages = 0;
3662
37b07e41
LS
3663 for (i = 0; i < nr_nodemap_entries; i++) {
3664 unsigned long pages = early_node_map[i].end_pfn -
7e63efef 3665 early_node_map[i].start_pfn;
37b07e41
LS
3666 totalpages += pages;
3667 if (pages)
3668 node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
3669 }
3670 return totalpages;
7e63efef
MG
3671}
3672
2a1e274a
MG
3673/*
3674 * Find the PFN the Movable zone begins in each node. Kernel memory
3675 * is spread evenly between nodes as long as the nodes have enough
3676 * memory. When they don't, some nodes will have more kernelcore than
3677 * others
3678 */
3679void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
3680{
3681 int i, nid;
3682 unsigned long usable_startpfn;
3683 unsigned long kernelcore_node, kernelcore_remaining;
37b07e41
LS
3684 unsigned long totalpages = early_calculate_totalpages();
3685 int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
2a1e274a 3686
7e63efef
MG
3687 /*
3688 * If movablecore was specified, calculate the size of
3689 * kernelcore it corresponds to so that memory usable for
3690 * any allocation type is evenly spread. If both kernelcore
3691 * and movablecore are specified, then the value of kernelcore
3692 * will be used for required_kernelcore if it's greater than
3693 * what movablecore would have allowed.
3694 */
3695 if (required_movablecore) {
7e63efef
MG
3696 unsigned long corepages;
3697
3698 /*
3699 * Round-up so that ZONE_MOVABLE is at least as large as what
3700 * was requested by the user
3701 */
3702 required_movablecore =
3703 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
3704 corepages = totalpages - required_movablecore;
3705
3706 required_kernelcore = max(required_kernelcore, corepages);
3707 }
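	/*
	 * Worked example of the conversion above (a sketch with made-up
	 * sizes, assuming 4K pages and MAX_ORDER_NR_PAGES == 1024): on a
	 * machine with totalpages == 1048576 (4GB), "movablecore=1G" gives
	 * required_movablecore == 262144 pages (already a multiple of 1024),
	 * so corepages == 786432 and at least 3GB worth of pages is kept as
	 * kernelcore.
	 */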
3708
2a1e274a
MG
3709 /* If kernelcore was not specified, there is no ZONE_MOVABLE */
3710 if (!required_kernelcore)
3711 return;
3712
3713 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
3714 find_usable_zone_for_movable();
3715 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
3716
3717restart:
3718 /* Spread kernelcore memory as evenly as possible throughout nodes */
3719 kernelcore_node = required_kernelcore / usable_nodes;
37b07e41 3720 for_each_node_state(nid, N_HIGH_MEMORY) {
2a1e274a
MG
3721 /*
3722 * Recalculate kernelcore_node if the division per node
3723 * now exceeds what is necessary to satisfy the requested
3724 * amount of memory for the kernel
3725 */
3726 if (required_kernelcore < kernelcore_node)
3727 kernelcore_node = required_kernelcore / usable_nodes;
3728
3729 /*
3730 * As the map is walked, we track how much memory is usable
3731 * by the kernel using kernelcore_remaining. When it is
3732 * 0, the rest of the node is usable by ZONE_MOVABLE
3733 */
3734 kernelcore_remaining = kernelcore_node;
3735
3736 /* Go through each range of PFNs within this node */
3737 for_each_active_range_index_in_nid(i, nid) {
3738 unsigned long start_pfn, end_pfn;
3739 unsigned long size_pages;
3740
3741 start_pfn = max(early_node_map[i].start_pfn,
3742 zone_movable_pfn[nid]);
3743 end_pfn = early_node_map[i].end_pfn;
3744 if (start_pfn >= end_pfn)
3745 continue;
3746
3747 /* Account for what is only usable for kernelcore */
3748 if (start_pfn < usable_startpfn) {
3749 unsigned long kernel_pages;
3750 kernel_pages = min(end_pfn, usable_startpfn)
3751 - start_pfn;
3752
3753 kernelcore_remaining -= min(kernel_pages,
3754 kernelcore_remaining);
3755 required_kernelcore -= min(kernel_pages,
3756 required_kernelcore);
3757
3758 /* Continue if range is now fully accounted */
3759 if (end_pfn <= usable_startpfn) {
3760
3761 /*
3762 * Push zone_movable_pfn to the end so
3763 * that if we have to rebalance
3764 * kernelcore across nodes, we will
3765 * not double account here
3766 */
3767 zone_movable_pfn[nid] = end_pfn;
3768 continue;
3769 }
3770 start_pfn = usable_startpfn;
3771 }
3772
3773 /*
3774 * The usable PFN range for ZONE_MOVABLE is from
3775 * start_pfn->end_pfn. Calculate size_pages as the
3776 * number of pages used as kernelcore
3777 */
3778 size_pages = end_pfn - start_pfn;
3779 if (size_pages > kernelcore_remaining)
3780 size_pages = kernelcore_remaining;
3781 zone_movable_pfn[nid] = start_pfn + size_pages;
3782
3783 /*
3784 * Some kernelcore has been met, update counts and
3785 * break if the kernelcore for this node has been
3786 * satisfied
3787 */
3788 required_kernelcore -= min(required_kernelcore,
3789 size_pages);
3790 kernelcore_remaining -= size_pages;
3791 if (!kernelcore_remaining)
3792 break;
3793 }
3794 }
3795
3796 /*
3797 * If there is still required_kernelcore, we do another pass with one
3798 * less node in the count. This will push zone_movable_pfn[nid] further
3799 * along on the nodes that still have memory until kernelcore is
3800 * satisfied
3801 */
3802 usable_nodes--;
3803 if (usable_nodes && required_kernelcore > usable_nodes)
3804 goto restart;
3805
3806 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
3807 for (nid = 0; nid < MAX_NUMNODES; nid++)
3808 zone_movable_pfn[nid] =
3809 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
3810}
3811
37b07e41
LS
3812/* Any regular memory on that node ? */
3813static void check_for_regular_memory(pg_data_t *pgdat)
3814{
3815#ifdef CONFIG_HIGHMEM
3816 enum zone_type zone_type;
3817
3818 for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
3819 struct zone *zone = &pgdat->node_zones[zone_type];
3820 if (zone->present_pages)
3821 node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
3822 }
3823#endif
3824}
3825
c713216d
MG
3826/**
3827 * free_area_init_nodes - Initialise all pg_data_t and zone data
88ca3b94 3828 * @max_zone_pfn: an array of max PFNs for each zone
c713216d
MG
3829 *
3830 * This will call free_area_init_node() for each active node in the system.
3831 * Using the page ranges provided by add_active_range(), the size of each
3832 * zone in each node and their holes is calculated. If the maximum PFN
3833 * between two adjacent zones match, it is assumed that the zone is empty.
3834 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
3835 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
3836 * starts where the previous one ended. For example, ZONE_DMA32 starts
3837 * at arch_max_dma_pfn.
3838 */
3839void __init free_area_init_nodes(unsigned long *max_zone_pfn)
3840{
3841 unsigned long nid;
3842 enum zone_type i;
3843
a6af2bc3
MG
3844 /* Sort early_node_map as initialisation assumes it is sorted */
3845 sort_node_map();
3846
c713216d
MG
3847 /* Record where the zone boundaries are */
3848 memset(arch_zone_lowest_possible_pfn, 0,
3849 sizeof(arch_zone_lowest_possible_pfn));
3850 memset(arch_zone_highest_possible_pfn, 0,
3851 sizeof(arch_zone_highest_possible_pfn));
3852 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
3853 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
3854 for (i = 1; i < MAX_NR_ZONES; i++) {
2a1e274a
MG
3855 if (i == ZONE_MOVABLE)
3856 continue;
c713216d
MG
3857 arch_zone_lowest_possible_pfn[i] =
3858 arch_zone_highest_possible_pfn[i-1];
3859 arch_zone_highest_possible_pfn[i] =
3860 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
3861 }
2a1e274a
MG
3862 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
3863 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
3864
3865 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
3866 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
3867 find_zone_movable_pfns_for_nodes(zone_movable_pfn);
c713216d 3868
c713216d
MG
3869 /* Print out the zone ranges */
3870 printk("Zone PFN ranges:\n");
2a1e274a
MG
3871 for (i = 0; i < MAX_NR_ZONES; i++) {
3872 if (i == ZONE_MOVABLE)
3873 continue;
c713216d
MG
3874 printk(" %-8s %8lu -> %8lu\n",
3875 zone_names[i],
3876 arch_zone_lowest_possible_pfn[i],
3877 arch_zone_highest_possible_pfn[i]);
2a1e274a
MG
3878 }
3879
3880 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
3881 printk("Movable zone start PFN for each node\n");
3882 for (i = 0; i < MAX_NUMNODES; i++) {
3883 if (zone_movable_pfn[i])
3884 printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
3885 }
c713216d
MG
3886
3887 /* Print out the early_node_map[] */
3888 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
3889 for (i = 0; i < nr_nodemap_entries; i++)
3890 printk(" %3d: %8lu -> %8lu\n", early_node_map[i].nid,
3891 early_node_map[i].start_pfn,
3892 early_node_map[i].end_pfn);
3893
3894 /* Initialise every node */
8ef82866 3895 setup_nr_node_ids();
c713216d
MG
3896 for_each_online_node(nid) {
3897 pg_data_t *pgdat = NODE_DATA(nid);
3898 free_area_init_node(nid, pgdat, NULL,
3899 find_min_pfn_for_node(nid), NULL);
37b07e41
LS
3900
3901 /* Any memory on that node */
3902 if (pgdat->node_present_pages)
3903 node_set_state(nid, N_HIGH_MEMORY);
3904 check_for_regular_memory(pgdat);
c713216d
MG
3905 }
3906}
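/*
 * A minimal sketch of a typical caller of the code above (hypothetical PFN
 * values and node 0 only; real callers live in arch code and derive the
 * ranges from e820/EFI/device-tree memory maps):
 *
 *	unsigned long max_zone_pfns[MAX_NR_ZONES];
 *
 *	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 *	max_zone_pfns[ZONE_DMA]    = MAX_DMA_PFN;
 *	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 *
 *	add_active_range(0, 0, 0x9f);
 *	add_active_range(0, 0x100, max_low_pfn);
 *	free_area_init_nodes(max_zone_pfns);
 */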
2a1e274a 3907
7e63efef 3908static int __init cmdline_parse_core(char *p, unsigned long *core)
2a1e274a
MG
3909{
3910 unsigned long long coremem;
3911 if (!p)
3912 return -EINVAL;
3913
3914 coremem = memparse(p, &p);
7e63efef 3915 *core = coremem >> PAGE_SHIFT;
2a1e274a 3916
7e63efef 3917 /* Paranoid check that UL is enough for the coremem value */
2a1e274a
MG
3918 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
3919
3920 return 0;
3921}
ed7ed365 3922
7e63efef
MG
3923/*
3924 * kernelcore=size sets the amount of memory set aside for allocations that
3925 * cannot be reclaimed or migrated.
3926 */
3927static int __init cmdline_parse_kernelcore(char *p)
3928{
3929 return cmdline_parse_core(p, &required_kernelcore);
3930}
3931
3932/*
3933 * movablecore=size sets the amount of memory set aside for allocations that
3934 * can be reclaimed or migrated.
3935 */
3936static int __init cmdline_parse_movablecore(char *p)
3937{
3938 return cmdline_parse_core(p, &required_movablecore);
3939}
3940
ed7ed365 3941early_param("kernelcore", cmdline_parse_kernelcore);
7e63efef 3942early_param("movablecore", cmdline_parse_movablecore);
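/*
 * Worked example of the parsing above (a sketch assuming 4K pages):
 * booting with "kernelcore=512M" makes memparse() return 536870912, so
 * required_kernelcore becomes 536870912 >> 12 == 131072 pages.
 */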
ed7ed365 3943
c713216d
MG
3944#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3945
0e0b864e 3946/**
88ca3b94
RD
3947 * set_dma_reserve - set the specified number of pages reserved in the first zone
3948 * @new_dma_reserve: The number of pages to mark reserved
0e0b864e
MG
3949 *
3950 * The per-cpu batchsize and zone watermarks are determined by present_pages.
3951 * In the DMA zone, a significant percentage may be consumed by the kernel image
3952 * and other unfreeable allocations, which can skew the watermarks badly. This
88ca3b94
RD
3953 * function may optionally be used to account for unfreeable pages in the
3954 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
3955 * a smaller per-cpu batchsize.
0e0b864e
MG
3956 */
3957void __init set_dma_reserve(unsigned long new_dma_reserve)
3958{
3959 dma_reserve = new_dma_reserve;
3960}
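/*
 * Example (a sketch with made-up numbers, assuming 4K pages): if an
 * architecture knows that 4MB of its first zone is pinned by the kernel
 * image and boot-time allocations, it can call set_dma_reserve(1024)
 * before the zones are initialised so those pages are not counted when
 * sizing the watermarks and per-cpu batches.
 */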
3961
93b7504e 3962#ifndef CONFIG_NEED_MULTIPLE_NODES
1da177e4
LT
3963static bootmem_data_t contig_bootmem_data;
3964struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
3965
3966EXPORT_SYMBOL(contig_page_data);
93b7504e 3967#endif
1da177e4
LT
3968
3969void __init free_area_init(unsigned long *zones_size)
3970{
93b7504e 3971 free_area_init_node(0, NODE_DATA(0), zones_size,
1da177e4
LT
3972 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
3973}
1da177e4 3974
1da177e4
LT
3975static int page_alloc_cpu_notify(struct notifier_block *self,
3976 unsigned long action, void *hcpu)
3977{
3978 int cpu = (unsigned long)hcpu;
1da177e4 3979
8bb78442 3980 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
9f8f2172
CL
3981 drain_pages(cpu);
3982
3983 /*
3984 * Spill the event counters of the dead processor
3985 * into the current processors event counters.
3986 * This artificially elevates the count of the current
3987 * processor.
3988 */
f8891e5e 3989 vm_events_fold_cpu(cpu);
9f8f2172
CL
3990
3991 /*
3992 * Zero the differential counters of the dead processor
3993 * so that the vm statistics are consistent.
3994 *
3995 * This is only okay since the processor is dead and cannot
3996 * race with what we are doing.
3997 */
2244b95a 3998 refresh_cpu_vm_stats(cpu);
1da177e4
LT
3999 }
4000 return NOTIFY_OK;
4001}
1da177e4
LT
4002
4003void __init page_alloc_init(void)
4004{
4005 hotcpu_notifier(page_alloc_cpu_notify, 0);
4006}
4007
cb45b0e9
HA
4008/*
4009 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
4010 * or min_free_kbytes changes.
4011 */
4012static void calculate_totalreserve_pages(void)
4013{
4014 struct pglist_data *pgdat;
4015 unsigned long reserve_pages = 0;
2f6726e5 4016 enum zone_type i, j;
cb45b0e9
HA
4017
4018 for_each_online_pgdat(pgdat) {
4019 for (i = 0; i < MAX_NR_ZONES; i++) {
4020 struct zone *zone = pgdat->node_zones + i;
4021 unsigned long max = 0;
4022
4023 /* Find valid and maximum lowmem_reserve in the zone */
4024 for (j = i; j < MAX_NR_ZONES; j++) {
4025 if (zone->lowmem_reserve[j] > max)
4026 max = zone->lowmem_reserve[j];
4027 }
4028
4029 /* we treat pages_high as reserved pages. */
4030 max += zone->pages_high;
4031
4032 if (max > zone->present_pages)
4033 max = zone->present_pages;
4034 reserve_pages += max;
4035 }
4036 }
4037 totalreserve_pages = reserve_pages;
4038}
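/*
 * Example of the sum above (a sketch with made-up values): a zone with
 * pages_high == 384, a largest lowmem_reserve[] entry of 3072 and
 * present_pages == 262144 contributes min(3072 + 384, 262144) == 3456
 * pages to totalreserve_pages.
 */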
4039
1da177e4
LT
4040/*
4041 * setup_per_zone_lowmem_reserve - called whenever
4042 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
4043 * has a correct lowmem_reserve value, so an adequate number of
4044 * pages are left in the zone after a successful __alloc_pages().
4045 */
4046static void setup_per_zone_lowmem_reserve(void)
4047{
4048 struct pglist_data *pgdat;
2f6726e5 4049 enum zone_type j, idx;
1da177e4 4050
ec936fc5 4051 for_each_online_pgdat(pgdat) {
1da177e4
LT
4052 for (j = 0; j < MAX_NR_ZONES; j++) {
4053 struct zone *zone = pgdat->node_zones + j;
4054 unsigned long present_pages = zone->present_pages;
4055
4056 zone->lowmem_reserve[j] = 0;
4057
2f6726e5
CL
4058 idx = j;
4059 while (idx) {
1da177e4
LT
4060 struct zone *lower_zone;
4061
2f6726e5
CL
4062 idx--;
4063
1da177e4
LT
4064 if (sysctl_lowmem_reserve_ratio[idx] < 1)
4065 sysctl_lowmem_reserve_ratio[idx] = 1;
4066
4067 lower_zone = pgdat->node_zones + idx;
4068 lower_zone->lowmem_reserve[j] = present_pages /
4069 sysctl_lowmem_reserve_ratio[idx];
4070 present_pages += lower_zone->present_pages;
4071 }
4072 }
4073 }
cb45b0e9
HA
4074
4075 /* update totalreserve_pages */
4076 calculate_totalreserve_pages();
1da177e4
LT
4077}
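/*
 * Worked example of the loop above (a sketch with made-up zone sizes and
 * the default ratio of 256 for the lower zone): with a 262144-page lower
 * zone and a 786432-page higher zone, the lower zone gets
 * lowmem_reserve[higher] == 786432 / 256 == 3072 pages, i.e. allocations
 * that could have been satisfied from the higher zone must leave 3072
 * extra free pages before falling back into the lower zone.
 */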
4078
88ca3b94
RD
4079/**
4080 * setup_per_zone_pages_min - called when min_free_kbytes changes.
4081 *
4082 * Ensures that the pages_{min,low,high} values for each zone are set correctly
4083 * with respect to min_free_kbytes.
1da177e4 4084 */
3947be19 4085void setup_per_zone_pages_min(void)
1da177e4
LT
4086{
4087 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4088 unsigned long lowmem_pages = 0;
4089 struct zone *zone;
4090 unsigned long flags;
4091
4092 /* Calculate total number of !ZONE_HIGHMEM pages */
4093 for_each_zone(zone) {
4094 if (!is_highmem(zone))
4095 lowmem_pages += zone->present_pages;
4096 }
4097
4098 for_each_zone(zone) {
ac924c60
AM
4099 u64 tmp;
4100
1da177e4 4101 spin_lock_irqsave(&zone->lru_lock, flags);
ac924c60
AM
4102 tmp = (u64)pages_min * zone->present_pages;
4103 do_div(tmp, lowmem_pages);
1da177e4
LT
4104 if (is_highmem(zone)) {
4105 /*
669ed175
NP
4106 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
4107 * need highmem pages, so cap pages_min to a small
4108 * value here.
4109 *
4110 * The (pages_high-pages_low) and (pages_low-pages_min)
4111 * deltas controls asynch page reclaim, and so should
4112 * not be capped for highmem.
1da177e4
LT
4113 */
4114 int min_pages;
4115
4116 min_pages = zone->present_pages / 1024;
4117 if (min_pages < SWAP_CLUSTER_MAX)
4118 min_pages = SWAP_CLUSTER_MAX;
4119 if (min_pages > 128)
4120 min_pages = 128;
4121 zone->pages_min = min_pages;
4122 } else {
669ed175
NP
4123 /*
4124 * If it's a lowmem zone, reserve a number of pages
1da177e4
LT
4125 * proportionate to the zone's size.
4126 */
669ed175 4127 zone->pages_min = tmp;
1da177e4
LT
4128 }
4129
ac924c60
AM
4130 zone->pages_low = zone->pages_min + (tmp >> 2);
4131 zone->pages_high = zone->pages_min + (tmp >> 1);
56fd56b8 4132 setup_zone_migrate_reserve(zone);
1da177e4
LT
4133 spin_unlock_irqrestore(&zone->lru_lock, flags);
4134 }
cb45b0e9
HA
4135
4136 /* update totalreserve_pages */
4137 calculate_totalreserve_pages();
1da177e4
LT
4138}
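/*
 * Worked example of the calculation above (a sketch, assuming 4K pages,
 * min_free_kbytes == 4096 and a single lowmem zone of 262144 pages, so
 * lowmem_pages == 262144):
 *
 *   pages_min  = 4096 >> (PAGE_SHIFT - 10)  = 1024
 *   tmp        = 1024 * 262144 / 262144     = 1024
 *   pages_min  = 1024
 *   pages_low  = 1024 + (1024 >> 2)         = 1280
 *   pages_high = 1024 + (1024 >> 1)         = 1536
 *
 * A highmem zone would instead have pages_min clamped between
 * SWAP_CLUSTER_MAX and 128 pages.
 */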
4139
4140/*
4141 * Initialise min_free_kbytes.
4142 *
4143 * For small machines we want it small (128k min). For large machines
4144 * we want it large (64MB max). But it is not linear, because network
4145 * bandwidth does not increase linearly with machine size. We use
4146 *
4147 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
4148 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
4149 *
4150 * which yields
4151 *
4152 * 16MB: 512k
4153 * 32MB: 724k
4154 * 64MB: 1024k
4155 * 128MB: 1448k
4156 * 256MB: 2048k
4157 * 512MB: 2896k
4158 * 1024MB: 4096k
4159 * 2048MB: 5792k
4160 * 4096MB: 8192k
4161 * 8192MB: 11584k
4162 * 16384MB: 16384k
4163 */
4164static int __init init_per_zone_pages_min(void)
4165{
4166 unsigned long lowmem_kbytes;
4167
4168 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4169
4170 min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4171 if (min_free_kbytes < 128)
4172 min_free_kbytes = 128;
4173 if (min_free_kbytes > 65536)
4174 min_free_kbytes = 65536;
4175 setup_per_zone_pages_min();
4176 setup_per_zone_lowmem_reserve();
4177 return 0;
4178}
4179module_init(init_per_zone_pages_min)
4180
4181/*
4182 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
4183 * that we can call two helper functions whenever min_free_kbytes
4184 * changes.
4185 */
4186int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
4187 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4188{
4189 proc_dointvec(table, write, file, buffer, length, ppos);
3b1d92c5
MG
4190 if (write)
4191 setup_per_zone_pages_min();
1da177e4
LT
4192 return 0;
4193}
4194
9614634f
CL
4195#ifdef CONFIG_NUMA
4196int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
4197 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4198{
4199 struct zone *zone;
4200 int rc;
4201
4202 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4203 if (rc)
4204 return rc;
4205
4206 for_each_zone(zone)
8417bba4 4207 zone->min_unmapped_pages = (zone->present_pages *
9614634f
CL
4208 sysctl_min_unmapped_ratio) / 100;
4209 return 0;
4210}
0ff38490
CL
4211
4212int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
4213 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4214{
4215 struct zone *zone;
4216 int rc;
4217
4218 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4219 if (rc)
4220 return rc;
4221
4222 for_each_zone(zone)
4223 zone->min_slab_pages = (zone->present_pages *
4224 sysctl_min_slab_ratio) / 100;
4225 return 0;
4226}
9614634f
CL
4227#endif
4228
1da177e4
LT
4229/*
4230 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
4231 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
4232 * whenever sysctl_lowmem_reserve_ratio changes.
4233 *
4234 * The reserve ratio obviously has absolutely no relation with the
4235 * pages_min watermarks. The lowmem reserve ratio can only make sense
4236 * in relation to the boot-time zone sizes.
4237 */
4238int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
4239 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4240{
4241 proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4242 setup_per_zone_lowmem_reserve();
4243 return 0;
4244}
4245
8ad4b1fb
RS
4246/*
4247 * percpu_pagelist_fraction - changes pcp->high for each zone on each
4248 * cpu. It is the fraction of the total pages in each zone that a hot per-cpu
4249 * pagelist can hold before it gets flushed back to the buddy allocator.
4250 */
4251
4252int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4253 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4254{
4255 struct zone *zone;
4256 unsigned int cpu;
4257 int ret;
4258
4259 ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4260 if (!write || (ret == -EINVAL))
4261 return ret;
4262 for_each_zone(zone) {
4263 for_each_online_cpu(cpu) {
4264 unsigned long high;
4265 high = zone->present_pages / percpu_pagelist_fraction;
4266 setup_pagelist_highmark(zone_pcp(zone, cpu), high);
4267 }
4268 }
4269 return 0;
4270}
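/*
 * Example of the handler above (a sketch with made-up sizes): writing 8 to
 * the sysctl on a zone with 262144 present pages sets the per-cpu pagelist
 * high mark to 262144 / 8 == 32768 pages for every online CPU.
 */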
4271
f034b5d4 4272int hashdist = HASHDIST_DEFAULT;
1da177e4
LT
4273
4274#ifdef CONFIG_NUMA
4275static int __init set_hashdist(char *str)
4276{
4277 if (!str)
4278 return 0;
4279 hashdist = simple_strtoul(str, &str, 0);
4280 return 1;
4281}
4282__setup("hashdist=", set_hashdist);
4283#endif
4284
4285/*
4286 * allocate a large system hash table from bootmem
4287 * - it is assumed that the hash table must contain an exact power-of-2
4288 * quantity of entries
4289 * - limit is the number of hash buckets, not the total allocation size
4290 */
4291void *__init alloc_large_system_hash(const char *tablename,
4292 unsigned long bucketsize,
4293 unsigned long numentries,
4294 int scale,
4295 int flags,
4296 unsigned int *_hash_shift,
4297 unsigned int *_hash_mask,
4298 unsigned long limit)
4299{
4300 unsigned long long max = limit;
4301 unsigned long log2qty, size;
4302 void *table = NULL;
4303
4304 /* allow the kernel cmdline to have a say */
4305 if (!numentries) {
4306 /* round applicable memory size up to nearest megabyte */
04903664 4307 numentries = nr_kernel_pages;
1da177e4
LT
4308 numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
4309 numentries >>= 20 - PAGE_SHIFT;
4310 numentries <<= 20 - PAGE_SHIFT;
4311
4312 /* limit to 1 bucket per 2^scale bytes of low memory */
4313 if (scale > PAGE_SHIFT)
4314 numentries >>= (scale - PAGE_SHIFT);
4315 else
4316 numentries <<= (PAGE_SHIFT - scale);
9ab37b8f
PM
4317
4318 /* Make sure we've got at least a 0-order allocation.. */
4319 if (unlikely((numentries * bucketsize) < PAGE_SIZE))
4320 numentries = PAGE_SIZE / bucketsize;
1da177e4 4321 }
6e692ed3 4322 numentries = roundup_pow_of_two(numentries);
1da177e4
LT
4323
4324 /* limit allocation size to 1/16 total memory by default */
4325 if (max == 0) {
4326 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
4327 do_div(max, bucketsize);
4328 }
4329
4330 if (numentries > max)
4331 numentries = max;
4332
f0d1b0b3 4333 log2qty = ilog2(numentries);
1da177e4
LT
4334
4335 do {
4336 size = bucketsize << log2qty;
4337 if (flags & HASH_EARLY)
4338 table = alloc_bootmem(size);
4339 else if (hashdist)
4340 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
4341 else {
4342 unsigned long order;
4343 for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
4344 ;
4345 table = (void*) __get_free_pages(GFP_ATOMIC, order);
1037b83b
ED
4346 /*
4347 * If bucketsize is not a power-of-two, we may free
4348 * some pages at the end of hash table.
4349 */
4350 if (table) {
4351 unsigned long alloc_end = (unsigned long)table +
4352 (PAGE_SIZE << order);
4353 unsigned long used = (unsigned long)table +
4354 PAGE_ALIGN(size);
4355 split_page(virt_to_page(table), order);
4356 while (used < alloc_end) {
4357 free_page(used);
4358 used += PAGE_SIZE;
4359 }
4360 }
1da177e4
LT
4361 }
4362 } while (!table && size > PAGE_SIZE && --log2qty);
4363
4364 if (!table)
4365 panic("Failed to allocate %s hash table\n", tablename);
4366
b49ad484 4367 printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
1da177e4
LT
4368 tablename,
4369 (1U << log2qty),
f0d1b0b3 4370 ilog2(size) - PAGE_SHIFT,
1da177e4
LT
4371 size);
4372
4373 if (_hash_shift)
4374 *_hash_shift = log2qty;
4375 if (_hash_mask)
4376 *_hash_mask = (1 << log2qty) - 1;
4377
4378 return table;
4379}
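/*
 * Worked example of the sizing above (a sketch, assuming 4K pages,
 * nr_kernel_pages == 262144 (1GB), bucketsize == 8, scale == 14,
 * numentries == 0 and no explicit limit; all of these vary per caller):
 *
 *   round to the nearest MB:          numentries = 262144
 *   1 bucket per 2^14 bytes of lowmem: numentries >>= (14 - 12) -> 65536
 *   roundup_pow_of_two:               65536 (already a power of two)
 *   log2qty = 16, size = 8 << 16 = 512KB, allocated as an order-7 block
 *   (or via bootmem/vmalloc depending on the flags and hashdist)
 */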
a117e66e
KH
4380
4381#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
a117e66e
KH
4382struct page *pfn_to_page(unsigned long pfn)
4383{
67de6482 4384 return __pfn_to_page(pfn);
a117e66e
KH
4385}
4386unsigned long page_to_pfn(struct page *page)
4387{
67de6482 4388 return __page_to_pfn(page);
a117e66e 4389}
a117e66e
KH
4390EXPORT_SYMBOL(pfn_to_page);
4391EXPORT_SYMBOL(page_to_pfn);
4392#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
6220ec78 4393
835c134e
MG
4394/* Return a pointer to the bitmap storing bits affecting a block of pages */
4395static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
4396 unsigned long pfn)
4397{
4398#ifdef CONFIG_SPARSEMEM
4399 return __pfn_to_section(pfn)->pageblock_flags;
4400#else
4401 return zone->pageblock_flags;
4402#endif /* CONFIG_SPARSEMEM */
4403}
4404
4405static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4406{
4407#ifdef CONFIG_SPARSEMEM
4408 pfn &= (PAGES_PER_SECTION-1);
d9c23400 4409 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
835c134e
MG
4410#else
4411 pfn = pfn - zone->zone_start_pfn;
d9c23400 4412 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
835c134e
MG
4413#endif /* CONFIG_SPARSEMEM */
4414}
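/*
 * Example of the index arithmetic above (a sketch, assuming
 * pageblock_order == 10 and 4 bits per pageblock for NR_PAGEBLOCK_BITS):
 * a pfn 5000 pages into the zone (or section) lies in pageblock
 * 5000 >> 10 == 4, so its flags start at bit 4 * 4 == 16 of the bitmap.
 */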
4415
4416/**
d9c23400 4417 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
835c134e
MG
4418 * @page: The page within the block of interest
4419 * @start_bitidx: The first bit of interest to retrieve
4420 * @end_bitidx: The last bit of interest
4421 * returns pageblock_bits flags
4422 */
4423unsigned long get_pageblock_flags_group(struct page *page,
4424 int start_bitidx, int end_bitidx)
4425{
4426 struct zone *zone;
4427 unsigned long *bitmap;
4428 unsigned long pfn, bitidx;
4429 unsigned long flags = 0;
4430 unsigned long value = 1;
4431
4432 zone = page_zone(page);
4433 pfn = page_to_pfn(page);
4434 bitmap = get_pageblock_bitmap(zone, pfn);
4435 bitidx = pfn_to_bitidx(zone, pfn);
4436
4437 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4438 if (test_bit(bitidx + start_bitidx, bitmap))
4439 flags |= value;
6220ec78 4440
835c134e
MG
4441 return flags;
4442}
4443
4444/**
d9c23400 4445 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
835c134e
MG
4446 * @page: The page within the block of interest
4447 * @start_bitidx: The first bit of interest
4448 * @end_bitidx: The last bit of interest
4449 * @flags: The flags to set
4450 */
4451void set_pageblock_flags_group(struct page *page, unsigned long flags,
4452 int start_bitidx, int end_bitidx)
4453{
4454 struct zone *zone;
4455 unsigned long *bitmap;
4456 unsigned long pfn, bitidx;
4457 unsigned long value = 1;
4458
4459 zone = page_zone(page);
4460 pfn = page_to_pfn(page);
4461 bitmap = get_pageblock_bitmap(zone, pfn);
4462 bitidx = pfn_to_bitidx(zone, pfn);
4463
4464 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4465 if (flags & value)
4466 __set_bit(bitidx + start_bitidx, bitmap);
4467 else
4468 __clear_bit(bitidx + start_bitidx, bitmap);
4469}
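/*
 * A sketch of how the two group accessors above are typically wrapped
 * (hypothetical example_* names; this mirrors, rather than defines, the
 * get/set_pageblock_migratetype() helpers and assumes the
 * PB_migrate/PB_migrate_end bit range from pageblock-flags.h):
 *
 *	static inline int example_get_migratetype(struct page *page)
 *	{
 *		return get_pageblock_flags_group(page, PB_migrate,
 *						PB_migrate_end);
 *	}
 *
 *	static inline void example_set_migratetype(struct page *page, int mt)
 *	{
 *		set_pageblock_flags_group(page, (unsigned long)mt,
 *						PB_migrate, PB_migrate_end);
 *	}
 */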
a5d76b54
KH
4470
4471/*
4472 * These are helper functions; see page_isolation.c as well.
4473 * They set/clear a pageblock's migrate type to ISOLATE.
4474 * The page allocator never allocates memory from an ISOLATE pageblock.
4475 */
4476
4477int set_migratetype_isolate(struct page *page)
4478{
4479 struct zone *zone;
4480 unsigned long flags;
4481 int ret = -EBUSY;
4482
4483 zone = page_zone(page);
4484 spin_lock_irqsave(&zone->lock, flags);
4485 /*
4486 * In future, more migrate types will be able to be isolation target.
4487 */
4488 if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
4489 goto out;
4490 set_pageblock_migratetype(page, MIGRATE_ISOLATE);
4491 move_freepages_block(zone, page, MIGRATE_ISOLATE);
4492 ret = 0;
4493out:
4494 spin_unlock_irqrestore(&zone->lock, flags);
4495 if (!ret)
9f8f2172 4496 drain_all_pages();
a5d76b54
KH
4497 return ret;
4498}
4499
4500void unset_migratetype_isolate(struct page *page)
4501{
4502 struct zone *zone;
4503 unsigned long flags;
4504 zone = page_zone(page);
4505 spin_lock_irqsave(&zone->lock, flags);
4506 if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
4507 goto out;
4508 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4509 move_freepages_block(zone, page, MIGRATE_MOVABLE);
4510out:
4511 spin_unlock_irqrestore(&zone->lock, flags);
4512}
0c0e6195
KH
4513
4514#ifdef CONFIG_MEMORY_HOTREMOVE
4515/*
4516 * All pages in the range must be isolated before calling this.
4517 */
4518void
4519__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
4520{
4521 struct page *page;
4522 struct zone *zone;
4523 int order, i;
4524 unsigned long pfn;
4525 unsigned long flags;
4526 /* find the first valid pfn */
4527 for (pfn = start_pfn; pfn < end_pfn; pfn++)
4528 if (pfn_valid(pfn))
4529 break;
4530 if (pfn == end_pfn)
4531 return;
4532 zone = page_zone(pfn_to_page(pfn));
4533 spin_lock_irqsave(&zone->lock, flags);
4534 pfn = start_pfn;
4535 while (pfn < end_pfn) {
4536 if (!pfn_valid(pfn)) {
4537 pfn++;
4538 continue;
4539 }
4540 page = pfn_to_page(pfn);
4541 BUG_ON(page_count(page));
4542 BUG_ON(!PageBuddy(page));
4543 order = page_order(page);
4544#ifdef CONFIG_DEBUG_VM
4545 printk(KERN_INFO "remove from free list %lx %d %lx\n",
4546 pfn, 1 << order, end_pfn);
4547#endif
4548 list_del(&page->lru);
4549 rmv_page_order(page);
4550 zone->free_area[order].nr_free--;
4551 __mod_zone_page_state(zone, NR_FREE_PAGES,
4552 - (1UL << order));
4553 for (i = 0; i < (1 << order); i++)
4554 SetPageReserved((page+i));
4555 pfn += (1 << order);
4556 }
4557 spin_unlock_irqrestore(&zone->lock, flags);
4558}
4559#endif