page allocator: break up the allocator entry point into fast and slow paths
[linux-2.6-block.git] / mm / page_alloc.c
1da177e4
LT
1/*
2 * linux/mm/page_alloc.c
3 *
4 * Manages the free list; the system allocates free pages here.
5 * Note that kmalloc() lives in slab.c
6 *
7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
8 * Swap reorganised 29.12.95, Stephen Tweedie
9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15 */
16
1da177e4
LT
17#include <linux/stddef.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/interrupt.h>
21#include <linux/pagemap.h>
10ed273f 22#include <linux/jiffies.h>
1da177e4
LT
23#include <linux/bootmem.h>
24#include <linux/compiler.h>
9f158333 25#include <linux/kernel.h>
1da177e4
LT
26#include <linux/module.h>
27#include <linux/suspend.h>
28#include <linux/pagevec.h>
29#include <linux/blkdev.h>
30#include <linux/slab.h>
5a3135c2 31#include <linux/oom.h>
1da177e4
LT
32#include <linux/notifier.h>
33#include <linux/topology.h>
34#include <linux/sysctl.h>
35#include <linux/cpu.h>
36#include <linux/cpuset.h>
bdc8cb98 37#include <linux/memory_hotplug.h>
1da177e4
LT
38#include <linux/nodemask.h>
39#include <linux/vmalloc.h>
4be38e35 40#include <linux/mempolicy.h>
6811378e 41#include <linux/stop_machine.h>
c713216d
MG
42#include <linux/sort.h>
43#include <linux/pfn.h>
3fcfab16 44#include <linux/backing-dev.h>
933e312e 45#include <linux/fault-inject.h>
a5d76b54 46#include <linux/page-isolation.h>
52d4b9ac 47#include <linux/page_cgroup.h>
3ac7fe5a 48#include <linux/debugobjects.h>
dbb1f81c 49#include <linux/kmemleak.h>
1da177e4
LT
50
51#include <asm/tlbflush.h>
ac924c60 52#include <asm/div64.h>
1da177e4
LT
53#include "internal.h"
54
55/*
13808910 56 * Array of node states.
1da177e4 57 */
13808910
CL
58nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
59 [N_POSSIBLE] = NODE_MASK_ALL,
60 [N_ONLINE] = { { [0] = 1UL } },
61#ifndef CONFIG_NUMA
62 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
63#ifdef CONFIG_HIGHMEM
64 [N_HIGH_MEMORY] = { { [0] = 1UL } },
65#endif
66 [N_CPU] = { { [0] = 1UL } },
67#endif /* NUMA */
68};
69EXPORT_SYMBOL(node_states);
70
6c231b7b 71unsigned long totalram_pages __read_mostly;
cb45b0e9 72unsigned long totalreserve_pages __read_mostly;
22b31eec 73unsigned long highest_memmap_pfn __read_mostly;
8ad4b1fb 74int percpu_pagelist_fraction;
1da177e4 75
d9c23400
MG
76#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
77int pageblock_order __read_mostly;
78#endif
79
d98c7a09 80static void __free_pages_ok(struct page *page, unsigned int order);
a226f6c8 81
1da177e4
LT
82/*
83 * results with 256, 32 in the lowmem_reserve sysctl:
84 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
85 * 1G machine -> (16M dma, 784M normal, 224M high)
86 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
87 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
88 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
a2f1b424
AK
89 *
90 * TBD: should special case ZONE_DMA32 machines here - in those we normally
91 * don't need any ZONE_NORMAL reservation
1da177e4 92 */
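/*
 * Worked example of the ratios above (illustrative figures only): on the
 * 1G machine described, a ratio of 256 for ZONE_DMA and 32 for ZONE_NORMAL
 * means a NORMAL allocation keeps 784M/256 ~= 3M of ZONE_DMA in reserve,
 * while a HIGHMEM allocation keeps 224M/32 = 7M of ZONE_NORMAL and
 * (224M+784M)/256 ~= 3.9M of ZONE_DMA in reserve.
 */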
2f1b6248 93int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
4b51d669 94#ifdef CONFIG_ZONE_DMA
2f1b6248 95 256,
4b51d669 96#endif
fb0e7942 97#ifdef CONFIG_ZONE_DMA32
2f1b6248 98 256,
fb0e7942 99#endif
e53ef38d 100#ifdef CONFIG_HIGHMEM
2a1e274a 101 32,
e53ef38d 102#endif
2a1e274a 103 32,
2f1b6248 104};
1da177e4
LT
105
106EXPORT_SYMBOL(totalram_pages);
1da177e4 107
15ad7cdc 108static char * const zone_names[MAX_NR_ZONES] = {
4b51d669 109#ifdef CONFIG_ZONE_DMA
2f1b6248 110 "DMA",
4b51d669 111#endif
fb0e7942 112#ifdef CONFIG_ZONE_DMA32
2f1b6248 113 "DMA32",
fb0e7942 114#endif
2f1b6248 115 "Normal",
e53ef38d 116#ifdef CONFIG_HIGHMEM
2a1e274a 117 "HighMem",
e53ef38d 118#endif
2a1e274a 119 "Movable",
2f1b6248
CL
120};
121
1da177e4
LT
122int min_free_kbytes = 1024;
123
86356ab1
YG
124unsigned long __meminitdata nr_kernel_pages;
125unsigned long __meminitdata nr_all_pages;
a3142c8e 126static unsigned long __meminitdata dma_reserve;
1da177e4 127
c713216d
MG
128#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
129 /*
183ff22b 130 * MAX_ACTIVE_REGIONS determines the maximum number of distinct
c713216d
MG
131 * ranges of memory (RAM) that may be registered with add_active_range().
132 * Ranges passed to add_active_range() will be merged if possible
133 * so the number of times add_active_range() can be called is
134 * related to the number of nodes and the number of holes
135 */
136 #ifdef CONFIG_MAX_ACTIVE_REGIONS
137 /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
138 #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
139 #else
140 #if MAX_NUMNODES >= 32
141 /* If there can be many nodes, allow up to 50 holes per node */
142 #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
143 #else
144 /* By default, allow up to 256 distinct regions */
145 #define MAX_ACTIVE_REGIONS 256
146 #endif
147 #endif
148
98011f56
JB
149 static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
150 static int __meminitdata nr_nodemap_entries;
151 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
152 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
b69a7288 153 static unsigned long __initdata required_kernelcore;
484f51f8 154 static unsigned long __initdata required_movablecore;
b69a7288 155 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
2a1e274a
MG
156
157 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
158 int movable_zone;
159 EXPORT_SYMBOL(movable_zone);
c713216d
MG
160#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
161
418508c1
MS
162#if MAX_NUMNODES > 1
163int nr_node_ids __read_mostly = MAX_NUMNODES;
164EXPORT_SYMBOL(nr_node_ids);
165#endif
166
9ef9acb0
MG
167int page_group_by_mobility_disabled __read_mostly;
168
b2a0ac88
MG
169static void set_pageblock_migratetype(struct page *page, int migratetype)
170{
171 set_pageblock_flags_group(page, (unsigned long)migratetype,
172 PB_migrate, PB_migrate_end);
173}
174
13e7444b 175#ifdef CONFIG_DEBUG_VM
c6a57e19 176static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
1da177e4 177{
bdc8cb98
DH
178 int ret = 0;
179 unsigned seq;
180 unsigned long pfn = page_to_pfn(page);
c6a57e19 181
bdc8cb98
DH
182 do {
183 seq = zone_span_seqbegin(zone);
184 if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
185 ret = 1;
186 else if (pfn < zone->zone_start_pfn)
187 ret = 1;
188 } while (zone_span_seqretry(zone, seq));
189
190 return ret;
c6a57e19
DH
191}
192
193static int page_is_consistent(struct zone *zone, struct page *page)
194{
14e07298 195 if (!pfn_valid_within(page_to_pfn(page)))
c6a57e19 196 return 0;
1da177e4 197 if (zone != page_zone(page))
c6a57e19
DH
198 return 0;
199
200 return 1;
201}
202/*
203 * Temporary debugging check for pages not lying within a given zone.
204 */
205static int bad_range(struct zone *zone, struct page *page)
206{
207 if (page_outside_zone_boundaries(zone, page))
1da177e4 208 return 1;
c6a57e19
DH
209 if (!page_is_consistent(zone, page))
210 return 1;
211
1da177e4
LT
212 return 0;
213}
13e7444b
NP
214#else
215static inline int bad_range(struct zone *zone, struct page *page)
216{
217 return 0;
218}
219#endif
220
224abf92 221static void bad_page(struct page *page)
1da177e4 222{
d936cf9b
HD
223 static unsigned long resume;
224 static unsigned long nr_shown;
225 static unsigned long nr_unshown;
226
227 /*
228 * Allow a burst of 60 reports, then keep quiet for that minute;
229 * or allow a steady drip of one report per second.
230 */
231 if (nr_shown == 60) {
232 if (time_before(jiffies, resume)) {
233 nr_unshown++;
234 goto out;
235 }
236 if (nr_unshown) {
1e9e6365
HD
237 printk(KERN_ALERT
238 "BUG: Bad page state: %lu messages suppressed\n",
d936cf9b
HD
239 nr_unshown);
240 nr_unshown = 0;
241 }
242 nr_shown = 0;
243 }
244 if (nr_shown++ == 0)
245 resume = jiffies + 60 * HZ;
246
1e9e6365 247 printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
3dc14741 248 current->comm, page_to_pfn(page));
1e9e6365 249 printk(KERN_ALERT
3dc14741
HD
250 "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
251 page, (void *)page->flags, page_count(page),
252 page_mapcount(page), page->mapping, page->index);
3dc14741 253
1da177e4 254 dump_stack();
d936cf9b 255out:
8cc3b392
HD
256 /* Leave bad fields for debug, except PageBuddy could make trouble */
257 __ClearPageBuddy(page);
9f158333 258 add_taint(TAINT_BAD_PAGE);
1da177e4
LT
259}
260
1da177e4
LT
261/*
262 * Higher-order pages are called "compound pages". They are structured thusly:
263 *
264 * The first PAGE_SIZE page is called the "head page".
265 *
266 * The remaining PAGE_SIZE pages are called "tail pages".
267 *
268 * All pages have PG_compound set. All pages have their ->private pointing at
269 * the head page (even the head page has this).
270 *
41d78ba5
HD
271 * The first tail page's ->lru.next holds the address of the compound page's
272 * put_page() function. Its ->lru.prev holds the order of allocation.
273 * This usage means that zero-order pages may not be compound.
1da177e4 274 */
d98c7a09
HD
275
276static void free_compound_page(struct page *page)
277{
d85f3385 278 __free_pages_ok(page, compound_order(page));
d98c7a09
HD
279}
280
01ad1c08 281void prep_compound_page(struct page *page, unsigned long order)
18229df5
AW
282{
283 int i;
284 int nr_pages = 1 << order;
285
286 set_compound_page_dtor(page, free_compound_page);
287 set_compound_order(page, order);
288 __SetPageHead(page);
289 for (i = 1; i < nr_pages; i++) {
290 struct page *p = page + i;
291
292 __SetPageTail(p);
293 p->first_page = page;
294 }
295}
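/*
 * For example (illustrative only): after prep_compound_page(page, 2) the
 * order-2 block is laid out as page[0] = head (PG_head set, destructor and
 * order recorded) and page[1..3] = tails, each with PG_tail set and
 * ->first_page pointing back at page[0].
 */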
296
297#ifdef CONFIG_HUGETLBFS
298void prep_compound_gigantic_page(struct page *page, unsigned long order)
1da177e4
LT
299{
300 int i;
301 int nr_pages = 1 << order;
6babc32c 302 struct page *p = page + 1;
1da177e4 303
33f2ef89 304 set_compound_page_dtor(page, free_compound_page);
d85f3385 305 set_compound_order(page, order);
6d777953 306 __SetPageHead(page);
18229df5 307 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
d85f3385 308 __SetPageTail(p);
d85f3385 309 p->first_page = page;
1da177e4
LT
310 }
311}
18229df5 312#endif
1da177e4 313
8cc3b392 314static int destroy_compound_page(struct page *page, unsigned long order)
1da177e4
LT
315{
316 int i;
317 int nr_pages = 1 << order;
8cc3b392 318 int bad = 0;
1da177e4 319
8cc3b392
HD
320 if (unlikely(compound_order(page) != order) ||
321 unlikely(!PageHead(page))) {
224abf92 322 bad_page(page);
8cc3b392
HD
323 bad++;
324 }
1da177e4 325
6d777953 326 __ClearPageHead(page);
8cc3b392 327
18229df5
AW
328 for (i = 1; i < nr_pages; i++) {
329 struct page *p = page + i;
1da177e4 330
e713a21d 331 if (unlikely(!PageTail(p) || (p->first_page != page))) {
224abf92 332 bad_page(page);
8cc3b392
HD
333 bad++;
334 }
d85f3385 335 __ClearPageTail(p);
1da177e4 336 }
8cc3b392
HD
337
338 return bad;
1da177e4 339}
1da177e4 340
17cf4406
NP
341static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
342{
343 int i;
344
6626c5d5
AM
345 /*
346 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
347 * and __GFP_HIGHMEM from hard or soft interrupt context.
348 */
725d704e 349 VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
17cf4406
NP
350 for (i = 0; i < (1 << order); i++)
351 clear_highpage(page + i);
352}
353
6aa3001b
AM
354static inline void set_page_order(struct page *page, int order)
355{
4c21e2f2 356 set_page_private(page, order);
676165a8 357 __SetPageBuddy(page);
1da177e4
LT
358}
359
360static inline void rmv_page_order(struct page *page)
361{
676165a8 362 __ClearPageBuddy(page);
4c21e2f2 363 set_page_private(page, 0);
1da177e4
LT
364}
365
366/*
367 * Locate the struct page for both the matching buddy in our
368 * pair (buddy1) and the combined order O+1 page they form (page).
369 *
370 * 1) Any buddy B1 will have an order O twin B2 which satisfies
371 * the following equation:
372 * B2 = B1 ^ (1 << O)
373 * For example, if the starting buddy (buddy2) is #8 its order
374 * 1 buddy is #10:
375 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
376 *
377 * 2) Any buddy B will have an order O+1 parent P which
378 * satisfies the following equation:
379 * P = B & ~(1 << O)
380 *
d6e05edc 381 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
1da177e4
LT
382 */
383static inline struct page *
384__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
385{
386 unsigned long buddy_idx = page_idx ^ (1 << order);
387
388 return page + (buddy_idx - page_idx);
389}
390
391static inline unsigned long
392__find_combined_index(unsigned long page_idx, unsigned int order)
393{
394 return (page_idx & ~(1 << order));
395}
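/*
 * Worked example for the two helpers above (illustrative only): for
 * page_idx = 8 at order 1, __page_find_buddy() computes
 * buddy_idx = 8 ^ (1 << 1) = 10, and __find_combined_index() returns
 * 8 & ~(1 << 1) = 8, the index of the merged order-2 block.
 */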
396
397/*
398 * This function checks whether a page is free && is the buddy.
399 * We can coalesce a page and its buddy if
13e7444b 400 * (a) the buddy is not in a hole &&
676165a8 401 * (b) the buddy is in the buddy system &&
cb2b95e1
AW
402 * (c) a page and its buddy have the same order &&
403 * (d) a page and its buddy are in the same zone.
676165a8
NP
404 *
405 * For recording whether a page is in the buddy system, we use PG_buddy.
406 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
1da177e4 407 *
676165a8 408 * For recording page's order, we use page_private(page).
1da177e4 409 */
cb2b95e1
AW
410static inline int page_is_buddy(struct page *page, struct page *buddy,
411 int order)
1da177e4 412{
14e07298 413 if (!pfn_valid_within(page_to_pfn(buddy)))
13e7444b 414 return 0;
13e7444b 415
cb2b95e1
AW
416 if (page_zone_id(page) != page_zone_id(buddy))
417 return 0;
418
419 if (PageBuddy(buddy) && page_order(buddy) == order) {
420 BUG_ON(page_count(buddy) != 0);
6aa3001b 421 return 1;
676165a8 422 }
6aa3001b 423 return 0;
1da177e4
LT
424}
425
426/*
427 * Freeing function for a buddy system allocator.
428 *
429 * The concept of a buddy system is to maintain a direct-mapped table
430 * (containing bit values) for memory blocks of various "orders".
431 * The bottom level table contains the map for the smallest allocatable
432 * units of memory (here, pages), and each level above it describes
433 * pairs of units from the levels below, hence, "buddies".
434 * At a high level, all that happens here is marking the table entry
435 * at the bottom level available, and propagating the changes upward
436 * as necessary, plus some accounting needed to play nicely with other
437 * parts of the VM system.
438 * At each level, we keep a list of pages, which are heads of contiguous
676165a8 439 * free pages of length (1 << order) and marked with PG_buddy. A page's
4c21e2f2 440 * order is recorded in the page_private(page) field.
1da177e4
LT
441 * So when we are allocating or freeing one, we can derive the state of the
442 * other. That is, if we allocate a small block, and both were
443 * free, the remainder of the region must be split into blocks.
444 * If a block is freed, and its buddy is also free, then this
445 * triggers coalescing into a block of larger size.
446 *
447 * -- wli
448 */
449
48db57f8 450static inline void __free_one_page(struct page *page,
1da177e4
LT
451 struct zone *zone, unsigned int order)
452{
453 unsigned long page_idx;
454 int order_size = 1 << order;
b2a0ac88 455 int migratetype = get_pageblock_migratetype(page);
1da177e4 456
224abf92 457 if (unlikely(PageCompound(page)))
8cc3b392
HD
458 if (unlikely(destroy_compound_page(page, order)))
459 return;
1da177e4
LT
460
461 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
462
725d704e
NP
463 VM_BUG_ON(page_idx & (order_size - 1));
464 VM_BUG_ON(bad_range(zone, page));
1da177e4 465
d23ad423 466 __mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
1da177e4
LT
467 while (order < MAX_ORDER-1) {
468 unsigned long combined_idx;
1da177e4
LT
469 struct page *buddy;
470
1da177e4 471 buddy = __page_find_buddy(page, page_idx, order);
cb2b95e1 472 if (!page_is_buddy(page, buddy, order))
3c82d0ce 473 break;
13e7444b 474
3c82d0ce 475 /* Our buddy is free, merge with it and move up one order. */
1da177e4 476 list_del(&buddy->lru);
b2a0ac88 477 zone->free_area[order].nr_free--;
1da177e4 478 rmv_page_order(buddy);
13e7444b 479 combined_idx = __find_combined_index(page_idx, order);
1da177e4
LT
480 page = page + (combined_idx - page_idx);
481 page_idx = combined_idx;
482 order++;
483 }
484 set_page_order(page, order);
b2a0ac88
MG
485 list_add(&page->lru,
486 &zone->free_area[order].free_list[migratetype]);
1da177e4
LT
487 zone->free_area[order].nr_free++;
488}
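/*
 * Illustrative trace of the merge loop above: freeing an order-0 page at
 * index 9 whose buddy at index 8 is already free yields an order-1 block
 * at index 8; if the order-1 buddy at index 10 is also free, the two merge
 * again into an order-2 block at index 8, and so on until a buddy is found
 * busy or order reaches MAX_ORDER-1.
 */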
489
224abf92 490static inline int free_pages_check(struct page *page)
1da177e4 491{
985737cf 492 free_page_mlock(page);
92be2e33
NP
493 if (unlikely(page_mapcount(page) |
494 (page->mapping != NULL) |
495 (page_count(page) != 0) |
8cc3b392 496 (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
224abf92 497 bad_page(page);
79f4b7bf 498 return 1;
8cc3b392 499 }
79f4b7bf
HD
500 if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
501 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
502 return 0;
1da177e4
LT
503}
504
505/*
506 * Frees a list of pages.
507 * Assumes all pages on list are in same zone, and of same order.
207f36ee 508 * count is the number of pages to free.
1da177e4
LT
509 *
510 * If the zone was previously in an "all pages pinned" state then look to
511 * see if this freeing clears that state.
512 *
513 * And clear the zone's pages_scanned counter, to hold off the "all pages are
514 * pinned" detection logic.
515 */
48db57f8
NP
516static void free_pages_bulk(struct zone *zone, int count,
517 struct list_head *list, int order)
1da177e4 518{
c54ad30c 519 spin_lock(&zone->lock);
e815af95 520 zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
1da177e4 521 zone->pages_scanned = 0;
48db57f8
NP
522 while (count--) {
523 struct page *page;
524
725d704e 525 VM_BUG_ON(list_empty(list));
1da177e4 526 page = list_entry(list->prev, struct page, lru);
48db57f8 527 /* have to delete it as __free_one_page list manipulates */
1da177e4 528 list_del(&page->lru);
48db57f8 529 __free_one_page(page, zone, order);
1da177e4 530 }
c54ad30c 531 spin_unlock(&zone->lock);
1da177e4
LT
532}
533
48db57f8 534static void free_one_page(struct zone *zone, struct page *page, int order)
1da177e4 535{
006d22d9 536 spin_lock(&zone->lock);
e815af95 537 zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
006d22d9 538 zone->pages_scanned = 0;
0798e519 539 __free_one_page(page, zone, order);
006d22d9 540 spin_unlock(&zone->lock);
48db57f8
NP
541}
542
543static void __free_pages_ok(struct page *page, unsigned int order)
544{
545 unsigned long flags;
1da177e4 546 int i;
8cc3b392 547 int bad = 0;
1da177e4 548
1da177e4 549 for (i = 0 ; i < (1 << order) ; ++i)
8cc3b392
HD
550 bad += free_pages_check(page + i);
551 if (bad)
689bcebf
HD
552 return;
553
3ac7fe5a 554 if (!PageHighMem(page)) {
9858db50 555 debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
3ac7fe5a
TG
556 debug_check_no_obj_freed(page_address(page),
557 PAGE_SIZE << order);
558 }
dafb1367 559 arch_free_page(page, order);
48db57f8 560 kernel_map_pages(page, 1 << order, 0);
dafb1367 561
c54ad30c 562 local_irq_save(flags);
f8891e5e 563 __count_vm_events(PGFREE, 1 << order);
48db57f8 564 free_one_page(page_zone(page), page, order);
c54ad30c 565 local_irq_restore(flags);
1da177e4
LT
566}
567
a226f6c8
DH
568/*
569 * permit the bootmem allocator to evade page validation on high-order frees
570 */
af370fb8 571void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
a226f6c8
DH
572{
573 if (order == 0) {
574 __ClearPageReserved(page);
575 set_page_count(page, 0);
7835e98b 576 set_page_refcounted(page);
545b1ea9 577 __free_page(page);
a226f6c8 578 } else {
a226f6c8
DH
579 int loop;
580
545b1ea9 581 prefetchw(page);
a226f6c8
DH
582 for (loop = 0; loop < BITS_PER_LONG; loop++) {
583 struct page *p = &page[loop];
584
545b1ea9
NP
585 if (loop + 1 < BITS_PER_LONG)
586 prefetchw(p + 1);
a226f6c8
DH
587 __ClearPageReserved(p);
588 set_page_count(p, 0);
589 }
590
7835e98b 591 set_page_refcounted(page);
545b1ea9 592 __free_pages(page, order);
a226f6c8
DH
593 }
594}
595
1da177e4
LT
596
597/*
598 * The order of subdivision here is critical for the IO subsystem.
599 * Please do not alter this order without good reasons and regression
600 * testing. Specifically, as large blocks of memory are subdivided,
601 * the order in which smaller blocks are delivered depends on the order
602 * they're subdivided in this function. This is the primary factor
603 * influencing the order in which pages are delivered to the IO
604 * subsystem according to empirical testing, and this is also justified
605 * by considering the behavior of a buddy system containing a single
606 * large block of memory acted on by a series of small allocations.
607 * This behavior is a critical factor in sglist merging's success.
608 *
609 * -- wli
610 */
085cc7d5 611static inline void expand(struct zone *zone, struct page *page,
b2a0ac88
MG
612 int low, int high, struct free_area *area,
613 int migratetype)
1da177e4
LT
614{
615 unsigned long size = 1 << high;
616
617 while (high > low) {
618 area--;
619 high--;
620 size >>= 1;
725d704e 621 VM_BUG_ON(bad_range(zone, &page[size]));
b2a0ac88 622 list_add(&page[size].lru, &area->free_list[migratetype]);
1da177e4
LT
623 area->nr_free++;
624 set_page_order(&page[size], high);
625 }
1da177e4
LT
626}
627
1da177e4
LT
628/*
629 * This page is about to be returned from the page allocator
630 */
17cf4406 631static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
1da177e4 632{
92be2e33
NP
633 if (unlikely(page_mapcount(page) |
634 (page->mapping != NULL) |
635 (page_count(page) != 0) |
8cc3b392 636 (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
224abf92 637 bad_page(page);
689bcebf 638 return 1;
8cc3b392 639 }
689bcebf 640
4c21e2f2 641 set_page_private(page, 0);
7835e98b 642 set_page_refcounted(page);
cc102509
NP
643
644 arch_alloc_page(page, order);
1da177e4 645 kernel_map_pages(page, 1 << order, 1);
17cf4406
NP
646
647 if (gfp_flags & __GFP_ZERO)
648 prep_zero_page(page, order, gfp_flags);
649
650 if (order && (gfp_flags & __GFP_COMP))
651 prep_compound_page(page, order);
652
689bcebf 653 return 0;
1da177e4
LT
654}
655
56fd56b8
MG
656/*
657 * Go through the free lists for the given migratetype and remove
658 * the smallest available page from the freelists
659 */
660static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
661 int migratetype)
662{
663 unsigned int current_order;
664 struct free_area * area;
665 struct page *page;
666
667 /* Find a page of the appropriate size in the preferred list */
668 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
669 area = &(zone->free_area[current_order]);
670 if (list_empty(&area->free_list[migratetype]))
671 continue;
672
673 page = list_entry(area->free_list[migratetype].next,
674 struct page, lru);
675 list_del(&page->lru);
676 rmv_page_order(page);
677 area->nr_free--;
678 __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
679 expand(zone, page, order, current_order, area, migratetype);
680 return page;
681 }
682
683 return NULL;
684}
685
686
b2a0ac88
MG
687/*
688 * This array describes the order in which free lists are fallen back to
689 * when the free lists for the desired migratetype are depleted
690 */
691static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
64c5e135
MG
692 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
693 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
694 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
695 [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */
b2a0ac88
MG
696};
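/*
 * For example, an allocation wanting MIGRATE_UNMOVABLE pages whose own
 * free lists are empty falls back to MIGRATE_RECLAIMABLE, then
 * MIGRATE_MOVABLE; MIGRATE_RESERVE is only used as the final resort in
 * __rmqueue_fallback().
 */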
697
c361be55
MG
698/*
699 * Move the free pages in a range to the free lists of the requested type.
d9c23400 700 * Note that start_page and end_page are not aligned on a pageblock
c361be55
MG
701 * boundary. If alignment is required, use move_freepages_block()
702 */
b69a7288
AB
703static int move_freepages(struct zone *zone,
704 struct page *start_page, struct page *end_page,
705 int migratetype)
c361be55
MG
706{
707 struct page *page;
708 unsigned long order;
d100313f 709 int pages_moved = 0;
c361be55
MG
710
711#ifndef CONFIG_HOLES_IN_ZONE
712 /*
713 * page_zone is not safe to call in this context when
714 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
715 * anyway as we check zone boundaries in move_freepages_block().
716 * Remove at a later date when no bug reports exist related to
ac0e5b7a 717 * grouping pages by mobility
c361be55
MG
718 */
719 BUG_ON(page_zone(start_page) != page_zone(end_page));
720#endif
721
722 for (page = start_page; page <= end_page;) {
344c790e
AL
723 /* Make sure we are not inadvertently changing nodes */
724 VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
725
c361be55
MG
726 if (!pfn_valid_within(page_to_pfn(page))) {
727 page++;
728 continue;
729 }
730
731 if (!PageBuddy(page)) {
732 page++;
733 continue;
734 }
735
736 order = page_order(page);
737 list_del(&page->lru);
738 list_add(&page->lru,
739 &zone->free_area[order].free_list[migratetype]);
740 page += 1 << order;
d100313f 741 pages_moved += 1 << order;
c361be55
MG
742 }
743
d100313f 744 return pages_moved;
c361be55
MG
745}
746
b69a7288
AB
747static int move_freepages_block(struct zone *zone, struct page *page,
748 int migratetype)
c361be55
MG
749{
750 unsigned long start_pfn, end_pfn;
751 struct page *start_page, *end_page;
752
753 start_pfn = page_to_pfn(page);
d9c23400 754 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
c361be55 755 start_page = pfn_to_page(start_pfn);
d9c23400
MG
756 end_page = start_page + pageblock_nr_pages - 1;
757 end_pfn = start_pfn + pageblock_nr_pages - 1;
c361be55
MG
758
759 /* Do not cross zone boundaries */
760 if (start_pfn < zone->zone_start_pfn)
761 start_page = page;
762 if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
763 return 0;
764
765 return move_freepages(zone, start_page, end_page, migratetype);
766}
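/*
 * Worked example of the alignment above (assuming pageblock_order = 10,
 * i.e. pageblock_nr_pages = 1024): a page at pfn 5000 rounds down to
 * start_pfn 4096, so the candidate range is pfns 4096-5119; if that range
 * crosses the zone boundary the move is clamped or skipped as above.
 */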
767
b2a0ac88
MG
768/* Remove an element from the buddy allocator from the fallback list */
769static struct page *__rmqueue_fallback(struct zone *zone, int order,
770 int start_migratetype)
771{
772 struct free_area * area;
773 int current_order;
774 struct page *page;
775 int migratetype, i;
776
777 /* Find the largest possible block of pages in the other list */
778 for (current_order = MAX_ORDER-1; current_order >= order;
779 --current_order) {
780 for (i = 0; i < MIGRATE_TYPES - 1; i++) {
781 migratetype = fallbacks[start_migratetype][i];
782
56fd56b8
MG
783 /* MIGRATE_RESERVE handled later if necessary */
784 if (migratetype == MIGRATE_RESERVE)
785 continue;
e010487d 786
b2a0ac88
MG
787 area = &(zone->free_area[current_order]);
788 if (list_empty(&area->free_list[migratetype]))
789 continue;
790
791 page = list_entry(area->free_list[migratetype].next,
792 struct page, lru);
793 area->nr_free--;
794
795 /*
c361be55 796 * If breaking a large block of pages, move all free
46dafbca
MG
797 * pages to the preferred allocation list. If falling
798 * back for a reclaimable kernel allocation, be more
799 * aggressive about taking ownership of free pages
b2a0ac88 800 */
d9c23400 801 if (unlikely(current_order >= (pageblock_order >> 1)) ||
46dafbca
MG
802 start_migratetype == MIGRATE_RECLAIMABLE) {
803 unsigned long pages;
804 pages = move_freepages_block(zone, page,
805 start_migratetype);
806
807 /* Claim the whole block if over half of it is free */
d9c23400 808 if (pages >= (1 << (pageblock_order-1)))
46dafbca
MG
809 set_pageblock_migratetype(page,
810 start_migratetype);
811
b2a0ac88 812 migratetype = start_migratetype;
c361be55 813 }
b2a0ac88
MG
814
815 /* Remove the page from the freelists */
816 list_del(&page->lru);
817 rmv_page_order(page);
818 __mod_zone_page_state(zone, NR_FREE_PAGES,
819 -(1UL << order));
820
d9c23400 821 if (current_order == pageblock_order)
b2a0ac88
MG
822 set_pageblock_migratetype(page,
823 start_migratetype);
824
825 expand(zone, page, order, current_order, area, migratetype);
826 return page;
827 }
828 }
829
56fd56b8
MG
830 /* Use MIGRATE_RESERVE rather than fail an allocation */
831 return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
b2a0ac88
MG
832}
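/*
 * Worked example of the block-stealing heuristic above (assuming
 * pageblock_order = 10): a fallback grab of an order >= 5 block, or any
 * fallback for a MIGRATE_RECLAIMABLE request, moves the free pages of the
 * whole pageblock to the requested list; if at least 512 pages (half of
 * 1024) were moved, the pageblock's migratetype itself is rewritten.
 */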
833
56fd56b8 834/*
1da177e4
LT
835 * Do the hard work of removing an element from the buddy allocator.
836 * Call me with the zone->lock already held.
837 */
b2a0ac88
MG
838static struct page *__rmqueue(struct zone *zone, unsigned int order,
839 int migratetype)
1da177e4 840{
1da177e4
LT
841 struct page *page;
842
56fd56b8 843 page = __rmqueue_smallest(zone, order, migratetype);
b2a0ac88 844
56fd56b8
MG
845 if (unlikely(!page))
846 page = __rmqueue_fallback(zone, order, migratetype);
b2a0ac88
MG
847
848 return page;
1da177e4
LT
849}
850
851/*
852 * Obtain a specified number of elements from the buddy allocator, all under
853 * a single hold of the lock, for efficiency. Add them to the supplied list.
854 * Returns the number of new pages which were placed at *list.
855 */
856static int rmqueue_bulk(struct zone *zone, unsigned int order,
b2a0ac88
MG
857 unsigned long count, struct list_head *list,
858 int migratetype)
1da177e4 859{
1da177e4 860 int i;
1da177e4 861
c54ad30c 862 spin_lock(&zone->lock);
1da177e4 863 for (i = 0; i < count; ++i) {
b2a0ac88 864 struct page *page = __rmqueue(zone, order, migratetype);
085cc7d5 865 if (unlikely(page == NULL))
1da177e4 866 break;
81eabcbe
MG
867
868 /*
869 * Split buddy pages returned by expand() are received here
870 * in physical page order. The page is added to the caller's
871 * list and the list head then moves forward. From the caller's
872 * perspective, the linked list is ordered by page number in
873 * some conditions. This is useful for IO devices that can
874 * merge IO requests if the physical pages are ordered
875 * properly.
876 */
535131e6
MG
877 list_add(&page->lru, list);
878 set_page_private(page, migratetype);
81eabcbe 879 list = &page->lru;
1da177e4 880 }
c54ad30c 881 spin_unlock(&zone->lock);
085cc7d5 882 return i;
1da177e4
LT
883}
884
4ae7c039 885#ifdef CONFIG_NUMA
8fce4d8e 886/*
4037d452
CL
887 * Called from the vmstat counter updater to drain pagesets of this
888 * currently executing processor on remote nodes after they have
889 * expired.
890 *
879336c3
CL
891 * Note that this function must be called with the thread pinned to
892 * a single processor.
8fce4d8e 893 */
4037d452 894void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
4ae7c039 895{
4ae7c039 896 unsigned long flags;
4037d452 897 int to_drain;
4ae7c039 898
4037d452
CL
899 local_irq_save(flags);
900 if (pcp->count >= pcp->batch)
901 to_drain = pcp->batch;
902 else
903 to_drain = pcp->count;
904 free_pages_bulk(zone, to_drain, &pcp->list, 0);
905 pcp->count -= to_drain;
906 local_irq_restore(flags);
4ae7c039
CL
907}
908#endif
909
9f8f2172
CL
910/*
911 * Drain pages of the indicated processor.
912 *
913 * The processor must either be the current processor and the
914 * thread pinned to the current processor or a processor that
915 * is not online.
916 */
917static void drain_pages(unsigned int cpu)
1da177e4 918{
c54ad30c 919 unsigned long flags;
1da177e4 920 struct zone *zone;
1da177e4 921
ee99c71c 922 for_each_populated_zone(zone) {
1da177e4 923 struct per_cpu_pageset *pset;
3dfa5721 924 struct per_cpu_pages *pcp;
1da177e4 925
e7c8d5c9 926 pset = zone_pcp(zone, cpu);
3dfa5721
CL
927
928 pcp = &pset->pcp;
929 local_irq_save(flags);
930 free_pages_bulk(zone, pcp->count, &pcp->list, 0);
931 pcp->count = 0;
932 local_irq_restore(flags);
1da177e4
LT
933 }
934}
1da177e4 935
9f8f2172
CL
936/*
937 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
938 */
939void drain_local_pages(void *arg)
940{
941 drain_pages(smp_processor_id());
942}
943
944/*
945 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
946 */
947void drain_all_pages(void)
948{
15c8b6c1 949 on_each_cpu(drain_local_pages, NULL, 1);
9f8f2172
CL
950}
951
296699de 952#ifdef CONFIG_HIBERNATION
1da177e4
LT
953
954void mark_free_pages(struct zone *zone)
955{
f623f0db
RW
956 unsigned long pfn, max_zone_pfn;
957 unsigned long flags;
b2a0ac88 958 int order, t;
1da177e4
LT
959 struct list_head *curr;
960
961 if (!zone->spanned_pages)
962 return;
963
964 spin_lock_irqsave(&zone->lock, flags);
f623f0db
RW
965
966 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
967 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
968 if (pfn_valid(pfn)) {
969 struct page *page = pfn_to_page(pfn);
970
7be98234
RW
971 if (!swsusp_page_is_forbidden(page))
972 swsusp_unset_page_free(page);
f623f0db 973 }
1da177e4 974
b2a0ac88
MG
975 for_each_migratetype_order(order, t) {
976 list_for_each(curr, &zone->free_area[order].free_list[t]) {
f623f0db 977 unsigned long i;
1da177e4 978
f623f0db
RW
979 pfn = page_to_pfn(list_entry(curr, struct page, lru));
980 for (i = 0; i < (1UL << order); i++)
7be98234 981 swsusp_set_page_free(pfn_to_page(pfn + i));
f623f0db 982 }
b2a0ac88 983 }
1da177e4
LT
984 spin_unlock_irqrestore(&zone->lock, flags);
985}
e2c55dc8 986#endif /* CONFIG_PM */
1da177e4 987
1da177e4
LT
988/*
989 * Free a 0-order page
990 */
920c7a5d 991static void free_hot_cold_page(struct page *page, int cold)
1da177e4
LT
992{
993 struct zone *zone = page_zone(page);
994 struct per_cpu_pages *pcp;
995 unsigned long flags;
996
1da177e4
LT
997 if (PageAnon(page))
998 page->mapping = NULL;
224abf92 999 if (free_pages_check(page))
689bcebf
HD
1000 return;
1001
3ac7fe5a 1002 if (!PageHighMem(page)) {
9858db50 1003 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
3ac7fe5a
TG
1004 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
1005 }
dafb1367 1006 arch_free_page(page, 0);
689bcebf
HD
1007 kernel_map_pages(page, 1, 0);
1008
3dfa5721 1009 pcp = &zone_pcp(zone, get_cpu())->pcp;
1da177e4 1010 local_irq_save(flags);
f8891e5e 1011 __count_vm_event(PGFREE);
3dfa5721
CL
1012 if (cold)
1013 list_add_tail(&page->lru, &pcp->list);
1014 else
1015 list_add(&page->lru, &pcp->list);
535131e6 1016 set_page_private(page, get_pageblock_migratetype(page));
1da177e4 1017 pcp->count++;
48db57f8
NP
1018 if (pcp->count >= pcp->high) {
1019 free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
1020 pcp->count -= pcp->batch;
1021 }
1da177e4
LT
1022 local_irq_restore(flags);
1023 put_cpu();
1024}
1025
920c7a5d 1026void free_hot_page(struct page *page)
1da177e4
LT
1027{
1028 free_hot_cold_page(page, 0);
1029}
1030
920c7a5d 1031void free_cold_page(struct page *page)
1da177e4
LT
1032{
1033 free_hot_cold_page(page, 1);
1034}
1035
8dfcc9ba
NP
1036/*
1037 * split_page takes a non-compound higher-order page, and splits it into
1038 * n (1<<order) sub-pages: page[0..n]
1039 * Each sub-page must be freed individually.
1040 *
1041 * Note: this is probably too low level an operation for use in drivers.
1042 * Please consult with lkml before using this in your driver.
1043 */
1044void split_page(struct page *page, unsigned int order)
1045{
1046 int i;
1047
725d704e
NP
1048 VM_BUG_ON(PageCompound(page));
1049 VM_BUG_ON(!page_count(page));
7835e98b
NP
1050 for (i = 1; i < (1 << order); i++)
1051 set_page_refcounted(page + i);
8dfcc9ba 1052}
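/*
 * For example (illustrative only): split_page(page, 2) turns one order-2
 * allocation into four independently freeable order-0 pages page[0..3],
 * each holding a reference count of one.
 */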
8dfcc9ba 1053
1da177e4
LT
1054/*
1055 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
1056 * we cheat by calling it from here, in the order > 0 path. Saves a branch
1057 * or two.
1058 */
18ea7e71 1059static struct page *buffered_rmqueue(struct zone *preferred_zone,
a74609fa 1060 struct zone *zone, int order, gfp_t gfp_flags)
1da177e4
LT
1061{
1062 unsigned long flags;
689bcebf 1063 struct page *page;
1da177e4 1064 int cold = !!(gfp_flags & __GFP_COLD);
a74609fa 1065 int cpu;
64c5e135 1066 int migratetype = allocflags_to_migratetype(gfp_flags);
1da177e4 1067
689bcebf 1068again:
a74609fa 1069 cpu = get_cpu();
48db57f8 1070 if (likely(order == 0)) {
1da177e4
LT
1071 struct per_cpu_pages *pcp;
1072
3dfa5721 1073 pcp = &zone_pcp(zone, cpu)->pcp;
1da177e4 1074 local_irq_save(flags);
a74609fa 1075 if (!pcp->count) {
941c7105 1076 pcp->count = rmqueue_bulk(zone, 0,
b2a0ac88 1077 pcp->batch, &pcp->list, migratetype);
a74609fa
NP
1078 if (unlikely(!pcp->count))
1079 goto failed;
1da177e4 1080 }
b92a6edd 1081
535131e6 1082 /* Find a page of the appropriate migrate type */
3dfa5721
CL
1083 if (cold) {
1084 list_for_each_entry_reverse(page, &pcp->list, lru)
1085 if (page_private(page) == migratetype)
1086 break;
1087 } else {
1088 list_for_each_entry(page, &pcp->list, lru)
1089 if (page_private(page) == migratetype)
1090 break;
1091 }
535131e6 1092
b92a6edd
MG
1093 /* Allocate more to the pcp list if necessary */
1094 if (unlikely(&page->lru == &pcp->list)) {
535131e6
MG
1095 pcp->count += rmqueue_bulk(zone, 0,
1096 pcp->batch, &pcp->list, migratetype);
1097 page = list_entry(pcp->list.next, struct page, lru);
535131e6 1098 }
b92a6edd
MG
1099
1100 list_del(&page->lru);
1101 pcp->count--;
7fb1d9fc 1102 } else {
1da177e4 1103 spin_lock_irqsave(&zone->lock, flags);
b2a0ac88 1104 page = __rmqueue(zone, order, migratetype);
a74609fa
NP
1105 spin_unlock(&zone->lock);
1106 if (!page)
1107 goto failed;
1da177e4
LT
1108 }
1109
f8891e5e 1110 __count_zone_vm_events(PGALLOC, zone, 1 << order);
18ea7e71 1111 zone_statistics(preferred_zone, zone);
a74609fa
NP
1112 local_irq_restore(flags);
1113 put_cpu();
1da177e4 1114
725d704e 1115 VM_BUG_ON(bad_range(zone, page));
17cf4406 1116 if (prep_new_page(page, order, gfp_flags))
a74609fa 1117 goto again;
1da177e4 1118 return page;
a74609fa
NP
1119
1120failed:
1121 local_irq_restore(flags);
1122 put_cpu();
1123 return NULL;
1da177e4
LT
1124}
1125
7fb1d9fc 1126#define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */
3148890b
NP
1127#define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */
1128#define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */
1129#define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */
1130#define ALLOC_HARDER 0x10 /* try to alloc harder */
1131#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
1132#define ALLOC_CPUSET 0x40 /* check for correct cpuset */
7fb1d9fc 1133
933e312e
AM
1134#ifdef CONFIG_FAIL_PAGE_ALLOC
1135
1136static struct fail_page_alloc_attr {
1137 struct fault_attr attr;
1138
1139 u32 ignore_gfp_highmem;
1140 u32 ignore_gfp_wait;
54114994 1141 u32 min_order;
933e312e
AM
1142
1143#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1144
1145 struct dentry *ignore_gfp_highmem_file;
1146 struct dentry *ignore_gfp_wait_file;
54114994 1147 struct dentry *min_order_file;
933e312e
AM
1148
1149#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1150
1151} fail_page_alloc = {
1152 .attr = FAULT_ATTR_INITIALIZER,
6b1b60f4
DM
1153 .ignore_gfp_wait = 1,
1154 .ignore_gfp_highmem = 1,
54114994 1155 .min_order = 1,
933e312e
AM
1156};
1157
1158static int __init setup_fail_page_alloc(char *str)
1159{
1160 return setup_fault_attr(&fail_page_alloc.attr, str);
1161}
1162__setup("fail_page_alloc=", setup_fail_page_alloc);
1163
1164static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1165{
54114994
AM
1166 if (order < fail_page_alloc.min_order)
1167 return 0;
933e312e
AM
1168 if (gfp_mask & __GFP_NOFAIL)
1169 return 0;
1170 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1171 return 0;
1172 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1173 return 0;
1174
1175 return should_fail(&fail_page_alloc.attr, 1 << order);
1176}
1177
1178#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1179
1180static int __init fail_page_alloc_debugfs(void)
1181{
1182 mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1183 struct dentry *dir;
1184 int err;
1185
1186 err = init_fault_attr_dentries(&fail_page_alloc.attr,
1187 "fail_page_alloc");
1188 if (err)
1189 return err;
1190 dir = fail_page_alloc.attr.dentries.dir;
1191
1192 fail_page_alloc.ignore_gfp_wait_file =
1193 debugfs_create_bool("ignore-gfp-wait", mode, dir,
1194 &fail_page_alloc.ignore_gfp_wait);
1195
1196 fail_page_alloc.ignore_gfp_highmem_file =
1197 debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1198 &fail_page_alloc.ignore_gfp_highmem);
54114994
AM
1199 fail_page_alloc.min_order_file =
1200 debugfs_create_u32("min-order", mode, dir,
1201 &fail_page_alloc.min_order);
933e312e
AM
1202
1203 if (!fail_page_alloc.ignore_gfp_wait_file ||
54114994
AM
1204 !fail_page_alloc.ignore_gfp_highmem_file ||
1205 !fail_page_alloc.min_order_file) {
933e312e
AM
1206 err = -ENOMEM;
1207 debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
1208 debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
54114994 1209 debugfs_remove(fail_page_alloc.min_order_file);
933e312e
AM
1210 cleanup_fault_attr_dentries(&fail_page_alloc.attr);
1211 }
1212
1213 return err;
1214}
1215
1216late_initcall(fail_page_alloc_debugfs);
1217
1218#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1219
1220#else /* CONFIG_FAIL_PAGE_ALLOC */
1221
1222static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1223{
1224 return 0;
1225}
1226
1227#endif /* CONFIG_FAIL_PAGE_ALLOC */
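/*
 * Example usage (assumptions: CONFIG_FAIL_PAGE_ALLOC and
 * CONFIG_FAULT_INJECTION_DEBUG_FS enabled, debugfs mounted at
 * /sys/kernel/debug):
 *
 *   echo 10 > /sys/kernel/debug/fail_page_alloc/probability
 *   echo 100 > /sys/kernel/debug/fail_page_alloc/times
 *   echo 1 > /sys/kernel/debug/fail_page_alloc/min-order
 *
 * or boot with fail_page_alloc=<interval>,<probability>,<space>,<times>
 * to configure the generic fault attributes at startup.
 */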
1228
1da177e4
LT
1229/*
1230 * Return 1 if free pages are above 'mark'. This takes into account the order
1231 * of the allocation.
1232 */
1233int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
7fb1d9fc 1234 int classzone_idx, int alloc_flags)
1da177e4
LT
1235{
1236 /* free_pages may go negative - that's OK */
d23ad423
CL
1237 long min = mark;
1238 long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
1da177e4
LT
1239 int o;
1240
7fb1d9fc 1241 if (alloc_flags & ALLOC_HIGH)
1da177e4 1242 min -= min / 2;
7fb1d9fc 1243 if (alloc_flags & ALLOC_HARDER)
1da177e4
LT
1244 min -= min / 4;
1245
1246 if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1247 return 0;
1248 for (o = 0; o < order; o++) {
1249 /* At the next order, this order's pages become unavailable */
1250 free_pages -= z->free_area[o].nr_free << o;
1251
1252 /* Require fewer higher order pages to be free */
1253 min >>= 1;
1254
1255 if (free_pages <= min)
1256 return 0;
1257 }
1258 return 1;
1259}
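/*
 * Worked example (illustrative figures): with mark = 128 pages, no
 * ALLOC_HIGH/ALLOC_HARDER and a zero lowmem_reserve, an order-2 request
 * needs more than 128 free pages in total; after discarding the order-0
 * free pages it needs more than 64 pages at order >= 1, and more than 32
 * pages at order >= 2 for the check to pass.
 */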
1260
9276b1bc
PJ
1261#ifdef CONFIG_NUMA
1262/*
1263 * zlc_setup - Setup for "zonelist cache". Uses cached zone data to
1264 * skip over zones that are not allowed by the cpuset, or that have
1265 * been recently (in last second) found to be nearly full. See further
1266 * comments in mmzone.h. Reduces cache footprint of zonelist scans
183ff22b 1267 * that have to skip over a lot of full or unallowed zones.
9276b1bc
PJ
1268 *
1269 * If the zonelist cache is present in the passed in zonelist, then
1270 * returns a pointer to the allowed node mask (either the current
37b07e41 1271 * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
9276b1bc
PJ
1272 *
1273 * If the zonelist cache is not available for this zonelist, does
1274 * nothing and returns NULL.
1275 *
1276 * If the fullzones BITMAP in the zonelist cache is stale (more than
1277 * a second since last zap'd) then we zap it out (clear its bits.)
1278 *
1279 * We hold off even calling zlc_setup, until after we've checked the
1280 * first zone in the zonelist, on the theory that most allocations will
1281 * be satisfied from that first zone, so best to examine that zone as
1282 * quickly as we can.
1283 */
1284static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1285{
1286 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1287 nodemask_t *allowednodes; /* zonelist_cache approximation */
1288
1289 zlc = zonelist->zlcache_ptr;
1290 if (!zlc)
1291 return NULL;
1292
f05111f5 1293 if (time_after(jiffies, zlc->last_full_zap + HZ)) {
9276b1bc
PJ
1294 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1295 zlc->last_full_zap = jiffies;
1296 }
1297
1298 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1299 &cpuset_current_mems_allowed :
37b07e41 1300 &node_states[N_HIGH_MEMORY];
9276b1bc
PJ
1301 return allowednodes;
1302}
1303
1304/*
1305 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1306 * if it is worth looking at further for free memory:
1307 * 1) Check that the zone isn't thought to be full (doesn't have its
1308 * bit set in the zonelist_cache fullzones BITMAP).
1309 * 2) Check that the zones node (obtained from the zonelist_cache
1310 * z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1311 * Return true (non-zero) if zone is worth looking at further, or
1312 * else return false (zero) if it is not.
1313 *
1314 * This check -ignores- the distinction between various watermarks,
1315 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is
1316 * found to be full for any variation of these watermarks, it will
1317 * be considered full for up to one second by all requests, unless
1318 * we are so low on memory on all allowed nodes that we are forced
1319 * into the second scan of the zonelist.
1320 *
1321 * In the second scan we ignore this zonelist cache and exactly
1322 * apply the watermarks to all zones, even if it is slower to do so.
1323 * We are low on memory in the second scan, and should leave no stone
1324 * unturned looking for a free page.
1325 */
dd1a239f 1326static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
9276b1bc
PJ
1327 nodemask_t *allowednodes)
1328{
1329 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1330 int i; /* index of *z in zonelist zones */
1331 int n; /* node that zone *z is on */
1332
1333 zlc = zonelist->zlcache_ptr;
1334 if (!zlc)
1335 return 1;
1336
dd1a239f 1337 i = z - zonelist->_zonerefs;
9276b1bc
PJ
1338 n = zlc->z_to_n[i];
1339
1340 /* This zone is worth trying if it is allowed but not full */
1341 return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1342}
1343
1344/*
1345 * Given 'z' scanning a zonelist, set the corresponding bit in
1346 * zlc->fullzones, so that subsequent attempts to allocate a page
1347 * from that zone don't waste time re-examining it.
1348 */
dd1a239f 1349static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
9276b1bc
PJ
1350{
1351 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1352 int i; /* index of *z in zonelist zones */
1353
1354 zlc = zonelist->zlcache_ptr;
1355 if (!zlc)
1356 return;
1357
dd1a239f 1358 i = z - zonelist->_zonerefs;
9276b1bc
PJ
1359
1360 set_bit(i, zlc->fullzones);
1361}
1362
1363#else /* CONFIG_NUMA */
1364
1365static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1366{
1367 return NULL;
1368}
1369
dd1a239f 1370static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
9276b1bc
PJ
1371 nodemask_t *allowednodes)
1372{
1373 return 1;
1374}
1375
dd1a239f 1376static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
9276b1bc
PJ
1377{
1378}
1379#endif /* CONFIG_NUMA */
1380
7fb1d9fc 1381/*
0798e519 1382 * get_page_from_freelist goes through the zonelist trying to allocate
7fb1d9fc
RS
1383 * a page.
1384 */
1385static struct page *
19770b32 1386get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
54a6eb5c 1387 struct zonelist *zonelist, int high_zoneidx, int alloc_flags)
753ee728 1388{
dd1a239f 1389 struct zoneref *z;
7fb1d9fc 1390 struct page *page = NULL;
54a6eb5c 1391 int classzone_idx;
18ea7e71 1392 struct zone *zone, *preferred_zone;
9276b1bc
PJ
1393 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1394 int zlc_active = 0; /* set if using zonelist_cache */
1395 int did_zlc_setup = 0; /* just call zlc_setup() one time */
54a6eb5c 1396
19770b32
MG
1397 (void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
1398 &preferred_zone);
7eb54824
AW
1399 if (!preferred_zone)
1400 return NULL;
1401
19770b32 1402 classzone_idx = zone_idx(preferred_zone);
7fb1d9fc 1403
b3c466ce
MG
1404 if (WARN_ON_ONCE(order >= MAX_ORDER))
1405 return NULL;
1406
9276b1bc 1407zonelist_scan:
7fb1d9fc 1408 /*
9276b1bc 1409 * Scan zonelist, looking for a zone with enough free.
7fb1d9fc
RS
1410 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1411 */
19770b32
MG
1412 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1413 high_zoneidx, nodemask) {
9276b1bc
PJ
1414 if (NUMA_BUILD && zlc_active &&
1415 !zlc_zone_worth_trying(zonelist, z, allowednodes))
1416 continue;
7fb1d9fc 1417 if ((alloc_flags & ALLOC_CPUSET) &&
02a0e53d 1418 !cpuset_zone_allowed_softwall(zone, gfp_mask))
9276b1bc 1419 goto try_next_zone;
7fb1d9fc
RS
1420
1421 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
3148890b
NP
1422 unsigned long mark;
1423 if (alloc_flags & ALLOC_WMARK_MIN)
1192d526 1424 mark = zone->pages_min;
3148890b 1425 else if (alloc_flags & ALLOC_WMARK_LOW)
1192d526 1426 mark = zone->pages_low;
3148890b 1427 else
1192d526 1428 mark = zone->pages_high;
0798e519
PJ
1429 if (!zone_watermark_ok(zone, order, mark,
1430 classzone_idx, alloc_flags)) {
9eeff239 1431 if (!zone_reclaim_mode ||
1192d526 1432 !zone_reclaim(zone, gfp_mask, order))
9276b1bc 1433 goto this_zone_full;
0798e519 1434 }
7fb1d9fc
RS
1435 }
1436
18ea7e71 1437 page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask);
0798e519 1438 if (page)
7fb1d9fc 1439 break;
9276b1bc
PJ
1440this_zone_full:
1441 if (NUMA_BUILD)
1442 zlc_mark_zone_full(zonelist, z);
1443try_next_zone:
1444 if (NUMA_BUILD && !did_zlc_setup) {
1445 /* we do zlc_setup after the first zone is tried */
1446 allowednodes = zlc_setup(zonelist, alloc_flags);
1447 zlc_active = 1;
1448 did_zlc_setup = 1;
1449 }
54a6eb5c 1450 }
9276b1bc
PJ
1451
1452 if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1453 /* Disable zlc cache for second zonelist scan */
1454 zlc_active = 0;
1455 goto zonelist_scan;
1456 }
7fb1d9fc 1457 return page;
753ee728
MH
1458}
1459
11e33f6a
MG
1460static inline int
1461should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1462 unsigned long pages_reclaimed)
1da177e4 1463{
11e33f6a
MG
1464 /* Do not loop if specifically requested */
1465 if (gfp_mask & __GFP_NORETRY)
1466 return 0;
1da177e4 1467
11e33f6a
MG
1468 /*
1469 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1470 * means __GFP_NOFAIL, but that may not be true in other
1471 * implementations.
1472 */
1473 if (order <= PAGE_ALLOC_COSTLY_ORDER)
1474 return 1;
1475
1476 /*
1477 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1478 * specified, then we retry until we no longer reclaim any pages
1479 * (above), or we've reclaimed an order of pages at least as
1480 * large as the allocation's order. In both cases, if the
1481 * allocation still fails, we stop retrying.
1482 */
1483 if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
1484 return 1;
cf40bd16 1485
11e33f6a
MG
1486 /*
1487 * Don't let big-order allocations loop unless the caller
1488 * explicitly requests that.
1489 */
1490 if (gfp_mask & __GFP_NOFAIL)
1491 return 1;
1da177e4 1492
11e33f6a
MG
1493 return 0;
1494}
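/*
 * For example: an order-9 request with __GFP_REPEAT keeps retrying until
 * the cumulative pages_reclaimed reaches 1 << 9 = 512, after which it
 * fails unless __GFP_NOFAIL is also set; requests at or below
 * PAGE_ALLOC_COSTLY_ORDER retry indefinitely, and __GFP_NORETRY never
 * retries at all.
 */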
933e312e 1495
11e33f6a
MG
1496static inline struct page *
1497__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
1498 struct zonelist *zonelist, enum zone_type high_zoneidx,
1499 nodemask_t *nodemask)
1500{
1501 struct page *page;
1502
1503 /* Acquire the OOM killer lock for the zones in zonelist */
1504 if (!try_set_zone_oom(zonelist, gfp_mask)) {
1505 schedule_timeout_uninterruptible(1);
1da177e4
LT
1506 return NULL;
1507 }
6b1de916 1508
11e33f6a
MG
1509 /*
1510 * Go through the zonelist yet one more time, keep very high watermark
1511 * here, this is only to catch a parallel oom killing, we must fail if
1512 * we're still under heavy pressure.
1513 */
1514 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1515 order, zonelist, high_zoneidx,
1516 ALLOC_WMARK_HIGH|ALLOC_CPUSET);
7fb1d9fc 1517 if (page)
11e33f6a
MG
1518 goto out;
1519
1520 /* The OOM killer will not help higher order allocs */
1521 if (order > PAGE_ALLOC_COSTLY_ORDER)
1522 goto out;
1523
1524 /* Exhausted what can be done so it's blamo time */
1525 out_of_memory(zonelist, gfp_mask, order);
1526
1527out:
1528 clear_zonelist_oom(zonelist, gfp_mask);
1529 return page;
1530}
1531
1532/* The really slow allocator path where we enter direct reclaim */
1533static inline struct page *
1534__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1535 struct zonelist *zonelist, enum zone_type high_zoneidx,
1536 nodemask_t *nodemask, int alloc_flags, unsigned long *did_some_progress)
1537{
1538 struct page *page = NULL;
1539 struct reclaim_state reclaim_state;
1540 struct task_struct *p = current;
1541
1542 cond_resched();
1543
1544 /* We now go into synchronous reclaim */
1545 cpuset_memory_pressure_bump();
1546
1547 /*
1548 * The task's cpuset might have expanded its set of allowable nodes
1549 */
1550 p->flags |= PF_MEMALLOC;
1551 lockdep_set_current_reclaim_state(gfp_mask);
1552 reclaim_state.reclaimed_slab = 0;
1553 p->reclaim_state = &reclaim_state;
1554
1555 *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
1556
1557 p->reclaim_state = NULL;
1558 lockdep_clear_current_reclaim_state();
1559 p->flags &= ~PF_MEMALLOC;
1560
1561 cond_resched();
1562
1563 if (order != 0)
1564 drain_all_pages();
1565
1566 if (likely(*did_some_progress))
1567 page = get_page_from_freelist(gfp_mask, nodemask, order,
1568 zonelist, high_zoneidx, alloc_flags);
1569 return page;
1570}
1571
1572static inline int
1573is_allocation_high_priority(struct task_struct *p, gfp_t gfp_mask)
1574{
1575 if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
1576 && !in_interrupt())
1577 return 1;
1578 return 0;
1579}
1580
1581/*
1582 * This is called in the allocator slow-path if the allocation request is of
1583 * sufficient urgency to ignore watermarks and take other desperate measures
1584 */
1585static inline struct page *
1586__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
1587 struct zonelist *zonelist, enum zone_type high_zoneidx,
1588 nodemask_t *nodemask)
1589{
1590 struct page *page;
1591
1592 do {
1593 page = get_page_from_freelist(gfp_mask, nodemask, order,
1594 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS);
1595
1596 if (!page && gfp_mask & __GFP_NOFAIL)
1597 congestion_wait(WRITE, HZ/50);
1598 } while (!page && (gfp_mask & __GFP_NOFAIL));
1599
1600 return page;
1601}
1602
1603static inline
1604void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
1605 enum zone_type high_zoneidx)
1606{
1607 struct zoneref *z;
1608 struct zone *zone;
1609
1610 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
1611 wakeup_kswapd(zone, order);
1612}
1613
1614static inline struct page *
1615__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
1616 struct zonelist *zonelist, enum zone_type high_zoneidx,
1617 nodemask_t *nodemask)
1618{
1619 const gfp_t wait = gfp_mask & __GFP_WAIT;
1620 struct page *page = NULL;
1621 int alloc_flags;
1622 unsigned long pages_reclaimed = 0;
1623 unsigned long did_some_progress;
1624 struct task_struct *p = current;
1da177e4 1625
952f3b51
CL
1626 /*
1627 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
1628 * __GFP_NOWARN set) should not cause reclaim since the subsystem
1629 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
1630 * using a larger set of nodes after it has established that the
1631 * allowed per node queues are empty and that nodes are
1632 * over allocated.
1633 */
1634 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1635 goto nopage;
1636
11e33f6a 1637 wake_all_kswapd(order, zonelist, high_zoneidx);
1da177e4 1638
9bf2229f 1639 /*
7fb1d9fc
RS
1640 * OK, we're below the kswapd watermark and have kicked background
1641 * reclaim. Now things get more complex, so set up alloc_flags according
1642 * to how we want to proceed.
1643 *
1644 * The caller may dip into page reserves a bit more if the caller
1645 * cannot run direct reclaim, or if the caller has realtime scheduling
4eac915d
PJ
1646 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
1647 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
9bf2229f 1648 */
3148890b 1649 alloc_flags = ALLOC_WMARK_MIN;
7fb1d9fc
RS
1650 if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
1651 alloc_flags |= ALLOC_HARDER;
1652 if (gfp_mask & __GFP_HIGH)
1653 alloc_flags |= ALLOC_HIGH;
bdd804f4
PJ
1654 if (wait)
1655 alloc_flags |= ALLOC_CPUSET;
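/*
 * For example: a GFP_ATOMIC request (!wait, __GFP_HIGH) ends up with
 * ALLOC_WMARK_MIN | ALLOC_HARDER | ALLOC_HIGH, while a plain GFP_KERNEL
 * request gets ALLOC_WMARK_MIN | ALLOC_CPUSET.
 */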
1da177e4 1656
11e33f6a 1657restart:
1da177e4
LT
1658 /*
1659 * Go through the zonelist again. Let __GFP_HIGH and allocations
7fb1d9fc 1660 * coming from realtime tasks go deeper into reserves.
1da177e4
LT
1661 *
1662 * This is the last chance, in general, before the goto nopage.
1663 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
9bf2229f 1664 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1da177e4 1665 */
19770b32 1666 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
54a6eb5c 1667 high_zoneidx, alloc_flags);
7fb1d9fc
RS
1668 if (page)
1669 goto got_pg;
1da177e4 1670
b43a57bb 1671rebalance:
11e33f6a
MG
1672 /* Allocate without watermarks if the context allows */
1673 if (is_allocation_high_priority(p, gfp_mask)) {
1674 /* Do not dip into emergency reserves if specified */
b84a35be 1675 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
11e33f6a
MG
1676 page = __alloc_pages_high_priority(gfp_mask, order,
1677 zonelist, high_zoneidx, nodemask);
7fb1d9fc
RS
1678 if (page)
1679 goto got_pg;
1da177e4 1680 }
11e33f6a
MG
1681
1682 /* Ensure no recursion into the allocator */
1da177e4
LT
1683 goto nopage;
1684 }
1685
1686 /* Atomic allocations - we can't balance anything */
1687 if (!wait)
1688 goto nopage;
1689
11e33f6a
MG
1690 /* Try direct reclaim and then allocating */
1691 page = __alloc_pages_direct_reclaim(gfp_mask, order,
1692 zonelist, high_zoneidx,
1693 nodemask,
1694 alloc_flags, &did_some_progress);
1695 if (page)
1696 goto got_pg;
1da177e4 1697
11e33f6a
MG
1698 /*
1699 * If we failed to make any progress reclaiming, then we are
1700 * running out of options and have to consider going OOM
1701 */
1702 if (!did_some_progress) {
1703 if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
1704 page = __alloc_pages_may_oom(gfp_mask, order,
1705 zonelist, high_zoneidx,
1706 nodemask);
1707 if (page)
1708 goto got_pg;
1da177e4 1709
11e33f6a
MG
1710 /*
1711 * The OOM killer does not trigger for high-order allocations
1712 * but if no progress is being made, there are no other
1713 * options and retrying is unlikely to help
1714 */
1715 if (order > PAGE_ALLOC_COSTLY_ORDER)
1716 goto nopage;
e2c55dc8 1717
ff0ceb9d
DR
1718 goto restart;
1719 }
1da177e4
LT
1720 }
1721
11e33f6a 1722 /* Check if we should retry the allocation */
a41f24ea 1723 pages_reclaimed += did_some_progress;
11e33f6a
MG
1724 if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
1725 /* Wait for some write requests to complete then retry */
3fcfab16 1726 congestion_wait(WRITE, HZ/50);
1da177e4
LT
1727 goto rebalance;
1728 }
1729
1730nopage:
1731 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
1732 printk(KERN_WARNING "%s: page allocation failure."
1733 " order:%d, mode:0x%x\n",
1734 p->comm, order, gfp_mask);
1735 dump_stack();
578c2fd6 1736 show_mem();
1da177e4 1737 }
1da177e4 1738got_pg:
1da177e4 1739 return page;
11e33f6a
MG
1740
1741}
1742
1743/*
1744 * This is the 'heart' of the zoned buddy allocator.
1745 */
1746struct page *
1747__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
1748 struct zonelist *zonelist, nodemask_t *nodemask)
1749{
1750 enum zone_type high_zoneidx = gfp_zone(gfp_mask);
1751 struct page *page;
1752
1753 lockdep_trace_alloc(gfp_mask);
1754
1755 might_sleep_if(gfp_mask & __GFP_WAIT);
1756
1757 if (should_fail_alloc_page(gfp_mask, order))
1758 return NULL;
1759
1760 /*
1761 * Check the zones suitable for the gfp_mask contain at least one
1762 * valid zone. It's possible to have an empty zonelist as a result
1763 * of GFP_THISNODE and a memoryless node
1764 */
1765 if (unlikely(!zonelist->_zonerefs->zone))
1766 return NULL;
1767
1768 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
1769 zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET);
1770 if (unlikely(!page))
1771 page = __alloc_pages_slowpath(gfp_mask, order,
1772 zonelist, high_zoneidx, nodemask);
1773
1774 return page;
1da177e4 1775}
d239171e 1776EXPORT_SYMBOL(__alloc_pages_nodemask);
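As an illustration of how the fast/slow split above is reached from ordinary kernel code, here is a minimal, hypothetical caller (example_grab_buffer() is not part of this file); alloc_pages() funnels into __alloc_pages_nodemask() via the node-local zonelist:

static void *example_grab_buffer(void)
{
	struct page *page;

	/* GFP_KERNEL may sleep, so __alloc_pages_slowpath() may run */
	page = alloc_pages(GFP_KERNEL, 2);	/* four contiguous pages */
	if (!page)
		return NULL;
	return page_address(page);		/* lowmem, so this is valid */
}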
1da177e4
LT
1777
1778/*
1779 * Common helper functions.
1780 */
920c7a5d 1781unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1da177e4
LT
1782{
1783 struct page * page;
1784 page = alloc_pages(gfp_mask, order);
1785 if (!page)
1786 return 0;
1787 return (unsigned long) page_address(page);
1788}
1789
1790EXPORT_SYMBOL(__get_free_pages);
1791
920c7a5d 1792unsigned long get_zeroed_page(gfp_t gfp_mask)
1da177e4
LT
1793{
1794 struct page * page;
1795
1796 /*
 1797 * get_zeroed_page() returns the page's kernel virtual address, which
 1798 * a highmem page does not have
1799 */
725d704e 1800 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
1da177e4
LT
1801
1802 page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
1803 if (page)
1804 return (unsigned long) page_address(page);
1805 return 0;
1806}
1807
1808EXPORT_SYMBOL(get_zeroed_page);
1809
1810void __pagevec_free(struct pagevec *pvec)
1811{
1812 int i = pagevec_count(pvec);
1813
1814 while (--i >= 0)
1815 free_hot_cold_page(pvec->pages[i], pvec->cold);
1816}
1817
920c7a5d 1818void __free_pages(struct page *page, unsigned int order)
1da177e4 1819{
b5810039 1820 if (put_page_testzero(page)) {
1da177e4
LT
1821 if (order == 0)
1822 free_hot_page(page);
1823 else
1824 __free_pages_ok(page, order);
1825 }
1826}
1827
1828EXPORT_SYMBOL(__free_pages);
1829
920c7a5d 1830void free_pages(unsigned long addr, unsigned int order)
1da177e4
LT
1831{
1832 if (addr != 0) {
725d704e 1833 VM_BUG_ON(!virt_addr_valid((void *)addr));
1da177e4
LT
1834 __free_pages(virt_to_page((void *)addr), order);
1835 }
1836}
1837
1838EXPORT_SYMBOL(free_pages);
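A short, hypothetical sketch of how these address-based helpers pair up (the function name is illustrative only); the order passed to free_pages() must match the order used for the allocation:

static int example_address_helpers(void)
{
	unsigned long addr;

	addr = __get_free_pages(GFP_KERNEL, 1);	/* two pages, not zeroed */
	if (!addr)
		return -ENOMEM;
	free_pages(addr, 1);			/* same order as allocated */

	addr = get_zeroed_page(GFP_KERNEL);	/* one zeroed lowmem page */
	if (!addr)
		return -ENOMEM;
	free_page(addr);			/* shorthand for order 0 */
	return 0;
}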
1839
2be0ffe2
TT
1840/**
 1841 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
1842 * @size: the number of bytes to allocate
1843 * @gfp_mask: GFP flags for the allocation
1844 *
1845 * This function is similar to alloc_pages(), except that it allocates the
1846 * minimum number of pages to satisfy the request. alloc_pages() can only
1847 * allocate memory in power-of-two pages.
1848 *
1849 * This function is also limited by MAX_ORDER.
1850 *
1851 * Memory allocated by this function must be released by free_pages_exact().
1852 */
1853void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
1854{
1855 unsigned int order = get_order(size);
1856 unsigned long addr;
1857
1858 addr = __get_free_pages(gfp_mask, order);
1859 if (addr) {
1860 unsigned long alloc_end = addr + (PAGE_SIZE << order);
1861 unsigned long used = addr + PAGE_ALIGN(size);
1862
1863 split_page(virt_to_page(addr), order);
1864 while (used < alloc_end) {
1865 free_page(used);
1866 used += PAGE_SIZE;
1867 }
1868 }
1869
1870 return (void *)addr;
1871}
1872EXPORT_SYMBOL(alloc_pages_exact);
1873
1874/**
1875 * free_pages_exact - release memory allocated via alloc_pages_exact()
1876 * @virt: the value returned by alloc_pages_exact.
1877 * @size: size of allocation, same value as passed to alloc_pages_exact().
1878 *
1879 * Release the memory allocated by a previous call to alloc_pages_exact.
1880 */
1881void free_pages_exact(void *virt, size_t size)
1882{
1883 unsigned long addr = (unsigned long)virt;
1884 unsigned long end = addr + PAGE_ALIGN(size);
1885
1886 while (addr < end) {
1887 free_page(addr);
1888 addr += PAGE_SIZE;
1889 }
1890}
1891EXPORT_SYMBOL(free_pages_exact);
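A hedged usage sketch (the helper name is made up for illustration): asking for five pages' worth of memory makes get_order() allocate eight pages, after which alloc_pages_exact() splits the block and frees the three unused tail pages, so the matching release must be by size, not order:

static int example_exact(void)
{
	void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... use exactly 5 * PAGE_SIZE bytes ... */
	free_pages_exact(buf, 5 * PAGE_SIZE);	/* size, not order */
	return 0;
}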
1892
1da177e4
LT
1893static unsigned int nr_free_zone_pages(int offset)
1894{
dd1a239f 1895 struct zoneref *z;
54a6eb5c
MG
1896 struct zone *zone;
1897
e310fd43 1898 /* Just pick one node, since fallback list is circular */
1da177e4
LT
1899 unsigned int sum = 0;
1900
0e88460d 1901 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
1da177e4 1902
54a6eb5c 1903 for_each_zone_zonelist(zone, z, zonelist, offset) {
e310fd43
MB
1904 unsigned long size = zone->present_pages;
1905 unsigned long high = zone->pages_high;
1906 if (size > high)
1907 sum += size - high;
1da177e4
LT
1908 }
1909
1910 return sum;
1911}
1912
1913/*
1914 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
1915 */
1916unsigned int nr_free_buffer_pages(void)
1917{
af4ca457 1918 return nr_free_zone_pages(gfp_zone(GFP_USER));
1da177e4 1919}
c2f1a551 1920EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1da177e4
LT
1921
1922/*
1923 * Amount of free RAM allocatable within all zones
1924 */
1925unsigned int nr_free_pagecache_pages(void)
1926{
2a1e274a 1927 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
1da177e4 1928}
08e0f6a9
CL
1929
1930static inline void show_node(struct zone *zone)
1da177e4 1931{
08e0f6a9 1932 if (NUMA_BUILD)
25ba77c1 1933 printk("Node %d ", zone_to_nid(zone));
1da177e4 1934}
1da177e4 1935
1da177e4
LT
1936void si_meminfo(struct sysinfo *val)
1937{
1938 val->totalram = totalram_pages;
1939 val->sharedram = 0;
d23ad423 1940 val->freeram = global_page_state(NR_FREE_PAGES);
1da177e4 1941 val->bufferram = nr_blockdev_pages();
1da177e4
LT
1942 val->totalhigh = totalhigh_pages;
1943 val->freehigh = nr_free_highpages();
1da177e4
LT
1944 val->mem_unit = PAGE_SIZE;
1945}
1946
1947EXPORT_SYMBOL(si_meminfo);
1948
1949#ifdef CONFIG_NUMA
1950void si_meminfo_node(struct sysinfo *val, int nid)
1951{
1952 pg_data_t *pgdat = NODE_DATA(nid);
1953
1954 val->totalram = pgdat->node_present_pages;
d23ad423 1955 val->freeram = node_page_state(nid, NR_FREE_PAGES);
98d2b0eb 1956#ifdef CONFIG_HIGHMEM
1da177e4 1957 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
d23ad423
CL
1958 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
1959 NR_FREE_PAGES);
98d2b0eb
CL
1960#else
1961 val->totalhigh = 0;
1962 val->freehigh = 0;
1963#endif
1da177e4
LT
1964 val->mem_unit = PAGE_SIZE;
1965}
1966#endif
1967
1968#define K(x) ((x) << (PAGE_SHIFT-10))
1969
1970/*
1971 * Show free area list (used inside shift_scroll-lock stuff)
1972 * We also calculate the percentage fragmentation. We do this by counting the
1973 * memory on each free list with the exception of the first item on the list.
1974 */
1975void show_free_areas(void)
1976{
c7241913 1977 int cpu;
1da177e4
LT
1978 struct zone *zone;
1979
ee99c71c 1980 for_each_populated_zone(zone) {
c7241913
JS
1981 show_node(zone);
1982 printk("%s per-cpu:\n", zone->name);
1da177e4 1983
6b482c67 1984 for_each_online_cpu(cpu) {
1da177e4
LT
1985 struct per_cpu_pageset *pageset;
1986
e7c8d5c9 1987 pageset = zone_pcp(zone, cpu);
1da177e4 1988
3dfa5721
CL
1989 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
1990 cpu, pageset->pcp.high,
1991 pageset->pcp.batch, pageset->pcp.count);
1da177e4
LT
1992 }
1993 }
1994
7b854121
LS
1995 printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
1996 " inactive_file:%lu"
1997//TODO: check/adjust line lengths
1998#ifdef CONFIG_UNEVICTABLE_LRU
1999 " unevictable:%lu"
2000#endif
2001 " dirty:%lu writeback:%lu unstable:%lu\n"
d23ad423 2002 " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
4f98a2fe
RR
2003 global_page_state(NR_ACTIVE_ANON),
2004 global_page_state(NR_ACTIVE_FILE),
2005 global_page_state(NR_INACTIVE_ANON),
2006 global_page_state(NR_INACTIVE_FILE),
7b854121
LS
2007#ifdef CONFIG_UNEVICTABLE_LRU
2008 global_page_state(NR_UNEVICTABLE),
2009#endif
b1e7a8fd 2010 global_page_state(NR_FILE_DIRTY),
ce866b34 2011 global_page_state(NR_WRITEBACK),
fd39fc85 2012 global_page_state(NR_UNSTABLE_NFS),
d23ad423 2013 global_page_state(NR_FREE_PAGES),
972d1a7b
CL
2014 global_page_state(NR_SLAB_RECLAIMABLE) +
2015 global_page_state(NR_SLAB_UNRECLAIMABLE),
65ba55f5 2016 global_page_state(NR_FILE_MAPPED),
a25700a5
AM
2017 global_page_state(NR_PAGETABLE),
2018 global_page_state(NR_BOUNCE));
1da177e4 2019
ee99c71c 2020 for_each_populated_zone(zone) {
1da177e4
LT
2021 int i;
2022
2023 show_node(zone);
2024 printk("%s"
2025 " free:%lukB"
2026 " min:%lukB"
2027 " low:%lukB"
2028 " high:%lukB"
4f98a2fe
RR
2029 " active_anon:%lukB"
2030 " inactive_anon:%lukB"
2031 " active_file:%lukB"
2032 " inactive_file:%lukB"
7b854121
LS
2033#ifdef CONFIG_UNEVICTABLE_LRU
2034 " unevictable:%lukB"
2035#endif
1da177e4
LT
2036 " present:%lukB"
2037 " pages_scanned:%lu"
2038 " all_unreclaimable? %s"
2039 "\n",
2040 zone->name,
d23ad423 2041 K(zone_page_state(zone, NR_FREE_PAGES)),
1da177e4
LT
2042 K(zone->pages_min),
2043 K(zone->pages_low),
2044 K(zone->pages_high),
4f98a2fe
RR
2045 K(zone_page_state(zone, NR_ACTIVE_ANON)),
2046 K(zone_page_state(zone, NR_INACTIVE_ANON)),
2047 K(zone_page_state(zone, NR_ACTIVE_FILE)),
2048 K(zone_page_state(zone, NR_INACTIVE_FILE)),
7b854121
LS
2049#ifdef CONFIG_UNEVICTABLE_LRU
2050 K(zone_page_state(zone, NR_UNEVICTABLE)),
2051#endif
1da177e4
LT
2052 K(zone->present_pages),
2053 zone->pages_scanned,
e815af95 2054 (zone_is_all_unreclaimable(zone) ? "yes" : "no")
1da177e4
LT
2055 );
2056 printk("lowmem_reserve[]:");
2057 for (i = 0; i < MAX_NR_ZONES; i++)
2058 printk(" %lu", zone->lowmem_reserve[i]);
2059 printk("\n");
2060 }
2061
ee99c71c 2062 for_each_populated_zone(zone) {
8f9de51a 2063 unsigned long nr[MAX_ORDER], flags, order, total = 0;
1da177e4
LT
2064
2065 show_node(zone);
2066 printk("%s: ", zone->name);
1da177e4
LT
2067
2068 spin_lock_irqsave(&zone->lock, flags);
2069 for (order = 0; order < MAX_ORDER; order++) {
8f9de51a
KK
2070 nr[order] = zone->free_area[order].nr_free;
2071 total += nr[order] << order;
1da177e4
LT
2072 }
2073 spin_unlock_irqrestore(&zone->lock, flags);
8f9de51a
KK
2074 for (order = 0; order < MAX_ORDER; order++)
2075 printk("%lu*%lukB ", nr[order], K(1UL) << order);
1da177e4
LT
2076 printk("= %lukB\n", K(total));
2077 }
2078
e6f3602d
LW
2079 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2080
1da177e4
LT
2081 show_swap_cache_info();
2082}
2083
19770b32
MG
2084static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2085{
2086 zoneref->zone = zone;
2087 zoneref->zone_idx = zone_idx(zone);
2088}
2089
1da177e4
LT
2090/*
2091 * Builds allocation fallback zone lists.
1a93205b
CL
2092 *
2093 * Add all populated zones of a node to the zonelist.
1da177e4 2094 */
f0c0b2b8
KH
2095static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2096 int nr_zones, enum zone_type zone_type)
1da177e4 2097{
1a93205b
CL
2098 struct zone *zone;
2099
98d2b0eb 2100 BUG_ON(zone_type >= MAX_NR_ZONES);
2f6726e5 2101 zone_type++;
02a68a5e
CL
2102
2103 do {
2f6726e5 2104 zone_type--;
070f8032 2105 zone = pgdat->node_zones + zone_type;
1a93205b 2106 if (populated_zone(zone)) {
dd1a239f
MG
2107 zoneref_set_zone(zone,
2108 &zonelist->_zonerefs[nr_zones++]);
070f8032 2109 check_highest_zone(zone_type);
1da177e4 2110 }
02a68a5e 2111
2f6726e5 2112 } while (zone_type);
070f8032 2113 return nr_zones;
1da177e4
LT
2114}
2115
f0c0b2b8
KH
2116
2117/*
2118 * zonelist_order:
2119 * 0 = automatic detection of better ordering.
2120 * 1 = order by ([node] distance, -zonetype)
2121 * 2 = order by (-zonetype, [node] distance)
2122 *
2123 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
2124 * the same zonelist. So only NUMA can configure this param.
2125 */
2126#define ZONELIST_ORDER_DEFAULT 0
2127#define ZONELIST_ORDER_NODE 1
2128#define ZONELIST_ORDER_ZONE 2
2129
2130/* zonelist order in the kernel.
2131 * set_zonelist_order() will set this to NODE or ZONE.
2132 */
2133static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2134static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2135
2136
1da177e4 2137#ifdef CONFIG_NUMA
f0c0b2b8
KH
2138/* The value user specified ....changed by config */
2139static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2140/* string for sysctl */
2141#define NUMA_ZONELIST_ORDER_LEN 16
2142char numa_zonelist_order[16] = "default";
2143
2144/*
 2145 * interface for configuring zonelist ordering.
 2146 * command line option "numa_zonelist_order"
 2147 * = "[dD]efault" - default, automatic configuration.
 2148 * = "[nN]ode" - order by node locality, then by zone within node
 2149 * = "[zZ]one" - order by zone, then by locality within zone
2150 */
2151
2152static int __parse_numa_zonelist_order(char *s)
2153{
2154 if (*s == 'd' || *s == 'D') {
2155 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2156 } else if (*s == 'n' || *s == 'N') {
2157 user_zonelist_order = ZONELIST_ORDER_NODE;
2158 } else if (*s == 'z' || *s == 'Z') {
2159 user_zonelist_order = ZONELIST_ORDER_ZONE;
2160 } else {
2161 printk(KERN_WARNING
2162 "Ignoring invalid numa_zonelist_order value: "
2163 "%s\n", s);
2164 return -EINVAL;
2165 }
2166 return 0;
2167}
2168
2169static __init int setup_numa_zonelist_order(char *s)
2170{
2171 if (s)
2172 return __parse_numa_zonelist_order(s);
2173 return 0;
2174}
2175early_param("numa_zonelist_order", setup_numa_zonelist_order);
2176
2177/*
2178 * sysctl handler for numa_zonelist_order
2179 */
2180int numa_zonelist_order_handler(ctl_table *table, int write,
2181 struct file *file, void __user *buffer, size_t *length,
2182 loff_t *ppos)
2183{
2184 char saved_string[NUMA_ZONELIST_ORDER_LEN];
2185 int ret;
2186
2187 if (write)
2188 strncpy(saved_string, (char*)table->data,
2189 NUMA_ZONELIST_ORDER_LEN);
2190 ret = proc_dostring(table, write, file, buffer, length, ppos);
2191 if (ret)
2192 return ret;
2193 if (write) {
2194 int oldval = user_zonelist_order;
2195 if (__parse_numa_zonelist_order((char*)table->data)) {
2196 /*
2197 * bogus value. restore saved string
2198 */
2199 strncpy((char*)table->data, saved_string,
2200 NUMA_ZONELIST_ORDER_LEN);
2201 user_zonelist_order = oldval;
2202 } else if (oldval != user_zonelist_order)
2203 build_all_zonelists();
2204 }
2205 return 0;
2206}
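Both configuration paths end up in __parse_numa_zonelist_order() above; as a hedged example of driving them (values are matched on their first letter, case-insensitively):

/*
 * Illustrative usage, not part of this file:
 *
 *   boot command line:  numa_zonelist_order=zone
 *   at runtime (NUMA):  echo node > /proc/sys/vm/numa_zonelist_order
 *
 * An invalid string is rejected with a warning and the previous
 * ordering is kept; a valid change rebuilds the zonelists.
 */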
2207
2208
1da177e4 2209#define MAX_NODE_LOAD (num_online_nodes())
f0c0b2b8
KH
2210static int node_load[MAX_NUMNODES];
2211
1da177e4 2212/**
4dc3b16b 2213 * find_next_best_node - find the next node that should appear in a given node's fallback list
1da177e4
LT
2214 * @node: node whose fallback list we're appending
2215 * @used_node_mask: nodemask_t of already used nodes
2216 *
2217 * We use a number of factors to determine which is the next node that should
2218 * appear on a given node's fallback list. The node should not have appeared
2219 * already in @node's fallback list, and it should be the next closest node
2220 * according to the distance array (which contains arbitrary distance values
2221 * from each node to each node in the system), and should also prefer nodes
2222 * with no CPUs, since presumably they'll have very little allocation pressure
2223 * on them otherwise.
2224 * It returns -1 if no node is found.
2225 */
f0c0b2b8 2226static int find_next_best_node(int node, nodemask_t *used_node_mask)
1da177e4 2227{
4cf808eb 2228 int n, val;
1da177e4
LT
2229 int min_val = INT_MAX;
2230 int best_node = -1;
a70f7302 2231 const struct cpumask *tmp = cpumask_of_node(0);
1da177e4 2232
4cf808eb
LT
2233 /* Use the local node if we haven't already */
2234 if (!node_isset(node, *used_node_mask)) {
2235 node_set(node, *used_node_mask);
2236 return node;
2237 }
1da177e4 2238
37b07e41 2239 for_each_node_state(n, N_HIGH_MEMORY) {
1da177e4
LT
2240
2241 /* Don't want a node to appear more than once */
2242 if (node_isset(n, *used_node_mask))
2243 continue;
2244
1da177e4
LT
2245 /* Use the distance array to find the distance */
2246 val = node_distance(node, n);
2247
4cf808eb
LT
2248 /* Penalize nodes under us ("prefer the next node") */
2249 val += (n < node);
2250
1da177e4 2251 /* Give preference to headless and unused nodes */
a70f7302
RR
2252 tmp = cpumask_of_node(n);
2253 if (!cpumask_empty(tmp))
1da177e4
LT
2254 val += PENALTY_FOR_NODE_WITH_CPUS;
2255
2256 /* Slight preference for less loaded node */
2257 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2258 val += node_load[n];
2259
2260 if (val < min_val) {
2261 min_val = val;
2262 best_node = n;
2263 }
2264 }
2265
2266 if (best_node >= 0)
2267 node_set(best_node, *used_node_mask);
2268
2269 return best_node;
2270}
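To make the scoring above concrete, a brief descriptive note on how the factors combine:

/*
 * The score starts from the SLIT distance, adds 1 for candidates numbered
 * below @node ("prefer the next node") and PENALTY_FOR_NODE_WITH_CPUS for
 * candidates that have CPUs, and is then scaled by
 * MAX_NODE_LOAD * MAX_NUMNODES before node_load[] is added, so the load
 * only breaks ties between otherwise equally attractive nodes; the
 * candidate with the lowest score wins.
 */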
2271
f0c0b2b8
KH
2272
2273/*
2274 * Build zonelists ordered by node and zones within node.
2275 * This results in maximum locality--normal zone overflows into local
2276 * DMA zone, if any--but risks exhausting DMA zone.
2277 */
2278static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
1da177e4 2279{
f0c0b2b8 2280 int j;
1da177e4 2281 struct zonelist *zonelist;
f0c0b2b8 2282
54a6eb5c 2283 zonelist = &pgdat->node_zonelists[0];
dd1a239f 2284 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
54a6eb5c
MG
2285 ;
2286 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2287 MAX_NR_ZONES - 1);
dd1a239f
MG
2288 zonelist->_zonerefs[j].zone = NULL;
2289 zonelist->_zonerefs[j].zone_idx = 0;
f0c0b2b8
KH
2290}
2291
523b9458
CL
2292/*
2293 * Build gfp_thisnode zonelists
2294 */
2295static void build_thisnode_zonelists(pg_data_t *pgdat)
2296{
523b9458
CL
2297 int j;
2298 struct zonelist *zonelist;
2299
54a6eb5c
MG
2300 zonelist = &pgdat->node_zonelists[1];
2301 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
dd1a239f
MG
2302 zonelist->_zonerefs[j].zone = NULL;
2303 zonelist->_zonerefs[j].zone_idx = 0;
523b9458
CL
2304}
2305
f0c0b2b8
KH
2306/*
2307 * Build zonelists ordered by zone and nodes within zones.
2308 * This results in conserving DMA zone[s] until all Normal memory is
2309 * exhausted, but results in overflowing to remote node while memory
2310 * may still exist in local DMA zone.
2311 */
2312static int node_order[MAX_NUMNODES];
2313
2314static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2315{
f0c0b2b8
KH
2316 int pos, j, node;
2317 int zone_type; /* needs to be signed */
2318 struct zone *z;
2319 struct zonelist *zonelist;
2320
54a6eb5c
MG
2321 zonelist = &pgdat->node_zonelists[0];
2322 pos = 0;
2323 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2324 for (j = 0; j < nr_nodes; j++) {
2325 node = node_order[j];
2326 z = &NODE_DATA(node)->node_zones[zone_type];
2327 if (populated_zone(z)) {
dd1a239f
MG
2328 zoneref_set_zone(z,
2329 &zonelist->_zonerefs[pos++]);
54a6eb5c 2330 check_highest_zone(zone_type);
f0c0b2b8
KH
2331 }
2332 }
f0c0b2b8 2333 }
dd1a239f
MG
2334 zonelist->_zonerefs[pos].zone = NULL;
2335 zonelist->_zonerefs[pos].zone_idx = 0;
f0c0b2b8
KH
2336}
2337
2338static int default_zonelist_order(void)
2339{
2340 int nid, zone_type;
 2341 unsigned long low_kmem_size, total_size;
2342 struct zone *z;
2343 int average_size;
2344 /*
 2345 * ZONE_DMA and ZONE_DMA32 can be a very small area in the system.
 2346 * If they are really small and used heavily, the system can fall
 2347 * into OOM very easily.
 2348 * This function detects ZONE_DMA/DMA32 size and configures zone order.
2349 */
2350 /* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */
2351 low_kmem_size = 0;
2352 total_size = 0;
2353 for_each_online_node(nid) {
2354 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2355 z = &NODE_DATA(nid)->node_zones[zone_type];
2356 if (populated_zone(z)) {
2357 if (zone_type < ZONE_NORMAL)
2358 low_kmem_size += z->present_pages;
2359 total_size += z->present_pages;
2360 }
2361 }
2362 }
 2363 if (!low_kmem_size || /* there is no DMA area. */
2364 low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2365 return ZONELIST_ORDER_NODE;
2366 /*
2367 * look into each node's config.
 2368 * If there is a node whose DMA/DMA32 memory covers a large share of
 2369 * its local memory, NODE_ORDER may be suitable.
2370 */
37b07e41
LS
2371 average_size = total_size /
2372 (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
f0c0b2b8
KH
2373 for_each_online_node(nid) {
2374 low_kmem_size = 0;
2375 total_size = 0;
2376 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2377 z = &NODE_DATA(nid)->node_zones[zone_type];
2378 if (populated_zone(z)) {
2379 if (zone_type < ZONE_NORMAL)
2380 low_kmem_size += z->present_pages;
2381 total_size += z->present_pages;
2382 }
2383 }
2384 if (low_kmem_size &&
2385 total_size > average_size && /* ignore small node */
2386 low_kmem_size > total_size * 70/100)
2387 return ZONELIST_ORDER_NODE;
2388 }
2389 return ZONELIST_ORDER_ZONE;
2390}
2391
2392static void set_zonelist_order(void)
2393{
2394 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2395 current_zonelist_order = default_zonelist_order();
2396 else
2397 current_zonelist_order = user_zonelist_order;
2398}
2399
2400static void build_zonelists(pg_data_t *pgdat)
2401{
2402 int j, node, load;
2403 enum zone_type i;
1da177e4 2404 nodemask_t used_mask;
f0c0b2b8
KH
2405 int local_node, prev_node;
2406 struct zonelist *zonelist;
2407 int order = current_zonelist_order;
1da177e4
LT
2408
2409 /* initialize zonelists */
523b9458 2410 for (i = 0; i < MAX_ZONELISTS; i++) {
1da177e4 2411 zonelist = pgdat->node_zonelists + i;
dd1a239f
MG
2412 zonelist->_zonerefs[0].zone = NULL;
2413 zonelist->_zonerefs[0].zone_idx = 0;
1da177e4
LT
2414 }
2415
2416 /* NUMA-aware ordering of nodes */
2417 local_node = pgdat->node_id;
2418 load = num_online_nodes();
2419 prev_node = local_node;
2420 nodes_clear(used_mask);
f0c0b2b8
KH
2421
2422 memset(node_load, 0, sizeof(node_load));
2423 memset(node_order, 0, sizeof(node_order));
2424 j = 0;
2425
1da177e4 2426 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
9eeff239
CL
2427 int distance = node_distance(local_node, node);
2428
2429 /*
2430 * If another node is sufficiently far away then it is better
2431 * to reclaim pages in a zone before going off node.
2432 */
2433 if (distance > RECLAIM_DISTANCE)
2434 zone_reclaim_mode = 1;
2435
1da177e4
LT
2436 /*
2437 * We don't want to pressure a particular node.
2438 * So adding penalty to the first node in same
2439 * distance group to make it round-robin.
2440 */
9eeff239 2441 if (distance != node_distance(local_node, prev_node))
f0c0b2b8
KH
2442 node_load[node] = load;
2443
1da177e4
LT
2444 prev_node = node;
2445 load--;
f0c0b2b8
KH
2446 if (order == ZONELIST_ORDER_NODE)
2447 build_zonelists_in_node_order(pgdat, node);
2448 else
2449 node_order[j++] = node; /* remember order */
2450 }
1da177e4 2451
f0c0b2b8
KH
2452 if (order == ZONELIST_ORDER_ZONE) {
2453 /* calculate node order -- i.e., DMA last! */
2454 build_zonelists_in_zone_order(pgdat, j);
1da177e4 2455 }
523b9458
CL
2456
2457 build_thisnode_zonelists(pgdat);
1da177e4
LT
2458}
2459
9276b1bc 2460/* Construct the zonelist performance cache - see further mmzone.h */
f0c0b2b8 2461static void build_zonelist_cache(pg_data_t *pgdat)
9276b1bc 2462{
54a6eb5c
MG
2463 struct zonelist *zonelist;
2464 struct zonelist_cache *zlc;
dd1a239f 2465 struct zoneref *z;
9276b1bc 2466
54a6eb5c
MG
2467 zonelist = &pgdat->node_zonelists[0];
2468 zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2469 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
dd1a239f
MG
2470 for (z = zonelist->_zonerefs; z->zone; z++)
2471 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
9276b1bc
PJ
2472}
2473
f0c0b2b8 2474
1da177e4
LT
2475#else /* CONFIG_NUMA */
2476
f0c0b2b8
KH
2477static void set_zonelist_order(void)
2478{
2479 current_zonelist_order = ZONELIST_ORDER_ZONE;
2480}
2481
2482static void build_zonelists(pg_data_t *pgdat)
1da177e4 2483{
19655d34 2484 int node, local_node;
54a6eb5c
MG
2485 enum zone_type j;
2486 struct zonelist *zonelist;
1da177e4
LT
2487
2488 local_node = pgdat->node_id;
1da177e4 2489
54a6eb5c
MG
2490 zonelist = &pgdat->node_zonelists[0];
2491 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
1da177e4 2492
54a6eb5c
MG
2493 /*
2494 * Now we build the zonelist so that it contains the zones
2495 * of all the other nodes.
2496 * We don't want to pressure a particular node, so when
2497 * building the zones for node N, we make sure that the
2498 * zones coming right after the local ones are those from
2499 * node N+1 (modulo N)
2500 */
2501 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2502 if (!node_online(node))
2503 continue;
2504 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2505 MAX_NR_ZONES - 1);
1da177e4 2506 }
54a6eb5c
MG
2507 for (node = 0; node < local_node; node++) {
2508 if (!node_online(node))
2509 continue;
2510 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2511 MAX_NR_ZONES - 1);
2512 }
2513
dd1a239f
MG
2514 zonelist->_zonerefs[j].zone = NULL;
2515 zonelist->_zonerefs[j].zone_idx = 0;
1da177e4
LT
2516}
2517
9276b1bc 2518/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
f0c0b2b8 2519static void build_zonelist_cache(pg_data_t *pgdat)
9276b1bc 2520{
54a6eb5c 2521 pgdat->node_zonelists[0].zlcache_ptr = NULL;
9276b1bc
PJ
2522}
2523
1da177e4
LT
2524#endif /* CONFIG_NUMA */
2525
9b1a4d38 2526/* returns an int just to match the stop_machine() callback signature */
f0c0b2b8 2527static int __build_all_zonelists(void *dummy)
1da177e4 2528{
6811378e 2529 int nid;
9276b1bc
PJ
2530
2531 for_each_online_node(nid) {
7ea1530a
CL
2532 pg_data_t *pgdat = NODE_DATA(nid);
2533
2534 build_zonelists(pgdat);
2535 build_zonelist_cache(pgdat);
9276b1bc 2536 }
6811378e
YG
2537 return 0;
2538}
2539
f0c0b2b8 2540void build_all_zonelists(void)
6811378e 2541{
f0c0b2b8
KH
2542 set_zonelist_order();
2543
6811378e 2544 if (system_state == SYSTEM_BOOTING) {
423b41d7 2545 __build_all_zonelists(NULL);
68ad8df4 2546 mminit_verify_zonelist();
6811378e
YG
2547 cpuset_init_current_mems_allowed();
2548 } else {
183ff22b 2549 /* we have to stop all cpus to guarantee there is no user
6811378e 2550 of zonelist */
9b1a4d38 2551 stop_machine(__build_all_zonelists, NULL, NULL);
6811378e
YG
2552 /* cpuset refresh routine should be here */
2553 }
bd1e22b8 2554 vm_total_pages = nr_free_pagecache_pages();
9ef9acb0
MG
2555 /*
2556 * Disable grouping by mobility if the number of pages in the
2557 * system is too low to allow the mechanism to work. It would be
2558 * more accurate, but expensive to check per-zone. This check is
2559 * made on memory-hotadd so a system can start with mobility
2560 * disabled and enable it later
2561 */
d9c23400 2562 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
9ef9acb0
MG
2563 page_group_by_mobility_disabled = 1;
2564 else
2565 page_group_by_mobility_disabled = 0;
2566
2567 printk("Built %i zonelists in %s order, mobility grouping %s. "
2568 "Total pages: %ld\n",
f0c0b2b8
KH
2569 num_online_nodes(),
2570 zonelist_order_name[current_zonelist_order],
9ef9acb0 2571 page_group_by_mobility_disabled ? "off" : "on",
f0c0b2b8
KH
2572 vm_total_pages);
2573#ifdef CONFIG_NUMA
2574 printk("Policy zone: %s\n", zone_names[policy_zone]);
2575#endif
1da177e4
LT
2576}
2577
2578/*
2579 * Helper functions to size the waitqueue hash table.
2580 * Essentially these want to choose hash table sizes sufficiently
2581 * large so that collisions trying to wait on pages are rare.
2582 * But in fact, the number of active page waitqueues on typical
2583 * systems is ridiculously low, less than 200. So this is even
2584 * conservative, even though it seems large.
2585 *
2586 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
2587 * waitqueues, i.e. the size of the waitq table given the number of pages.
2588 */
2589#define PAGES_PER_WAITQUEUE 256
2590
cca448fe 2591#ifndef CONFIG_MEMORY_HOTPLUG
02b694de 2592static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
1da177e4
LT
2593{
2594 unsigned long size = 1;
2595
2596 pages /= PAGES_PER_WAITQUEUE;
2597
2598 while (size < pages)
2599 size <<= 1;
2600
2601 /*
2602 * Once we have dozens or even hundreds of threads sleeping
2603 * on IO we've got bigger problems than wait queue collision.
2604 * Limit the size of the wait table to a reasonable size.
2605 */
2606 size = min(size, 4096UL);
2607
2608 return max(size, 4UL);
2609}
cca448fe
YG
2610#else
2611/*
2612 * A zone's size might be changed by hot-add, so it is not possible to determine
2613 * a suitable size for its wait_table. So we use the maximum size now.
2614 *
2615 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
2616 *
2617 * i386 (preemption config) : 4096 x 16 = 64Kbyte.
2618 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
2619 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
2620 *
 2621 * The maximum number of entries is reached, under the traditional sizing
 2622 * above, once a zone has (512K + 256) pages or more. That amounts to:
2623 *
2624 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
2625 * ia64(16K page size) : = ( 8G + 4M)byte.
2626 * powerpc (64K page size) : = (32G +16M)byte.
2627 */
2628static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2629{
2630 return 4096UL;
2631}
2632#endif
1da177e4
LT
2633
2634/*
2635 * This is an integer logarithm so that shifts can be used later
2636 * to extract the more random high bits from the multiplicative
2637 * hash function before the remainder is taken.
2638 */
2639static inline unsigned long wait_table_bits(unsigned long size)
2640{
2641 return ffz(~size);
2642}
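A small worked example of the two helpers above (illustrative, assuming 4KiB pages):

/*
 * A 1GiB zone spans 262144 pages; 262144 / PAGES_PER_WAITQUEUE = 1024,
 * so, without CONFIG_MEMORY_HOTPLUG, wait_table_hash_nr_entries()
 * returns 1024 (a power of two inside the [4, 4096] clamp) and
 * wait_table_bits(1024) = 10, the shift used by the hashed waitqueue
 * lookup.
 */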
2643
2644#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
2645
56fd56b8 2646/*
d9c23400 2647 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
56fd56b8
MG
2648 * of blocks reserved is based on zone->pages_min. The memory within the
2649 * reserve will tend to store contiguous free pages. Setting min_free_kbytes
2650 * higher will lead to a bigger reserve which will get freed as contiguous
2651 * blocks as reclaim kicks in
2652 */
2653static void setup_zone_migrate_reserve(struct zone *zone)
2654{
2655 unsigned long start_pfn, pfn, end_pfn;
2656 struct page *page;
2657 unsigned long reserve, block_migratetype;
2658
2659 /* Get the start pfn, end pfn and the number of blocks to reserve */
2660 start_pfn = zone->zone_start_pfn;
2661 end_pfn = start_pfn + zone->spanned_pages;
d9c23400
MG
2662 reserve = roundup(zone->pages_min, pageblock_nr_pages) >>
2663 pageblock_order;
56fd56b8 2664
d9c23400 2665 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
56fd56b8
MG
2666 if (!pfn_valid(pfn))
2667 continue;
2668 page = pfn_to_page(pfn);
2669
344c790e
AL
2670 /* Watch out for overlapping nodes */
2671 if (page_to_nid(page) != zone_to_nid(zone))
2672 continue;
2673
56fd56b8
MG
2674 /* Blocks with reserved pages will never free, skip them. */
2675 if (PageReserved(page))
2676 continue;
2677
2678 block_migratetype = get_pageblock_migratetype(page);
2679
2680 /* If this block is reserved, account for it */
2681 if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
2682 reserve--;
2683 continue;
2684 }
2685
2686 /* Suitable for reserving if this block is movable */
2687 if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
2688 set_pageblock_migratetype(page, MIGRATE_RESERVE);
2689 move_freepages_block(zone, page, MIGRATE_RESERVE);
2690 reserve--;
2691 continue;
2692 }
2693
2694 /*
2695 * If the reserve is met and this is a previous reserved block,
2696 * take it back
2697 */
2698 if (block_migratetype == MIGRATE_RESERVE) {
2699 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2700 move_freepages_block(zone, page, MIGRATE_MOVABLE);
2701 }
2702 }
2703}
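A worked example of the reserve calculation above (illustrative numbers):

/*
 * With pageblock_order == 10 (pageblock_nr_pages == 1024) and
 * zone->pages_min == 2900, reserve = roundup(2900, 1024) >> 10 =
 * 3072 >> 10 = 3, so the first three suitable MIGRATE_MOVABLE
 * pageblocks walked by the loop are flipped to MIGRATE_RESERVE and
 * any surplus MIGRATE_RESERVE blocks found later are given back.
 */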
ac0e5b7a 2704
1da177e4
LT
2705/*
2706 * Initially all pages are reserved - free ones are freed
2707 * up by free_all_bootmem() once the early boot process is
2708 * done. Non-atomic initialization, single-pass.
2709 */
c09b4240 2710void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
a2f3aa02 2711 unsigned long start_pfn, enum memmap_context context)
1da177e4 2712{
1da177e4 2713 struct page *page;
29751f69
AW
2714 unsigned long end_pfn = start_pfn + size;
2715 unsigned long pfn;
86051ca5 2716 struct zone *z;
1da177e4 2717
22b31eec
HD
2718 if (highest_memmap_pfn < end_pfn - 1)
2719 highest_memmap_pfn = end_pfn - 1;
2720
86051ca5 2721 z = &NODE_DATA(nid)->node_zones[zone];
cbe8dd4a 2722 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
a2f3aa02
DH
2723 /*
2724 * There can be holes in boot-time mem_map[]s
2725 * handed to this function. They do not
2726 * exist on hotplugged memory.
2727 */
2728 if (context == MEMMAP_EARLY) {
2729 if (!early_pfn_valid(pfn))
2730 continue;
2731 if (!early_pfn_in_nid(pfn, nid))
2732 continue;
2733 }
d41dee36
AW
2734 page = pfn_to_page(pfn);
2735 set_page_links(page, zone, nid, pfn);
708614e6 2736 mminit_verify_page_links(page, zone, nid, pfn);
7835e98b 2737 init_page_count(page);
1da177e4
LT
2738 reset_page_mapcount(page);
2739 SetPageReserved(page);
b2a0ac88
MG
2740 /*
2741 * Mark the block movable so that blocks are reserved for
2742 * movable at startup. This will force kernel allocations
2743 * to reserve their blocks rather than leaking throughout
2744 * the address space during boot when many long-lived
56fd56b8
MG
2745 * kernel allocations are made. Later some blocks near
2746 * the start are marked MIGRATE_RESERVE by
2747 * setup_zone_migrate_reserve()
86051ca5
KH
2748 *
2749 * bitmap is created for zone's valid pfn range. but memmap
2750 * can be created for invalid pages (for alignment)
2751 * check here not to call set_pageblock_migratetype() against
2752 * pfn out of zone.
b2a0ac88 2753 */
86051ca5
KH
2754 if ((z->zone_start_pfn <= pfn)
2755 && (pfn < z->zone_start_pfn + z->spanned_pages)
2756 && !(pfn & (pageblock_nr_pages - 1)))
56fd56b8 2757 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
b2a0ac88 2758
1da177e4
LT
2759 INIT_LIST_HEAD(&page->lru);
2760#ifdef WANT_PAGE_VIRTUAL
2761 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
2762 if (!is_highmem_idx(zone))
3212c6be 2763 set_page_address(page, __va(pfn << PAGE_SHIFT));
1da177e4 2764#endif
1da177e4
LT
2765 }
2766}
2767
1e548deb 2768static void __meminit zone_init_free_lists(struct zone *zone)
1da177e4 2769{
b2a0ac88
MG
2770 int order, t;
2771 for_each_migratetype_order(order, t) {
2772 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1da177e4
LT
2773 zone->free_area[order].nr_free = 0;
2774 }
2775}
2776
2777#ifndef __HAVE_ARCH_MEMMAP_INIT
2778#define memmap_init(size, nid, zone, start_pfn) \
a2f3aa02 2779 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
1da177e4
LT
2780#endif
2781
1d6f4e60 2782static int zone_batchsize(struct zone *zone)
e7c8d5c9 2783{
3a6be87f 2784#ifdef CONFIG_MMU
e7c8d5c9
CL
2785 int batch;
2786
2787 /*
2788 * The per-cpu-pages pools are set to around 1000th of the
ba56e91c 2789 * size of the zone. But no more than 1/2 of a meg.
e7c8d5c9
CL
2790 *
2791 * OK, so we don't know how big the cache is. So guess.
2792 */
2793 batch = zone->present_pages / 1024;
ba56e91c
SR
2794 if (batch * PAGE_SIZE > 512 * 1024)
2795 batch = (512 * 1024) / PAGE_SIZE;
e7c8d5c9
CL
2796 batch /= 4; /* We effectively *= 4 below */
2797 if (batch < 1)
2798 batch = 1;
2799
2800 /*
0ceaacc9
NP
2801 * Clamp the batch to a 2^n - 1 value. Having a power
2802 * of 2 value was found to be more likely to have
2803 * suboptimal cache aliasing properties in some cases.
e7c8d5c9 2804 *
0ceaacc9
NP
2805 * For example if 2 tasks are alternately allocating
2806 * batches of pages, one task can end up with a lot
2807 * of pages of one half of the possible page colors
2808 * and the other with pages of the other colors.
e7c8d5c9 2809 */
9155203a 2810 batch = rounddown_pow_of_two(batch + batch/2) - 1;
ba56e91c 2811
e7c8d5c9 2812 return batch;
3a6be87f
DH
2813
2814#else
2815 /* The deferral and batching of frees should be suppressed under NOMMU
2816 * conditions.
2817 *
2818 * The problem is that NOMMU needs to be able to allocate large chunks
2819 * of contiguous memory as there's no hardware page translation to
2820 * assemble apparent contiguous memory from discontiguous pages.
2821 *
2822 * Queueing large contiguous runs of pages for batching, however,
2823 * causes the pages to actually be freed in smaller chunks. As there
2824 * can be a significant delay between the individual batches being
2825 * recycled, this leads to the once large chunks of space being
2826 * fragmented and becoming unavailable for high-order allocations.
2827 */
2828 return 0;
2829#endif
e7c8d5c9
CL
2830}
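A worked example of the batch sizing above (illustrative, MMU build with 4KiB pages):

/*
 * For a 1GiB zone, present_pages == 262144, so batch starts at 256;
 * 256 pages exceed 512KiB, so it is capped at 128, divided by 4 to 32,
 * and finally rounddown_pow_of_two(32 + 16) - 1 == 31 pages end up in
 * each per-cpu batch.
 */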
2831
b69a7288 2832static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
2caaad41
CL
2833{
2834 struct per_cpu_pages *pcp;
2835
1c6fe946
MD
2836 memset(p, 0, sizeof(*p));
2837
3dfa5721 2838 pcp = &p->pcp;
2caaad41 2839 pcp->count = 0;
2caaad41
CL
2840 pcp->high = 6 * batch;
2841 pcp->batch = max(1UL, 1 * batch);
2842 INIT_LIST_HEAD(&pcp->list);
2caaad41
CL
2843}
2844
8ad4b1fb
RS
2845/*
2846 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
2847 * to the value high for the pageset p.
2848 */
2849
2850static void setup_pagelist_highmark(struct per_cpu_pageset *p,
2851 unsigned long high)
2852{
2853 struct per_cpu_pages *pcp;
2854
3dfa5721 2855 pcp = &p->pcp;
8ad4b1fb
RS
2856 pcp->high = high;
2857 pcp->batch = max(1UL, high/4);
2858 if ((high/4) > (PAGE_SHIFT * 8))
2859 pcp->batch = PAGE_SHIFT * 8;
2860}
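A hedged example of the high-water-mark override (the fraction can be set at runtime through /proc/sys/vm/percpu_pagelist_fraction and is applied in process_zones() below):

/*
 * With percpu_pagelist_fraction == 8, a 262144-page zone gets
 * high == 32768; high / 4 == 8192 exceeds PAGE_SHIFT * 8 (96 with
 * 4KiB pages), so the per-cpu batch is clamped to 96.
 */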
2861
2862
e7c8d5c9
CL
2863#ifdef CONFIG_NUMA
2864/*
2caaad41
CL
2865 * Boot pageset table. One per cpu which is going to be used for all
2866 * zones and all nodes. The parameters will be set in such a way
2867 * that an item put on a list will immediately be handed over to
2868 * the buddy list. This is safe since pageset manipulation is done
2869 * with interrupts disabled.
2870 *
2871 * Some NUMA counter updates may also be caught by the boot pagesets.
b7c84c6a
CL
2872 *
2873 * The boot_pagesets must be kept even after bootup is complete for
2874 * unused processors and/or zones. They do play a role for bootstrapping
2875 * hotplugged processors.
2876 *
2877 * zoneinfo_show() and maybe other functions do
2878 * not check if the processor is online before following the pageset pointer.
2879 * Other parts of the kernel may not check if the zone is available.
2caaad41 2880 */
88a2a4ac 2881static struct per_cpu_pageset boot_pageset[NR_CPUS];
2caaad41
CL
2882
2883/*
2884 * Dynamically allocate memory for the
e7c8d5c9
CL
2885 * per cpu pageset array in struct zone.
2886 */
6292d9aa 2887static int __cpuinit process_zones(int cpu)
e7c8d5c9
CL
2888{
2889 struct zone *zone, *dzone;
37c0708d
CL
2890 int node = cpu_to_node(cpu);
2891
2892 node_set_state(node, N_CPU); /* this node has a cpu */
e7c8d5c9 2893
ee99c71c 2894 for_each_populated_zone(zone) {
23316bc8 2895 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
37c0708d 2896 GFP_KERNEL, node);
23316bc8 2897 if (!zone_pcp(zone, cpu))
e7c8d5c9 2898 goto bad;
e7c8d5c9 2899
23316bc8 2900 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
8ad4b1fb
RS
2901
2902 if (percpu_pagelist_fraction)
2903 setup_pagelist_highmark(zone_pcp(zone, cpu),
2904 (zone->present_pages / percpu_pagelist_fraction));
e7c8d5c9
CL
2905 }
2906
2907 return 0;
2908bad:
2909 for_each_zone(dzone) {
64191688
AM
2910 if (!populated_zone(dzone))
2911 continue;
e7c8d5c9
CL
2912 if (dzone == zone)
2913 break;
23316bc8
NP
2914 kfree(zone_pcp(dzone, cpu));
2915 zone_pcp(dzone, cpu) = NULL;
e7c8d5c9
CL
2916 }
2917 return -ENOMEM;
2918}
2919
2920static inline void free_zone_pagesets(int cpu)
2921{
e7c8d5c9
CL
2922 struct zone *zone;
2923
2924 for_each_zone(zone) {
2925 struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
2926
f3ef9ead
DR
2927 /* Free per_cpu_pageset if it is slab allocated */
2928 if (pset != &boot_pageset[cpu])
2929 kfree(pset);
e7c8d5c9 2930 zone_pcp(zone, cpu) = NULL;
e7c8d5c9 2931 }
e7c8d5c9
CL
2932}
2933
9c7b216d 2934static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
e7c8d5c9
CL
2935 unsigned long action,
2936 void *hcpu)
2937{
2938 int cpu = (long)hcpu;
2939 int ret = NOTIFY_OK;
2940
2941 switch (action) {
ce421c79 2942 case CPU_UP_PREPARE:
8bb78442 2943 case CPU_UP_PREPARE_FROZEN:
ce421c79
AW
2944 if (process_zones(cpu))
2945 ret = NOTIFY_BAD;
2946 break;
2947 case CPU_UP_CANCELED:
8bb78442 2948 case CPU_UP_CANCELED_FROZEN:
ce421c79 2949 case CPU_DEAD:
8bb78442 2950 case CPU_DEAD_FROZEN:
ce421c79
AW
2951 free_zone_pagesets(cpu);
2952 break;
2953 default:
2954 break;
e7c8d5c9
CL
2955 }
2956 return ret;
2957}
2958
74b85f37 2959static struct notifier_block __cpuinitdata pageset_notifier =
e7c8d5c9
CL
2960 { &pageset_cpuup_callback, NULL, 0 };
2961
78d9955b 2962void __init setup_per_cpu_pageset(void)
e7c8d5c9
CL
2963{
2964 int err;
2965
2966 /* Initialize per_cpu_pageset for cpu 0.
2967 * A cpuup callback will do this for every cpu
2968 * as it comes online
2969 */
2970 err = process_zones(smp_processor_id());
2971 BUG_ON(err);
2972 register_cpu_notifier(&pageset_notifier);
2973}
2974
2975#endif
2976
577a32f6 2977static noinline __init_refok
cca448fe 2978int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
ed8ece2e
DH
2979{
2980 int i;
2981 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe 2982 size_t alloc_size;
ed8ece2e
DH
2983
2984 /*
2985 * The per-page waitqueue mechanism uses hashed waitqueues
2986 * per zone.
2987 */
02b694de
YG
2988 zone->wait_table_hash_nr_entries =
2989 wait_table_hash_nr_entries(zone_size_pages);
2990 zone->wait_table_bits =
2991 wait_table_bits(zone->wait_table_hash_nr_entries);
cca448fe
YG
2992 alloc_size = zone->wait_table_hash_nr_entries
2993 * sizeof(wait_queue_head_t);
2994
cd94b9db 2995 if (!slab_is_available()) {
cca448fe
YG
2996 zone->wait_table = (wait_queue_head_t *)
2997 alloc_bootmem_node(pgdat, alloc_size);
2998 } else {
2999 /*
3000 * This case means that a zone whose size was 0 gets new memory
3001 * via memory hot-add.
3002 * But it may be the case that a new node was hot-added. In
3003 * this case vmalloc() will not be able to use this new node's
3004 * memory - this wait_table must be initialized to use this new
3005 * node itself as well.
3006 * To use this new node's memory, further consideration will be
3007 * necessary.
3008 */
8691f3a7 3009 zone->wait_table = vmalloc(alloc_size);
cca448fe
YG
3010 }
3011 if (!zone->wait_table)
3012 return -ENOMEM;
ed8ece2e 3013
02b694de 3014 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
ed8ece2e 3015 init_waitqueue_head(zone->wait_table + i);
cca448fe
YG
3016
3017 return 0;
ed8ece2e
DH
3018}
3019
c09b4240 3020static __meminit void zone_pcp_init(struct zone *zone)
ed8ece2e
DH
3021{
3022 int cpu;
3023 unsigned long batch = zone_batchsize(zone);
3024
3025 for (cpu = 0; cpu < NR_CPUS; cpu++) {
3026#ifdef CONFIG_NUMA
3027 /* Early boot. Slab allocator not functional yet */
23316bc8 3028 zone_pcp(zone, cpu) = &boot_pageset[cpu];
ed8ece2e
DH
3029 setup_pageset(&boot_pageset[cpu],0);
3030#else
3031 setup_pageset(zone_pcp(zone,cpu), batch);
3032#endif
3033 }
f5335c0f
AB
3034 if (zone->present_pages)
3035 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
3036 zone->name, zone->present_pages, batch);
ed8ece2e
DH
3037}
3038
718127cc
YG
3039__meminit int init_currently_empty_zone(struct zone *zone,
3040 unsigned long zone_start_pfn,
a2f3aa02
DH
3041 unsigned long size,
3042 enum memmap_context context)
ed8ece2e
DH
3043{
3044 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe
YG
3045 int ret;
3046 ret = zone_wait_table_init(zone, size);
3047 if (ret)
3048 return ret;
ed8ece2e
DH
3049 pgdat->nr_zones = zone_idx(zone) + 1;
3050
ed8ece2e
DH
3051 zone->zone_start_pfn = zone_start_pfn;
3052
708614e6
MG
3053 mminit_dprintk(MMINIT_TRACE, "memmap_init",
3054 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
3055 pgdat->node_id,
3056 (unsigned long)zone_idx(zone),
3057 zone_start_pfn, (zone_start_pfn + size));
3058
1e548deb 3059 zone_init_free_lists(zone);
718127cc
YG
3060
3061 return 0;
ed8ece2e
DH
3062}
3063
c713216d
MG
3064#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3065/*
3066 * Basic iterator support. Return the first range of PFNs for a node
3067 * Note: nid == MAX_NUMNODES returns first region regardless of node
3068 */
a3142c8e 3069static int __meminit first_active_region_index_in_nid(int nid)
c713216d
MG
3070{
3071 int i;
3072
3073 for (i = 0; i < nr_nodemap_entries; i++)
3074 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3075 return i;
3076
3077 return -1;
3078}
3079
3080/*
3081 * Basic iterator support. Return the next active range of PFNs for a node
183ff22b 3082 * Note: nid == MAX_NUMNODES returns next region regardless of node
c713216d 3083 */
a3142c8e 3084static int __meminit next_active_region_index_in_nid(int index, int nid)
c713216d
MG
3085{
3086 for (index = index + 1; index < nr_nodemap_entries; index++)
3087 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3088 return index;
3089
3090 return -1;
3091}
3092
3093#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3094/*
3095 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
3096 * Architectures may implement their own version but if add_active_range()
3097 * was used and there are no special requirements, this is a convenient
3098 * alternative
3099 */
f2dbcfa7 3100int __meminit __early_pfn_to_nid(unsigned long pfn)
c713216d
MG
3101{
3102 int i;
3103
3104 for (i = 0; i < nr_nodemap_entries; i++) {
3105 unsigned long start_pfn = early_node_map[i].start_pfn;
3106 unsigned long end_pfn = early_node_map[i].end_pfn;
3107
3108 if (start_pfn <= pfn && pfn < end_pfn)
3109 return early_node_map[i].nid;
3110 }
cc2559bc
KH
3111 /* This is a memory hole */
3112 return -1;
c713216d
MG
3113}
3114#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3115
f2dbcfa7
KH
3116int __meminit early_pfn_to_nid(unsigned long pfn)
3117{
cc2559bc
KH
3118 int nid;
3119
3120 nid = __early_pfn_to_nid(pfn);
3121 if (nid >= 0)
3122 return nid;
3123 /* just returns 0 */
3124 return 0;
f2dbcfa7
KH
3125}
3126
cc2559bc
KH
3127#ifdef CONFIG_NODES_SPAN_OTHER_NODES
3128bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3129{
3130 int nid;
3131
3132 nid = __early_pfn_to_nid(pfn);
3133 if (nid >= 0 && nid != node)
3134 return false;
3135 return true;
3136}
3137#endif
f2dbcfa7 3138
c713216d
MG
3139/* Basic iterator support to walk early_node_map[] */
3140#define for_each_active_range_index_in_nid(i, nid) \
3141 for (i = first_active_region_index_in_nid(nid); i != -1; \
3142 i = next_active_region_index_in_nid(i, nid))
3143
3144/**
3145 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
88ca3b94
RD
3146 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3147 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
c713216d
MG
3148 *
3149 * If an architecture guarantees that all ranges registered with
 3150 * add_active_ranges() contain no holes and may be freed, this
 3151 * function may be used instead of calling free_bootmem() manually.
3152 */
3153void __init free_bootmem_with_active_regions(int nid,
3154 unsigned long max_low_pfn)
3155{
3156 int i;
3157
3158 for_each_active_range_index_in_nid(i, nid) {
3159 unsigned long size_pages = 0;
3160 unsigned long end_pfn = early_node_map[i].end_pfn;
3161
3162 if (early_node_map[i].start_pfn >= max_low_pfn)
3163 continue;
3164
3165 if (end_pfn > max_low_pfn)
3166 end_pfn = max_low_pfn;
3167
3168 size_pages = end_pfn - early_node_map[i].start_pfn;
3169 free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3170 PFN_PHYS(early_node_map[i].start_pfn),
3171 size_pages << PAGE_SHIFT);
3172 }
3173}
3174
b5bc6c0e
YL
3175void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3176{
3177 int i;
d52d53b8 3178 int ret;
b5bc6c0e 3179
d52d53b8
YL
3180 for_each_active_range_index_in_nid(i, nid) {
3181 ret = work_fn(early_node_map[i].start_pfn,
3182 early_node_map[i].end_pfn, data);
3183 if (ret)
3184 break;
3185 }
b5bc6c0e 3186}
c713216d
MG
3187/**
3188 * sparse_memory_present_with_active_regions - Call memory_present for each active range
88ca3b94 3189 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
c713216d
MG
3190 *
3191 * If an architecture guarantees that all ranges registered with
3192 * add_active_ranges() contain no holes and may be freed, this
88ca3b94 3193 * function may be used instead of calling memory_present() manually.
c713216d
MG
3194 */
3195void __init sparse_memory_present_with_active_regions(int nid)
3196{
3197 int i;
3198
3199 for_each_active_range_index_in_nid(i, nid)
3200 memory_present(early_node_map[i].nid,
3201 early_node_map[i].start_pfn,
3202 early_node_map[i].end_pfn);
3203}
3204
3205/**
3206 * get_pfn_range_for_nid - Return the start and end page frames for a node
88ca3b94
RD
3207 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3208 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3209 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
c713216d
MG
3210 *
3211 * It returns the start and end page frame of a node based on information
3212 * provided by an arch calling add_active_range(). If called for a node
3213 * with no available memory, a warning is printed and the start and end
88ca3b94 3214 * PFNs will be 0.
c713216d 3215 */
a3142c8e 3216void __meminit get_pfn_range_for_nid(unsigned int nid,
c713216d
MG
3217 unsigned long *start_pfn, unsigned long *end_pfn)
3218{
3219 int i;
3220 *start_pfn = -1UL;
3221 *end_pfn = 0;
3222
3223 for_each_active_range_index_in_nid(i, nid) {
3224 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3225 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3226 }
3227
633c0666 3228 if (*start_pfn == -1UL)
c713216d 3229 *start_pfn = 0;
c713216d
MG
3230}
3231
2a1e274a
MG
3232/*
3233 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 3234 * assumption is made that zones within a node are ordered in monotonically
3235 * increasing memory addresses so that the "highest" populated zone is used
3236 */
b69a7288 3237static void __init find_usable_zone_for_movable(void)
2a1e274a
MG
3238{
3239 int zone_index;
3240 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3241 if (zone_index == ZONE_MOVABLE)
3242 continue;
3243
3244 if (arch_zone_highest_possible_pfn[zone_index] >
3245 arch_zone_lowest_possible_pfn[zone_index])
3246 break;
3247 }
3248
3249 VM_BUG_ON(zone_index == -1);
3250 movable_zone = zone_index;
3251}
3252
3253/*
3254 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 3255 * because it is sized independently of the architecture. Unlike the other zones,
3256 * the starting point for ZONE_MOVABLE is not fixed. It may be different
3257 * in each node depending on the size of each node and how evenly kernelcore
3258 * is distributed. This helper function adjusts the zone ranges
3259 * provided by the architecture for a given node by using the end of the
3260 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 3261 * zones within a node are in order of monotonically increasing memory addresses
3262 */
b69a7288 3263static void __meminit adjust_zone_range_for_zone_movable(int nid,
2a1e274a
MG
3264 unsigned long zone_type,
3265 unsigned long node_start_pfn,
3266 unsigned long node_end_pfn,
3267 unsigned long *zone_start_pfn,
3268 unsigned long *zone_end_pfn)
3269{
3270 /* Only adjust if ZONE_MOVABLE is on this node */
3271 if (zone_movable_pfn[nid]) {
3272 /* Size ZONE_MOVABLE */
3273 if (zone_type == ZONE_MOVABLE) {
3274 *zone_start_pfn = zone_movable_pfn[nid];
3275 *zone_end_pfn = min(node_end_pfn,
3276 arch_zone_highest_possible_pfn[movable_zone]);
3277
3278 /* Adjust for ZONE_MOVABLE starting within this range */
3279 } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3280 *zone_end_pfn > zone_movable_pfn[nid]) {
3281 *zone_end_pfn = zone_movable_pfn[nid];
3282
3283 /* Check if this whole range is within ZONE_MOVABLE */
3284 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
3285 *zone_start_pfn = *zone_end_pfn;
3286 }
3287}
3288
c713216d
MG
3289/*
3290 * Return the number of pages a zone spans in a node, including holes
3291 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3292 */
6ea6e688 3293static unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d
MG
3294 unsigned long zone_type,
3295 unsigned long *ignored)
3296{
3297 unsigned long node_start_pfn, node_end_pfn;
3298 unsigned long zone_start_pfn, zone_end_pfn;
3299
3300 /* Get the start and end of the node and zone */
3301 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3302 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3303 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
2a1e274a
MG
3304 adjust_zone_range_for_zone_movable(nid, zone_type,
3305 node_start_pfn, node_end_pfn,
3306 &zone_start_pfn, &zone_end_pfn);
c713216d
MG
3307
3308 /* Check that this node has pages within the zone's required range */
3309 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3310 return 0;
3311
3312 /* Move the zone boundaries inside the node if necessary */
3313 zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3314 zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3315
3316 /* Return the spanned pages */
3317 return zone_end_pfn - zone_start_pfn;
3318}
3319
3320/*
3321 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
88ca3b94 3322 * then all holes in the requested range will be accounted for.
c713216d 3323 */
b69a7288 3324static unsigned long __meminit __absent_pages_in_range(int nid,
c713216d
MG
3325 unsigned long range_start_pfn,
3326 unsigned long range_end_pfn)
3327{
3328 int i = 0;
3329 unsigned long prev_end_pfn = 0, hole_pages = 0;
3330 unsigned long start_pfn;
3331
3332 /* Find the end_pfn of the first active range of pfns in the node */
3333 i = first_active_region_index_in_nid(nid);
3334 if (i == -1)
3335 return 0;
3336
b5445f95
MG
3337 prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3338
9c7cd687
MG
3339 /* Account for ranges before physical memory on this node */
3340 if (early_node_map[i].start_pfn > range_start_pfn)
b5445f95 3341 hole_pages = prev_end_pfn - range_start_pfn;
c713216d
MG
3342
3343 /* Find all holes for the zone within the node */
3344 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3345
3346 /* No need to continue if prev_end_pfn is outside the zone */
3347 if (prev_end_pfn >= range_end_pfn)
3348 break;
3349
3350 /* Make sure the end of the zone is not within the hole */
3351 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3352 prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3353
3354		/* Update the hole size count and move on */
3355 if (start_pfn > range_start_pfn) {
3356 BUG_ON(prev_end_pfn > start_pfn);
3357 hole_pages += start_pfn - prev_end_pfn;
3358 }
3359 prev_end_pfn = early_node_map[i].end_pfn;
3360 }
3361
9c7cd687
MG
3362 /* Account for ranges past physical memory on this node */
3363 if (range_end_pfn > prev_end_pfn)
0c6cb974 3364 hole_pages += range_end_pfn -
9c7cd687
MG
3365 max(range_start_pfn, prev_end_pfn);
3366
c713216d
MG
3367 return hole_pages;
3368}
3369
3370/**
3371 * absent_pages_in_range - Return number of page frames in holes within a range
3372 * @start_pfn: The start PFN to start searching for holes
3373 * @end_pfn: The end PFN to stop searching for holes
3374 *
88ca3b94 3375 * It returns the number of page frames in memory holes within a range.
c713216d
MG
3376 */
3377unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3378 unsigned long end_pfn)
3379{
3380 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3381}
3382
3383/* Return the number of page frames in holes in a zone on a node */
6ea6e688 3384static unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d
MG
3385 unsigned long zone_type,
3386 unsigned long *ignored)
3387{
9c7cd687
MG
3388 unsigned long node_start_pfn, node_end_pfn;
3389 unsigned long zone_start_pfn, zone_end_pfn;
3390
3391 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3392 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3393 node_start_pfn);
3394 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3395 node_end_pfn);
3396
2a1e274a
MG
3397 adjust_zone_range_for_zone_movable(nid, zone_type,
3398 node_start_pfn, node_end_pfn,
3399 &zone_start_pfn, &zone_end_pfn);
9c7cd687 3400 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
c713216d 3401}
0e0b864e 3402
c713216d 3403#else
6ea6e688 3404static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d
MG
3405 unsigned long zone_type,
3406 unsigned long *zones_size)
3407{
3408 return zones_size[zone_type];
3409}
3410
6ea6e688 3411static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d
MG
3412 unsigned long zone_type,
3413 unsigned long *zholes_size)
3414{
3415 if (!zholes_size)
3416 return 0;
3417
3418 return zholes_size[zone_type];
3419}
0e0b864e 3420
c713216d
MG
3421#endif
3422
a3142c8e 3423static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
c713216d
MG
3424 unsigned long *zones_size, unsigned long *zholes_size)
3425{
3426 unsigned long realtotalpages, totalpages = 0;
3427 enum zone_type i;
3428
3429 for (i = 0; i < MAX_NR_ZONES; i++)
3430 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3431 zones_size);
3432 pgdat->node_spanned_pages = totalpages;
3433
3434 realtotalpages = totalpages;
3435 for (i = 0; i < MAX_NR_ZONES; i++)
3436 realtotalpages -=
3437 zone_absent_pages_in_node(pgdat->node_id, i,
3438 zholes_size);
3439 pgdat->node_present_pages = realtotalpages;
3440 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3441 realtotalpages);
3442}
3443
835c134e
MG
3444#ifndef CONFIG_SPARSEMEM
3445/*
3446 * Calculate the size of the zone->blockflags rounded to an unsigned long
d9c23400
MG
3447 * Start by making sure zonesize is a multiple of pageblock_nr_pages by rounding
3448 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
835c134e
MG
3449 * round what is now in bits to nearest long in bits, then return it in
3450 * bytes.
3451 */
3452static unsigned long __init usemap_size(unsigned long zonesize)
3453{
3454 unsigned long usemapsize;
3455
d9c23400
MG
3456 usemapsize = roundup(zonesize, pageblock_nr_pages);
3457 usemapsize = usemapsize >> pageblock_order;
835c134e
MG
3458 usemapsize *= NR_PAGEBLOCK_BITS;
3459 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3460
3461 return usemapsize / 8;
3462}
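/*
 * Editorial worked example (not part of the original file): with
 * pageblock_order == 9 (pageblock_nr_pages == 512), NR_PAGEBLOCK_BITS == 4
 * and a zone of 1048576 pages, the steps above give 2048 pageblocks,
 * 2048 * 4 = 8192 bits, which is already a multiple of 8 * sizeof(long),
 * so usemap_size() returns 8192 / 8 = 1024 bytes.
 */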
3463
3464static void __init setup_usemap(struct pglist_data *pgdat,
3465 struct zone *zone, unsigned long zonesize)
3466{
3467 unsigned long usemapsize = usemap_size(zonesize);
3468 zone->pageblock_flags = NULL;
58a01a45 3469 if (usemapsize)
835c134e 3470 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
835c134e
MG
3471}
3472#else
3473static inline void setup_usemap(struct pglist_data *pgdat,
3474 struct zone *zone, unsigned long zonesize) {}
3475#endif /* CONFIG_SPARSEMEM */
3476
d9c23400 3477#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
ba72cb8c
MG
3478
3479/* Return a sensible default order for the pageblock size. */
3480static inline int pageblock_default_order(void)
3481{
3482 if (HPAGE_SHIFT > PAGE_SHIFT)
3483 return HUGETLB_PAGE_ORDER;
3484
3485 return MAX_ORDER-1;
3486}
3487
d9c23400
MG
3488/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
3489static inline void __init set_pageblock_order(unsigned int order)
3490{
3491 /* Check that pageblock_nr_pages has not already been setup */
3492 if (pageblock_order)
3493 return;
3494
3495 /*
3496 * Assume the largest contiguous order of interest is a huge page.
3497 * This value may be variable depending on boot parameters on IA64
3498 */
3499 pageblock_order = order;
3500}
3501#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3502
ba72cb8c
MG
3503/*
3504 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
3505 * and pageblock_default_order() are unused as pageblock_order is set
3506 * at compile-time. See include/linux/pageblock-flags.h for the values of
3507 * pageblock_order based on the kernel config
3508 */
3509static inline int pageblock_default_order(unsigned int order)
3510{
3511 return MAX_ORDER-1;
3512}
d9c23400
MG
3513#define set_pageblock_order(x) do {} while (0)
3514
3515#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3516
1da177e4
LT
3517/*
3518 * Set up the zone data structures:
3519 * - mark all pages reserved
3520 * - mark all memory queues empty
3521 * - clear the memory bitmaps
3522 */
b5a0e011 3523static void __paginginit free_area_init_core(struct pglist_data *pgdat,
1da177e4
LT
3524 unsigned long *zones_size, unsigned long *zholes_size)
3525{
2f1b6248 3526 enum zone_type j;
ed8ece2e 3527 int nid = pgdat->node_id;
1da177e4 3528 unsigned long zone_start_pfn = pgdat->node_start_pfn;
718127cc 3529 int ret;
1da177e4 3530
208d54e5 3531 pgdat_resize_init(pgdat);
1da177e4
LT
3532 pgdat->nr_zones = 0;
3533 init_waitqueue_head(&pgdat->kswapd_wait);
3534 pgdat->kswapd_max_order = 0;
52d4b9ac 3535 pgdat_page_cgroup_init(pgdat);
1da177e4
LT
3536
3537 for (j = 0; j < MAX_NR_ZONES; j++) {
3538 struct zone *zone = pgdat->node_zones + j;
0e0b864e 3539 unsigned long size, realsize, memmap_pages;
b69408e8 3540 enum lru_list l;
1da177e4 3541
c713216d
MG
3542 size = zone_spanned_pages_in_node(nid, j, zones_size);
3543 realsize = size - zone_absent_pages_in_node(nid, j,
3544 zholes_size);
1da177e4 3545
0e0b864e
MG
3546 /*
3547 * Adjust realsize so that it accounts for how much memory
3548 * is used by this zone for memmap. This affects the watermark
3549 * and per-cpu initialisations
3550 */
f7232154
JW
3551 memmap_pages =
3552 PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
0e0b864e
MG
3553 if (realsize >= memmap_pages) {
3554 realsize -= memmap_pages;
5594c8c8
YL
3555 if (memmap_pages)
3556 printk(KERN_DEBUG
3557 " %s zone: %lu pages used for memmap\n",
3558 zone_names[j], memmap_pages);
0e0b864e
MG
3559 } else
3560 printk(KERN_WARNING
3561 " %s zone: %lu pages exceeds realsize %lu\n",
3562 zone_names[j], memmap_pages, realsize);
3563
6267276f
CL
3564 /* Account for reserved pages */
3565 if (j == 0 && realsize > dma_reserve) {
0e0b864e 3566 realsize -= dma_reserve;
d903ef9f 3567 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
6267276f 3568 zone_names[0], dma_reserve);
0e0b864e
MG
3569 }
3570
98d2b0eb 3571 if (!is_highmem_idx(j))
1da177e4
LT
3572 nr_kernel_pages += realsize;
3573 nr_all_pages += realsize;
3574
3575 zone->spanned_pages = size;
3576 zone->present_pages = realsize;
9614634f 3577#ifdef CONFIG_NUMA
d5f541ed 3578 zone->node = nid;
8417bba4 3579 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
9614634f 3580 / 100;
0ff38490 3581 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
9614634f 3582#endif
1da177e4
LT
3583 zone->name = zone_names[j];
3584 spin_lock_init(&zone->lock);
3585 spin_lock_init(&zone->lru_lock);
bdc8cb98 3586 zone_seqlock_init(zone);
1da177e4 3587 zone->zone_pgdat = pgdat;
1da177e4 3588
3bb1a852 3589 zone->prev_priority = DEF_PRIORITY;
1da177e4 3590
ed8ece2e 3591 zone_pcp_init(zone);
b69408e8
CL
3592 for_each_lru(l) {
3593 INIT_LIST_HEAD(&zone->lru[l].list);
3594 zone->lru[l].nr_scan = 0;
3595 }
6e901571
KM
3596 zone->reclaim_stat.recent_rotated[0] = 0;
3597 zone->reclaim_stat.recent_rotated[1] = 0;
3598 zone->reclaim_stat.recent_scanned[0] = 0;
3599 zone->reclaim_stat.recent_scanned[1] = 0;
2244b95a 3600 zap_zone_vm_stats(zone);
e815af95 3601 zone->flags = 0;
1da177e4
LT
3602 if (!size)
3603 continue;
3604
ba72cb8c 3605 set_pageblock_order(pageblock_default_order());
835c134e 3606 setup_usemap(pgdat, zone, size);
a2f3aa02
DH
3607 ret = init_currently_empty_zone(zone, zone_start_pfn,
3608 size, MEMMAP_EARLY);
718127cc 3609 BUG_ON(ret);
76cdd58e 3610 memmap_init(size, nid, j, zone_start_pfn);
1da177e4 3611 zone_start_pfn += size;
1da177e4
LT
3612 }
3613}
3614
577a32f6 3615static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
1da177e4 3616{
1da177e4
LT
3617 /* Skip empty nodes */
3618 if (!pgdat->node_spanned_pages)
3619 return;
3620
d41dee36 3621#ifdef CONFIG_FLAT_NODE_MEM_MAP
1da177e4
LT
3622 /* ia64 gets its own node_mem_map, before this, without bootmem */
3623 if (!pgdat->node_mem_map) {
e984bb43 3624 unsigned long size, start, end;
d41dee36
AW
3625 struct page *map;
3626
e984bb43
BP
3627 /*
3628 * The zone's endpoints aren't required to be MAX_ORDER
3629 * aligned, but the node_mem_map endpoints must be, in order
3630 * for the buddy allocator to function correctly.
3631 */
3632 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
3633 end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
3634 end = ALIGN(end, MAX_ORDER_NR_PAGES);
3635 size = (end - start) * sizeof(struct page);
6f167ec7
DH
3636 map = alloc_remap(pgdat->node_id, size);
3637 if (!map)
3638 map = alloc_bootmem_node(pgdat, size);
e984bb43 3639 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
1da177e4 3640 }
12d810c1 3641#ifndef CONFIG_NEED_MULTIPLE_NODES
1da177e4
LT
3642 /*
3643 * With no DISCONTIG, the global mem_map is just set as node 0's
3644 */
c713216d 3645 if (pgdat == NODE_DATA(0)) {
1da177e4 3646 mem_map = NODE_DATA(0)->node_mem_map;
c713216d
MG
3647#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3648 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
467bc461 3649 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
c713216d
MG
3650#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3651 }
1da177e4 3652#endif
d41dee36 3653#endif /* CONFIG_FLAT_NODE_MEM_MAP */
1da177e4
LT
3654}
3655
9109fb7b
JW
3656void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
3657 unsigned long node_start_pfn, unsigned long *zholes_size)
1da177e4 3658{
9109fb7b
JW
3659 pg_data_t *pgdat = NODE_DATA(nid);
3660
1da177e4
LT
3661 pgdat->node_id = nid;
3662 pgdat->node_start_pfn = node_start_pfn;
c713216d 3663 calculate_node_totalpages(pgdat, zones_size, zholes_size);
1da177e4
LT
3664
3665 alloc_node_mem_map(pgdat);
e8c27ac9
YL
3666#ifdef CONFIG_FLAT_NODE_MEM_MAP
3667 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
3668 nid, (unsigned long)pgdat,
3669 (unsigned long)pgdat->node_mem_map);
3670#endif
1da177e4
LT
3671
3672 free_area_init_core(pgdat, zones_size, zholes_size);
3673}
3674
c713216d 3675#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
418508c1
MS
3676
3677#if MAX_NUMNODES > 1
3678/*
3679 * Figure out the number of possible node ids.
3680 */
3681static void __init setup_nr_node_ids(void)
3682{
3683 unsigned int node;
3684 unsigned int highest = 0;
3685
3686 for_each_node_mask(node, node_possible_map)
3687 highest = node;
3688 nr_node_ids = highest + 1;
3689}
3690#else
3691static inline void setup_nr_node_ids(void)
3692{
3693}
3694#endif
3695
c713216d
MG
3696/**
3697 * add_active_range - Register a range of PFNs backed by physical memory
3698 * @nid: The node ID the range resides on
3699 * @start_pfn: The start PFN of the available physical memory
3700 * @end_pfn: The end PFN of the available physical memory
3701 *
3702 * These ranges are stored in an early_node_map[] and later used by
3703 * free_area_init_nodes() to calculate zone sizes and holes. If the
3704 * range spans a memory hole, it is up to the architecture to ensure
3705 * the memory is not freed by the bootmem allocator. If possible
3706 * the range being registered will be merged with existing ranges.
3707 */
3708void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3709 unsigned long end_pfn)
3710{
3711 int i;
3712
6b74ab97
MG
3713 mminit_dprintk(MMINIT_TRACE, "memory_register",
3714 "Entering add_active_range(%d, %#lx, %#lx) "
3715 "%d entries of %d used\n",
3716 nid, start_pfn, end_pfn,
3717 nr_nodemap_entries, MAX_ACTIVE_REGIONS);
c713216d 3718
2dbb51c4
MG
3719 mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
3720
c713216d
MG
3721 /* Merge with existing active regions if possible */
3722 for (i = 0; i < nr_nodemap_entries; i++) {
3723 if (early_node_map[i].nid != nid)
3724 continue;
3725
3726 /* Skip if an existing region covers this new one */
3727 if (start_pfn >= early_node_map[i].start_pfn &&
3728 end_pfn <= early_node_map[i].end_pfn)
3729 return;
3730
3731 /* Merge forward if suitable */
3732 if (start_pfn <= early_node_map[i].end_pfn &&
3733 end_pfn > early_node_map[i].end_pfn) {
3734 early_node_map[i].end_pfn = end_pfn;
3735 return;
3736 }
3737
3738 /* Merge backward if suitable */
3739 if (start_pfn < early_node_map[i].end_pfn &&
3740 end_pfn >= early_node_map[i].start_pfn) {
3741 early_node_map[i].start_pfn = start_pfn;
3742 return;
3743 }
3744 }
3745
3746 /* Check that early_node_map is large enough */
3747 if (i >= MAX_ACTIVE_REGIONS) {
3748 printk(KERN_CRIT "More than %d memory regions, truncating\n",
3749 MAX_ACTIVE_REGIONS);
3750 return;
3751 }
3752
3753 early_node_map[i].nid = nid;
3754 early_node_map[i].start_pfn = start_pfn;
3755 early_node_map[i].end_pfn = end_pfn;
3756 nr_nodemap_entries = i + 1;
3757}
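/*
 * Editorial note (not part of the original file): architectures feed this
 * from their firmware memory maps. On x86 of this vintage, for instance,
 * e820_register_active_regions() walks the e820 map and issues
 * add_active_range(nid, start_pfn, last_pfn) for every usable RAM range
 * before free_area_init_nodes() is called.
 */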
3758
3759/**
cc1050ba 3760 * remove_active_range - Shrink an existing registered range of PFNs
c713216d 3761 * @nid: The node id the range is on that should be shrunk
cc1050ba
YL
3762 * @start_pfn: The start PFN of the range to remove
3763 * @end_pfn: The end PFN of the range to remove
c713216d
MG
3764 *
3765 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
cc1a9d86
YL
3766 * The map is kept near the end of the physical page range that has already been
3767 * registered. This function allows an arch to shrink an existing registered
3768 * range.
c713216d 3769 */
cc1050ba
YL
3770void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
3771 unsigned long end_pfn)
c713216d 3772{
cc1a9d86
YL
3773 int i, j;
3774 int removed = 0;
c713216d 3775
cc1050ba
YL
3776 printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
3777 nid, start_pfn, end_pfn);
3778
c713216d 3779 /* Find the old active region end and shrink */
cc1a9d86 3780 for_each_active_range_index_in_nid(i, nid) {
cc1050ba
YL
3781 if (early_node_map[i].start_pfn >= start_pfn &&
3782 early_node_map[i].end_pfn <= end_pfn) {
cc1a9d86 3783 /* clear it */
cc1050ba 3784 early_node_map[i].start_pfn = 0;
cc1a9d86
YL
3785 early_node_map[i].end_pfn = 0;
3786 removed = 1;
3787 continue;
3788 }
cc1050ba
YL
3789 if (early_node_map[i].start_pfn < start_pfn &&
3790 early_node_map[i].end_pfn > start_pfn) {
3791 unsigned long temp_end_pfn = early_node_map[i].end_pfn;
3792 early_node_map[i].end_pfn = start_pfn;
3793 if (temp_end_pfn > end_pfn)
3794 add_active_range(nid, end_pfn, temp_end_pfn);
3795 continue;
3796 }
3797 if (early_node_map[i].start_pfn >= start_pfn &&
3798 early_node_map[i].end_pfn > end_pfn &&
3799 early_node_map[i].start_pfn < end_pfn) {
3800 early_node_map[i].start_pfn = end_pfn;
cc1a9d86 3801 continue;
c713216d 3802 }
cc1a9d86
YL
3803 }
3804
3805 if (!removed)
3806 return;
3807
3808 /* remove the blank ones */
3809 for (i = nr_nodemap_entries - 1; i > 0; i--) {
3810 if (early_node_map[i].nid != nid)
3811 continue;
3812 if (early_node_map[i].end_pfn)
3813 continue;
3814 /* we found it, get rid of it */
3815 for (j = i; j < nr_nodemap_entries - 1; j++)
3816 memcpy(&early_node_map[j], &early_node_map[j+1],
3817 sizeof(early_node_map[j]));
3818 j = nr_nodemap_entries - 1;
3819 memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
3820 nr_nodemap_entries--;
3821 }
c713216d
MG
3822}
3823
3824/**
3825 * remove_all_active_ranges - Remove all currently registered regions
88ca3b94 3826 *
c713216d
MG
3827 * During discovery, it may be found that a table like SRAT is invalid
3828 * and an alternative discovery method must be used. This function removes
3829 * all currently registered regions.
3830 */
88ca3b94 3831void __init remove_all_active_ranges(void)
c713216d
MG
3832{
3833 memset(early_node_map, 0, sizeof(early_node_map));
3834 nr_nodemap_entries = 0;
3835}
3836
3837/* Compare two active node_active_regions */
3838static int __init cmp_node_active_region(const void *a, const void *b)
3839{
3840 struct node_active_region *arange = (struct node_active_region *)a;
3841 struct node_active_region *brange = (struct node_active_region *)b;
3842
3843 /* Done this way to avoid overflows */
3844 if (arange->start_pfn > brange->start_pfn)
3845 return 1;
3846 if (arange->start_pfn < brange->start_pfn)
3847 return -1;
3848
3849 return 0;
3850}
3851
3852/* sort the node_map by start_pfn */
3853static void __init sort_node_map(void)
3854{
3855 sort(early_node_map, (size_t)nr_nodemap_entries,
3856 sizeof(struct node_active_region),
3857 cmp_node_active_region, NULL);
3858}
3859
a6af2bc3 3860/* Find the lowest pfn for a node */
b69a7288 3861static unsigned long __init find_min_pfn_for_node(int nid)
c713216d
MG
3862{
3863 int i;
a6af2bc3 3864 unsigned long min_pfn = ULONG_MAX;
1abbfb41 3865
c713216d
MG
3866 /* Assuming a sorted map, the first range found has the starting pfn */
3867 for_each_active_range_index_in_nid(i, nid)
a6af2bc3 3868 min_pfn = min(min_pfn, early_node_map[i].start_pfn);
c713216d 3869
a6af2bc3
MG
3870 if (min_pfn == ULONG_MAX) {
3871 printk(KERN_WARNING
2bc0d261 3872 "Could not find start_pfn for node %d\n", nid);
a6af2bc3
MG
3873 return 0;
3874 }
3875
3876 return min_pfn;
c713216d
MG
3877}
3878
3879/**
3880 * find_min_pfn_with_active_regions - Find the minimum PFN registered
3881 *
3882 * It returns the minimum PFN based on information provided via
88ca3b94 3883 * add_active_range().
c713216d
MG
3884 */
3885unsigned long __init find_min_pfn_with_active_regions(void)
3886{
3887 return find_min_pfn_for_node(MAX_NUMNODES);
3888}
3889
37b07e41
LS
3890/*
3891 * early_calculate_totalpages()
3892 * Sum pages in active regions for movable zone.
3893 * Populate N_HIGH_MEMORY for calculating usable_nodes.
3894 */
484f51f8 3895static unsigned long __init early_calculate_totalpages(void)
7e63efef
MG
3896{
3897 int i;
3898 unsigned long totalpages = 0;
3899
37b07e41
LS
3900 for (i = 0; i < nr_nodemap_entries; i++) {
3901 unsigned long pages = early_node_map[i].end_pfn -
7e63efef 3902 early_node_map[i].start_pfn;
37b07e41
LS
3903 totalpages += pages;
3904 if (pages)
3905 node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
3906 }
3907 return totalpages;
7e63efef
MG
3908}
3909
2a1e274a
MG
3910/*
3911 * Find the PFN the Movable zone begins in each node. Kernel memory
3912 * is spread evenly between nodes as long as the nodes have enough
3913 * memory. When they don't, some nodes will have more kernelcore than
3914 * others.
3915 */
b69a7288 3916static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
2a1e274a
MG
3917{
3918 int i, nid;
3919 unsigned long usable_startpfn;
3920 unsigned long kernelcore_node, kernelcore_remaining;
37b07e41
LS
3921 unsigned long totalpages = early_calculate_totalpages();
3922 int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
2a1e274a 3923
7e63efef
MG
3924 /*
3925 * If movablecore was specified, calculate the corresponding size of
3926 * kernelcore so that memory usable for
3927 * any allocation type is evenly spread. If both kernelcore
3928 * and movablecore are specified, then the value of kernelcore
3929 * will be used for required_kernelcore if it's greater than
3930 * what movablecore would have allowed.
3931 */
3932 if (required_movablecore) {
7e63efef
MG
3933 unsigned long corepages;
3934
3935 /*
3936 * Round-up so that ZONE_MOVABLE is at least as large as what
3937 * was requested by the user
3938 */
3939 required_movablecore =
3940 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
3941 corepages = totalpages - required_movablecore;
3942
3943 required_kernelcore = max(required_kernelcore, corepages);
3944 }
3945
2a1e274a
MG
3946 /* If kernelcore was not specified, there is no ZONE_MOVABLE */
3947 if (!required_kernelcore)
3948 return;
3949
3950 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
3951 find_usable_zone_for_movable();
3952 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
3953
3954restart:
3955 /* Spread kernelcore memory as evenly as possible throughout nodes */
3956 kernelcore_node = required_kernelcore / usable_nodes;
37b07e41 3957 for_each_node_state(nid, N_HIGH_MEMORY) {
2a1e274a
MG
3958 /*
3959 * Recalculate kernelcore_node if the division per node
3960 * now exceeds what is necessary to satisfy the requested
3961 * amount of memory for the kernel
3962 */
3963 if (required_kernelcore < kernelcore_node)
3964 kernelcore_node = required_kernelcore / usable_nodes;
3965
3966 /*
3967 * As the map is walked, we track how much memory is usable
3968 * by the kernel using kernelcore_remaining. When it is
3969 * 0, the rest of the node is usable by ZONE_MOVABLE
3970 */
3971 kernelcore_remaining = kernelcore_node;
3972
3973 /* Go through each range of PFNs within this node */
3974 for_each_active_range_index_in_nid(i, nid) {
3975 unsigned long start_pfn, end_pfn;
3976 unsigned long size_pages;
3977
3978 start_pfn = max(early_node_map[i].start_pfn,
3979 zone_movable_pfn[nid]);
3980 end_pfn = early_node_map[i].end_pfn;
3981 if (start_pfn >= end_pfn)
3982 continue;
3983
3984 /* Account for what is only usable for kernelcore */
3985 if (start_pfn < usable_startpfn) {
3986 unsigned long kernel_pages;
3987 kernel_pages = min(end_pfn, usable_startpfn)
3988 - start_pfn;
3989
3990 kernelcore_remaining -= min(kernel_pages,
3991 kernelcore_remaining);
3992 required_kernelcore -= min(kernel_pages,
3993 required_kernelcore);
3994
3995 /* Continue if range is now fully accounted */
3996 if (end_pfn <= usable_startpfn) {
3997
3998 /*
3999 * Push zone_movable_pfn to the end so
4000 * that if we have to rebalance
4001 * kernelcore across nodes, we will
4002 * not double account here
4003 */
4004 zone_movable_pfn[nid] = end_pfn;
4005 continue;
4006 }
4007 start_pfn = usable_startpfn;
4008 }
4009
4010 /*
4011 * The usable PFN range for ZONE_MOVABLE is from
4012 * start_pfn->end_pfn. Calculate size_pages as the
4013 * number of pages used as kernelcore
4014 */
4015 size_pages = end_pfn - start_pfn;
4016 if (size_pages > kernelcore_remaining)
4017 size_pages = kernelcore_remaining;
4018 zone_movable_pfn[nid] = start_pfn + size_pages;
4019
4020 /*
4021 * Some kernelcore has been met, update counts and
4022 * break if the kernelcore for this node has been
4023 * satisfied
4024 */
4025 required_kernelcore -= min(required_kernelcore,
4026 size_pages);
4027 kernelcore_remaining -= size_pages;
4028 if (!kernelcore_remaining)
4029 break;
4030 }
4031 }
4032
4033 /*
4034 * If there is still required_kernelcore, we do another pass with one
4035 * less node in the count. This will push zone_movable_pfn[nid] further
4036 * along on the nodes that still have memory until kernelcore is
4037 * satisfied
4038 */
4039 usable_nodes--;
4040 if (usable_nodes && required_kernelcore > usable_nodes)
4041 goto restart;
4042
4043 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4044 for (nid = 0; nid < MAX_NUMNODES; nid++)
4045 zone_movable_pfn[nid] =
4046 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
4047}
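/*
 * Editorial worked example (not part of the original file): booting with
 * kernelcore=2G on two hole-free nodes that both have memory asks for
 * 524288 pages of kernelcore with 4K pages, so each node is first assigned
 * kernelcore_node = 262144 pages; zone_movable_pfn[nid] then ends up
 * 262144 pages into each node's usable range (rounded up to
 * MAX_ORDER_NR_PAGES) and everything above it becomes ZONE_MOVABLE.
 */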
4048
37b07e41
LS
4049/* Any regular memory on that node ? */
4050static void check_for_regular_memory(pg_data_t *pgdat)
4051{
4052#ifdef CONFIG_HIGHMEM
4053 enum zone_type zone_type;
4054
4055 for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4056 struct zone *zone = &pgdat->node_zones[zone_type];
4057 if (zone->present_pages)
4058 node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4059 }
4060#endif
4061}
4062
c713216d
MG
4063/**
4064 * free_area_init_nodes - Initialise all pg_data_t and zone data
88ca3b94 4065 * @max_zone_pfn: an array of max PFNs for each zone
c713216d
MG
4066 *
4067 * This will call free_area_init_node() for each active node in the system.
4068 * Using the page ranges provided by add_active_range(), the size of each
4069 * zone in each node and their holes are calculated. If the maximum PFN
4070 * between two adjacent zones match, it is assumed that the zone is empty.
4071 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
4072 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
4073 * starts where the previous one ended. For example, ZONE_DMA32 starts
4074 * at arch_max_dma_pfn.
4075 */
4076void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4077{
4078 unsigned long nid;
db99100d 4079 int i;
c713216d 4080
a6af2bc3
MG
4081 /* Sort early_node_map as initialisation assumes it is sorted */
4082 sort_node_map();
4083
c713216d
MG
4084 /* Record where the zone boundaries are */
4085 memset(arch_zone_lowest_possible_pfn, 0,
4086 sizeof(arch_zone_lowest_possible_pfn));
4087 memset(arch_zone_highest_possible_pfn, 0,
4088 sizeof(arch_zone_highest_possible_pfn));
4089 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4090 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4091 for (i = 1; i < MAX_NR_ZONES; i++) {
2a1e274a
MG
4092 if (i == ZONE_MOVABLE)
4093 continue;
c713216d
MG
4094 arch_zone_lowest_possible_pfn[i] =
4095 arch_zone_highest_possible_pfn[i-1];
4096 arch_zone_highest_possible_pfn[i] =
4097 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4098 }
2a1e274a
MG
4099 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4100 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4101
4102 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
4103 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4104 find_zone_movable_pfns_for_nodes(zone_movable_pfn);
c713216d 4105
c713216d
MG
4106 /* Print out the zone ranges */
4107 printk("Zone PFN ranges:\n");
2a1e274a
MG
4108 for (i = 0; i < MAX_NR_ZONES; i++) {
4109 if (i == ZONE_MOVABLE)
4110 continue;
5dab8ec1 4111 printk(" %-8s %0#10lx -> %0#10lx\n",
c713216d
MG
4112 zone_names[i],
4113 arch_zone_lowest_possible_pfn[i],
4114 arch_zone_highest_possible_pfn[i]);
2a1e274a
MG
4115 }
4116
4117 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
4118 printk("Movable zone start PFN for each node\n");
4119 for (i = 0; i < MAX_NUMNODES; i++) {
4120 if (zone_movable_pfn[i])
4121 printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
4122 }
c713216d
MG
4123
4124 /* Print out the early_node_map[] */
4125 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4126 for (i = 0; i < nr_nodemap_entries; i++)
5dab8ec1 4127 printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
c713216d
MG
4128 early_node_map[i].start_pfn,
4129 early_node_map[i].end_pfn);
4130
4131 /* Initialise every node */
708614e6 4132 mminit_verify_pageflags_layout();
8ef82866 4133 setup_nr_node_ids();
c713216d
MG
4134 for_each_online_node(nid) {
4135 pg_data_t *pgdat = NODE_DATA(nid);
9109fb7b 4136 free_area_init_node(nid, NULL,
c713216d 4137 find_min_pfn_for_node(nid), NULL);
37b07e41
LS
4138
4139 /* Any memory on that node */
4140 if (pgdat->node_present_pages)
4141 node_set_state(nid, N_HIGH_MEMORY);
4142 check_for_regular_memory(pgdat);
c713216d
MG
4143 }
4144}
2a1e274a 4145
7e63efef 4146static int __init cmdline_parse_core(char *p, unsigned long *core)
2a1e274a
MG
4147{
4148 unsigned long long coremem;
4149 if (!p)
4150 return -EINVAL;
4151
4152 coremem = memparse(p, &p);
7e63efef 4153 *core = coremem >> PAGE_SHIFT;
2a1e274a 4154
7e63efef 4155 /* Paranoid check that UL is enough for the coremem value */
2a1e274a
MG
4156 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4157
4158 return 0;
4159}
ed7ed365 4160
7e63efef
MG
4161/*
4162 * kernelcore=size sets the amount of memory to use for allocations that
4163 * cannot be reclaimed or migrated.
4164 */
4165static int __init cmdline_parse_kernelcore(char *p)
4166{
4167 return cmdline_parse_core(p, &required_kernelcore);
4168}
4169
4170/*
4171 * movablecore=size sets the amount of memory to use for allocations that
4172 * can be reclaimed or migrated.
4173 */
4174static int __init cmdline_parse_movablecore(char *p)
4175{
4176 return cmdline_parse_core(p, &required_movablecore);
4177}
4178
ed7ed365 4179early_param("kernelcore", cmdline_parse_kernelcore);
7e63efef 4180early_param("movablecore", cmdline_parse_movablecore);
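/*
 * Editorial example (not part of the original file): booting with
 * "kernelcore=512M" makes memparse() return 512 << 20, so
 * required_kernelcore becomes (512 << 20) >> PAGE_SHIFT = 131072 pages
 * with 4K pages; "movablecore=" is handled the same way.
 */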
ed7ed365 4181
c713216d
MG
4182#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4183
0e0b864e 4184/**
88ca3b94
RD
4185 * set_dma_reserve - set the specified number of pages reserved in the first zone
4186 * @new_dma_reserve: The number of pages to mark reserved
0e0b864e
MG
4187 *
4188 * The per-cpu batchsize and zone watermarks are determined by present_pages.
4189 * In the DMA zone, a significant percentage may be consumed by the kernel
4190 * image and other unfreeable allocations, which can skew the watermarks badly. This
88ca3b94
RD
4191 * function may optionally be used to account for unfreeable pages in the
4192 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4193 * smaller per-cpu batchsize.
0e0b864e
MG
4194 */
4195void __init set_dma_reserve(unsigned long new_dma_reserve)
4196{
4197 dma_reserve = new_dma_reserve;
4198}
4199
93b7504e 4200#ifndef CONFIG_NEED_MULTIPLE_NODES
52765583 4201struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
1da177e4 4202EXPORT_SYMBOL(contig_page_data);
93b7504e 4203#endif
1da177e4
LT
4204
4205void __init free_area_init(unsigned long *zones_size)
4206{
9109fb7b 4207 free_area_init_node(0, zones_size,
1da177e4
LT
4208 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4209}
1da177e4 4210
1da177e4
LT
4211static int page_alloc_cpu_notify(struct notifier_block *self,
4212 unsigned long action, void *hcpu)
4213{
4214 int cpu = (unsigned long)hcpu;
1da177e4 4215
8bb78442 4216 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
9f8f2172
CL
4217 drain_pages(cpu);
4218
4219 /*
4220 * Spill the event counters of the dead processor
4221 * into the current processors event counters.
4222 * This artificially elevates the count of the current
4223 * processor.
4224 */
f8891e5e 4225 vm_events_fold_cpu(cpu);
9f8f2172
CL
4226
4227 /*
4228 * Zero the differential counters of the dead processor
4229 * so that the vm statistics are consistent.
4230 *
4231 * This is only okay since the processor is dead and cannot
4232 * race with what we are doing.
4233 */
2244b95a 4234 refresh_cpu_vm_stats(cpu);
1da177e4
LT
4235 }
4236 return NOTIFY_OK;
4237}
1da177e4
LT
4238
4239void __init page_alloc_init(void)
4240{
4241 hotcpu_notifier(page_alloc_cpu_notify, 0);
4242}
4243
cb45b0e9
HA
4244/*
4245 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
4246 * or min_free_kbytes changes.
4247 */
4248static void calculate_totalreserve_pages(void)
4249{
4250 struct pglist_data *pgdat;
4251 unsigned long reserve_pages = 0;
2f6726e5 4252 enum zone_type i, j;
cb45b0e9
HA
4253
4254 for_each_online_pgdat(pgdat) {
4255 for (i = 0; i < MAX_NR_ZONES; i++) {
4256 struct zone *zone = pgdat->node_zones + i;
4257 unsigned long max = 0;
4258
4259 /* Find valid and maximum lowmem_reserve in the zone */
4260 for (j = i; j < MAX_NR_ZONES; j++) {
4261 if (zone->lowmem_reserve[j] > max)
4262 max = zone->lowmem_reserve[j];
4263 }
4264
4265 /* we treat pages_high as reserved pages. */
4266 max += zone->pages_high;
4267
4268 if (max > zone->present_pages)
4269 max = zone->present_pages;
4270 reserve_pages += max;
4271 }
4272 }
4273 totalreserve_pages = reserve_pages;
4274}
4275
1da177e4
LT
4276/*
4277 * setup_per_zone_lowmem_reserve - called whenever
4278 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
4279 * has a correct pages reserved value, so an adequate number of
4280 * pages are left in the zone after a successful __alloc_pages().
4281 */
4282static void setup_per_zone_lowmem_reserve(void)
4283{
4284 struct pglist_data *pgdat;
2f6726e5 4285 enum zone_type j, idx;
1da177e4 4286
ec936fc5 4287 for_each_online_pgdat(pgdat) {
1da177e4
LT
4288 for (j = 0; j < MAX_NR_ZONES; j++) {
4289 struct zone *zone = pgdat->node_zones + j;
4290 unsigned long present_pages = zone->present_pages;
4291
4292 zone->lowmem_reserve[j] = 0;
4293
2f6726e5
CL
4294 idx = j;
4295 while (idx) {
1da177e4
LT
4296 struct zone *lower_zone;
4297
2f6726e5
CL
4298 idx--;
4299
1da177e4
LT
4300 if (sysctl_lowmem_reserve_ratio[idx] < 1)
4301 sysctl_lowmem_reserve_ratio[idx] = 1;
4302
4303 lower_zone = pgdat->node_zones + idx;
4304 lower_zone->lowmem_reserve[j] = present_pages /
4305 sysctl_lowmem_reserve_ratio[idx];
4306 present_pages += lower_zone->present_pages;
4307 }
4308 }
4309 }
cb45b0e9
HA
4310
4311 /* update totalreserve_pages */
4312 calculate_totalreserve_pages();
1da177e4
LT
4313}
4314
88ca3b94
RD
4315/**
4316 * setup_per_zone_pages_min - called when min_free_kbytes changes.
4317 *
4318 * Ensures that the pages_{min,low,high} values for each zone are set correctly
4319 * with respect to min_free_kbytes.
1da177e4 4320 */
3947be19 4321void setup_per_zone_pages_min(void)
1da177e4
LT
4322{
4323 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4324 unsigned long lowmem_pages = 0;
4325 struct zone *zone;
4326 unsigned long flags;
4327
4328 /* Calculate total number of !ZONE_HIGHMEM pages */
4329 for_each_zone(zone) {
4330 if (!is_highmem(zone))
4331 lowmem_pages += zone->present_pages;
4332 }
4333
4334 for_each_zone(zone) {
ac924c60
AM
4335 u64 tmp;
4336
1125b4e3 4337 spin_lock_irqsave(&zone->lock, flags);
ac924c60
AM
4338 tmp = (u64)pages_min * zone->present_pages;
4339 do_div(tmp, lowmem_pages);
1da177e4
LT
4340 if (is_highmem(zone)) {
4341 /*
669ed175
NP
4342 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
4343 * need highmem pages, so cap pages_min to a small
4344 * value here.
4345 *
4346 * The (pages_high-pages_low) and (pages_low-pages_min)
4347 * deltas control async page reclaim, and so should
4348 * not be capped for highmem.
1da177e4
LT
4349 */
4350 int min_pages;
4351
4352 min_pages = zone->present_pages / 1024;
4353 if (min_pages < SWAP_CLUSTER_MAX)
4354 min_pages = SWAP_CLUSTER_MAX;
4355 if (min_pages > 128)
4356 min_pages = 128;
4357 zone->pages_min = min_pages;
4358 } else {
669ed175
NP
4359 /*
4360 * If it's a lowmem zone, reserve a number of pages
1da177e4
LT
4361 * proportionate to the zone's size.
4362 */
669ed175 4363 zone->pages_min = tmp;
1da177e4
LT
4364 }
4365
ac924c60
AM
4366 zone->pages_low = zone->pages_min + (tmp >> 2);
4367 zone->pages_high = zone->pages_min + (tmp >> 1);
56fd56b8 4368 setup_zone_migrate_reserve(zone);
1125b4e3 4369 spin_unlock_irqrestore(&zone->lock, flags);
1da177e4 4370 }
cb45b0e9
HA
4371
4372 /* update totalreserve_pages */
4373 calculate_totalreserve_pages();
1da177e4
LT
4374}
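/*
 * Editorial worked example (not part of the original file): with
 * min_free_kbytes = 4096k and 4K pages, pages_min totals 1024 pages.
 * For a machine with a single lowmem zone, tmp == 1024, giving
 * pages_min = 1024, pages_low = 1024 + 256 = 1280 and
 * pages_high = 1024 + 512 = 1536, i.e. roughly min * 1.25 and min * 1.5.
 */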
4375
556adecb
RR
4376/**
4377 * setup_per_zone_inactive_ratio - called when min_free_kbytes changes.
4378 *
4379 * The inactive anon list should be small enough that the VM never has to
4380 * do too much work, but large enough that each inactive page has a chance
4381 * to be referenced again before it is swapped out.
4382 *
4383 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
4384 * INACTIVE_ANON pages on this zone's LRU, maintained by the
4385 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
4386 * the anonymous pages are kept on the inactive list.
4387 *
4388 * total target max
4389 * memory ratio inactive anon
4390 * -------------------------------------
4391 * 10MB 1 5MB
4392 * 100MB 1 50MB
4393 * 1GB 3 250MB
4394 * 10GB 10 0.9GB
4395 * 100GB 31 3GB
4396 * 1TB 101 10GB
4397 * 10TB 320 32GB
4398 */
efab8186 4399static void setup_per_zone_inactive_ratio(void)
556adecb
RR
4400{
4401 struct zone *zone;
4402
4403 for_each_zone(zone) {
4404 unsigned int gb, ratio;
4405
4406 /* Zone size in gigabytes */
4407 gb = zone->present_pages >> (30 - PAGE_SHIFT);
4408 ratio = int_sqrt(10 * gb);
4409 if (!ratio)
4410 ratio = 1;
4411
4412 zone->inactive_ratio = ratio;
4413 }
4414}
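/*
 * Editorial note (not part of the original file): ratio = int_sqrt(10 * gb)
 * is what produces the table above, e.g. a 1GB zone gives int_sqrt(10) = 3
 * and a 100GB zone gives int_sqrt(1000) = 31.
 */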
4415
1da177e4
LT
4416/*
4417 * Initialise min_free_kbytes.
4418 *
4419 * For small machines we want it small (128k min). For large machines
4420 * we want it large (64MB max). But it is not linear, because network
4421 * bandwidth does not increase linearly with machine size. We use
4422 *
4423 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
4424 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
4425 *
4426 * which yields
4427 *
4428 * 16MB: 512k
4429 * 32MB: 724k
4430 * 64MB: 1024k
4431 * 128MB: 1448k
4432 * 256MB: 2048k
4433 * 512MB: 2896k
4434 * 1024MB: 4096k
4435 * 2048MB: 5792k
4436 * 4096MB: 8192k
4437 * 8192MB: 11584k
4438 * 16384MB: 16384k
4439 */
4440static int __init init_per_zone_pages_min(void)
4441{
4442 unsigned long lowmem_kbytes;
4443
4444 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4445
4446 min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4447 if (min_free_kbytes < 128)
4448 min_free_kbytes = 128;
4449 if (min_free_kbytes > 65536)
4450 min_free_kbytes = 65536;
4451 setup_per_zone_pages_min();
4452 setup_per_zone_lowmem_reserve();
556adecb 4453 setup_per_zone_inactive_ratio();
1da177e4
LT
4454 return 0;
4455}
4456module_init(init_per_zone_pages_min)
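/*
 * Editorial worked example (not part of the original file): with 1024MB of
 * lowmem, lowmem_kbytes is 1048576 and int_sqrt(1048576 * 16) =
 * int_sqrt(16777216) = 4096, matching the 1024MB row of the table above.
 * The 128k and 65536k clamps only bite below roughly 1MB or above roughly
 * 256GB of lowmem.
 */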
4457
4458/*
4459 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
4460 * that we can call two helper functions whenever min_free_kbytes
4461 * changes.
4462 */
4463int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
4464 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4465{
4466 proc_dointvec(table, write, file, buffer, length, ppos);
3b1d92c5
MG
4467 if (write)
4468 setup_per_zone_pages_min();
1da177e4
LT
4469 return 0;
4470}
4471
9614634f
CL
4472#ifdef CONFIG_NUMA
4473int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
4474 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4475{
4476 struct zone *zone;
4477 int rc;
4478
4479 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4480 if (rc)
4481 return rc;
4482
4483 for_each_zone(zone)
8417bba4 4484 zone->min_unmapped_pages = (zone->present_pages *
9614634f
CL
4485 sysctl_min_unmapped_ratio) / 100;
4486 return 0;
4487}
0ff38490
CL
4488
4489int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
4490 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4491{
4492 struct zone *zone;
4493 int rc;
4494
4495 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4496 if (rc)
4497 return rc;
4498
4499 for_each_zone(zone)
4500 zone->min_slab_pages = (zone->present_pages *
4501 sysctl_min_slab_ratio) / 100;
4502 return 0;
4503}
9614634f
CL
4504#endif
4505
1da177e4
LT
4506/*
4507 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
4508 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
4509 * whenever sysctl_lowmem_reserve_ratio changes.
4510 *
4511 * The reserve ratio obviously has absolutely no relation to the
4512 * pages_min watermarks. The lowmem reserve ratio is only meaningful
4513 * as a function of the boot-time zone sizes.
4514 */
4515int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
4516 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4517{
4518 proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4519 setup_per_zone_lowmem_reserve();
4520 return 0;
4521}
4522
8ad4b1fb
RS
4523/*
4524 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
4525 * cpu. It is the fraction of total pages in each zone that a hot per-cpu pagelist
4526 * can have before it gets flushed back to the buddy allocator.
4527 */
4528
4529int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4530 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4531{
4532 struct zone *zone;
4533 unsigned int cpu;
4534 int ret;
4535
4536 ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4537 if (!write || (ret == -EINVAL))
4538 return ret;
4539 for_each_zone(zone) {
4540 for_each_online_cpu(cpu) {
4541 unsigned long high;
4542 high = zone->present_pages / percpu_pagelist_fraction;
4543 setup_pagelist_highmark(zone_pcp(zone, cpu), high);
4544 }
4545 }
4546 return 0;
4547}
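/*
 * Editorial example (not part of the original file): writing 8 to
 * /proc/sys/vm/percpu_pagelist_fraction on a zone with 262144 present pages
 * makes setup_pagelist_highmark() set each CPU's pcp->high to 32768 pages.
 */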
4548
f034b5d4 4549int hashdist = HASHDIST_DEFAULT;
1da177e4
LT
4550
4551#ifdef CONFIG_NUMA
4552static int __init set_hashdist(char *str)
4553{
4554 if (!str)
4555 return 0;
4556 hashdist = simple_strtoul(str, &str, 0);
4557 return 1;
4558}
4559__setup("hashdist=", set_hashdist);
4560#endif
4561
4562/*
4563 * allocate a large system hash table from bootmem
4564 * - it is assumed that the hash table must contain an exact power-of-2
4565 * quantity of entries
4566 * - limit is the number of hash buckets, not the total allocation size
4567 */
4568void *__init alloc_large_system_hash(const char *tablename,
4569 unsigned long bucketsize,
4570 unsigned long numentries,
4571 int scale,
4572 int flags,
4573 unsigned int *_hash_shift,
4574 unsigned int *_hash_mask,
4575 unsigned long limit)
4576{
4577 unsigned long long max = limit;
4578 unsigned long log2qty, size;
4579 void *table = NULL;
4580
4581 /* allow the kernel cmdline to have a say */
4582 if (!numentries) {
4583 /* round applicable memory size up to nearest megabyte */
04903664 4584 numentries = nr_kernel_pages;
1da177e4
LT
4585 numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
4586 numentries >>= 20 - PAGE_SHIFT;
4587 numentries <<= 20 - PAGE_SHIFT;
4588
4589 /* limit to 1 bucket per 2^scale bytes of low memory */
4590 if (scale > PAGE_SHIFT)
4591 numentries >>= (scale - PAGE_SHIFT);
4592 else
4593 numentries <<= (PAGE_SHIFT - scale);
9ab37b8f
PM
4594
4595 /* Make sure we've got at least a 0-order allocation.. */
4596 if (unlikely((numentries * bucketsize) < PAGE_SIZE))
4597 numentries = PAGE_SIZE / bucketsize;
1da177e4 4598 }
6e692ed3 4599 numentries = roundup_pow_of_two(numentries);
1da177e4
LT
4600
4601 /* limit allocation size to 1/16 total memory by default */
4602 if (max == 0) {
4603 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
4604 do_div(max, bucketsize);
4605 }
4606
4607 if (numentries > max)
4608 numentries = max;
4609
f0d1b0b3 4610 log2qty = ilog2(numentries);
1da177e4
LT
4611
4612 do {
4613 size = bucketsize << log2qty;
4614 if (flags & HASH_EARLY)
74768ed8 4615 table = alloc_bootmem_nopanic(size);
1da177e4
LT
4616 else if (hashdist)
4617 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
4618 else {
2309f9e6 4619 unsigned long order = get_order(size);
6c0db466
HD
4620
4621 if (order < MAX_ORDER)
4622 table = (void *)__get_free_pages(GFP_ATOMIC,
4623 order);
1037b83b
ED
4624 /*
4625 * If bucketsize is not a power-of-two, we may free
4626 * some pages at the end of the hash table.
4627 */
4628 if (table) {
4629 unsigned long alloc_end = (unsigned long)table +
4630 (PAGE_SIZE << order);
4631 unsigned long used = (unsigned long)table +
4632 PAGE_ALIGN(size);
4633 split_page(virt_to_page(table), order);
4634 while (used < alloc_end) {
4635 free_page(used);
4636 used += PAGE_SIZE;
4637 }
4638 }
1da177e4
LT
4639 }
4640 } while (!table && size > PAGE_SIZE && --log2qty);
4641
4642 if (!table)
4643 panic("Failed to allocate %s hash table\n", tablename);
4644
b49ad484 4645 printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
1da177e4
LT
4646 tablename,
4647 (1U << log2qty),
f0d1b0b3 4648 ilog2(size) - PAGE_SHIFT,
1da177e4
LT
4649 size);
4650
4651 if (_hash_shift)
4652 *_hash_shift = log2qty;
4653 if (_hash_mask)
4654 *_hash_mask = (1 << log2qty) - 1;
4655
dbb1f81c
CM
4656 /*
4657 * If hashdist is set, the table allocation is done with __vmalloc()
4658 * which invokes the kmemleak_alloc() callback. This function may also
4659 * be called before the slab allocator and kmemleak are initialised, in
4660 * which case kmemleak simply buffers the request to be executed later
4661 * (GFP_ATOMIC flag ignored in this case).
4662 */
4663 if (!hashdist)
4664 kmemleak_alloc(table, size, 1, GFP_ATOMIC);
4665
1da177e4
LT
4666 return table;
4667}
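/*
 * Editorial example of a caller (a sketch modelled on fs/dcache.c of this
 * era, shown here only for illustration): the dentry hash is sized early
 * from bootmem.
 *
 *	dentry_hashtable =
 *		alloc_large_system_hash("Dentry cache",
 *					sizeof(struct hlist_head),
 *					dhash_entries,
 *					13,
 *					HASH_EARLY,
 *					&d_hash_shift,
 *					&d_hash_mask,
 *					0);
 */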
a117e66e 4668
835c134e
MG
4669/* Return a pointer to the bitmap storing bits affecting a block of pages */
4670static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
4671 unsigned long pfn)
4672{
4673#ifdef CONFIG_SPARSEMEM
4674 return __pfn_to_section(pfn)->pageblock_flags;
4675#else
4676 return zone->pageblock_flags;
4677#endif /* CONFIG_SPARSEMEM */
4678}
4679
4680static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4681{
4682#ifdef CONFIG_SPARSEMEM
4683 pfn &= (PAGES_PER_SECTION-1);
d9c23400 4684 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
835c134e
MG
4685#else
4686 pfn = pfn - zone->zone_start_pfn;
d9c23400 4687 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
835c134e
MG
4688#endif /* CONFIG_SPARSEMEM */
4689}
4690
4691/**
d9c23400 4692 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
835c134e
MG
4693 * @page: The page within the block of interest
4694 * @start_bitidx: The first bit of interest to retrieve
4695 * @end_bitidx: The last bit of interest
4696 * returns pageblock_bits flags
4697 */
4698unsigned long get_pageblock_flags_group(struct page *page,
4699 int start_bitidx, int end_bitidx)
4700{
4701 struct zone *zone;
4702 unsigned long *bitmap;
4703 unsigned long pfn, bitidx;
4704 unsigned long flags = 0;
4705 unsigned long value = 1;
4706
4707 zone = page_zone(page);
4708 pfn = page_to_pfn(page);
4709 bitmap = get_pageblock_bitmap(zone, pfn);
4710 bitidx = pfn_to_bitidx(zone, pfn);
4711
4712 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4713 if (test_bit(bitidx + start_bitidx, bitmap))
4714 flags |= value;
6220ec78 4715
835c134e
MG
4716 return flags;
4717}
4718
4719/**
d9c23400 4720 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
835c134e
MG
4721 * @page: The page within the block of interest
4722 * @start_bitidx: The first bit of interest
4723 * @end_bitidx: The last bit of interest
4724 * @flags: The flags to set
4725 */
4726void set_pageblock_flags_group(struct page *page, unsigned long flags,
4727 int start_bitidx, int end_bitidx)
4728{
4729 struct zone *zone;
4730 unsigned long *bitmap;
4731 unsigned long pfn, bitidx;
4732 unsigned long value = 1;
4733
4734 zone = page_zone(page);
4735 pfn = page_to_pfn(page);
4736 bitmap = get_pageblock_bitmap(zone, pfn);
4737 bitidx = pfn_to_bitidx(zone, pfn);
86051ca5
KH
4738 VM_BUG_ON(pfn < zone->zone_start_pfn);
4739 VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
835c134e
MG
4740
4741 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4742 if (flags & value)
4743 __set_bit(bitidx + start_bitidx, bitmap);
4744 else
4745 __clear_bit(bitidx + start_bitidx, bitmap);
4746}
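/*
 * Editorial sketch (not part of the original file): the migratetype
 * accessors defined elsewhere in this file and in the headers reduce to
 * group reads/writes of the PB_migrate bits, roughly:
 *
 *	static void set_pageblock_migratetype(struct page *page, int migratetype)
 *	{
 *		set_pageblock_flags_group(page, (unsigned long)migratetype,
 *						PB_migrate, PB_migrate_end);
 *	}
 */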
a5d76b54
KH
4747
4748/*
4749 * This is designed as a helper function; please see page_isolation.c as well.
4750 * It sets/clears a pageblock's type to ISOLATE.
4751 * The page allocator never allocates memory from an ISOLATE block.
4752 */
4753
4754int set_migratetype_isolate(struct page *page)
4755{
4756 struct zone *zone;
4757 unsigned long flags;
4758 int ret = -EBUSY;
4759
4760 zone = page_zone(page);
4761 spin_lock_irqsave(&zone->lock, flags);
4762 /*
4763 * In the future, more migrate types will be able to be isolation targets.
4764 */
4765 if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
4766 goto out;
4767 set_pageblock_migratetype(page, MIGRATE_ISOLATE);
4768 move_freepages_block(zone, page, MIGRATE_ISOLATE);
4769 ret = 0;
4770out:
4771 spin_unlock_irqrestore(&zone->lock, flags);
4772 if (!ret)
9f8f2172 4773 drain_all_pages();
a5d76b54
KH
4774 return ret;
4775}
4776
4777void unset_migratetype_isolate(struct page *page)
4778{
4779 struct zone *zone;
4780 unsigned long flags;
4781 zone = page_zone(page);
4782 spin_lock_irqsave(&zone->lock, flags);
4783 if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
4784 goto out;
4785 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4786 move_freepages_block(zone, page, MIGRATE_MOVABLE);
4787out:
4788 spin_unlock_irqrestore(&zone->lock, flags);
4789}
0c0e6195
KH
4790
4791#ifdef CONFIG_MEMORY_HOTREMOVE
4792/*
4793 * All pages in the range must be isolated before calling this.
4794 */
4795void
4796__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
4797{
4798 struct page *page;
4799 struct zone *zone;
4800 int order, i;
4801 unsigned long pfn;
4802 unsigned long flags;
4803 /* find the first valid pfn */
4804 for (pfn = start_pfn; pfn < end_pfn; pfn++)
4805 if (pfn_valid(pfn))
4806 break;
4807 if (pfn == end_pfn)
4808 return;
4809 zone = page_zone(pfn_to_page(pfn));
4810 spin_lock_irqsave(&zone->lock, flags);
4811 pfn = start_pfn;
4812 while (pfn < end_pfn) {
4813 if (!pfn_valid(pfn)) {
4814 pfn++;
4815 continue;
4816 }
4817 page = pfn_to_page(pfn);
4818 BUG_ON(page_count(page));
4819 BUG_ON(!PageBuddy(page));
4820 order = page_order(page);
4821#ifdef CONFIG_DEBUG_VM
4822 printk(KERN_INFO "remove from free list %lx %d %lx\n",
4823 pfn, 1 << order, end_pfn);
4824#endif
4825 list_del(&page->lru);
4826 rmv_page_order(page);
4827 zone->free_area[order].nr_free--;
4828 __mod_zone_page_state(zone, NR_FREE_PAGES,
4829 - (1UL << order));
4830 for (i = 0; i < (1 << order); i++)
4831 SetPageReserved((page+i));
4832 pfn += (1 << order);
4833 }
4834 spin_unlock_irqrestore(&zone->lock, flags);
4835}
4836#endif