[PATCH] Account for memmap and optionally the kernel image as holes
[linux-2.6-block.git] mm/page_alloc.c
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * MCD - HACK: Find somewhere to initialize this EARLY, or make this
 * initializer cleaner
 */
nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
EXPORT_SYMBOL(node_online_map);
nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
EXPORT_SYMBOL(node_possible_map);
unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
long nr_swap_pages;
int percpu_pagelist_fraction;

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
	 256,
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32
#endif
};
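
/*
 * Illustration of the ratios above (the arithmetic is mine, the numbers are
 * from the comment): with the default ratio of 256, a NORMAL-targeted
 * allocation that falls back into ZONE_DMA must leave about 784M/256 ~= 3MB
 * of ZONE_DMA free, and with ratio 32 a HIGHMEM-targeted allocation leaves
 * 224M/32 = 7MB of ZONE_NORMAL untouched for lowmem users.
 */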

EXPORT_SYMBOL(totalram_pages);

/*
 * Used by page_zone() to look up the address of the struct zone whose
 * id is encoded in the upper bits of page->flags
 */
struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
EXPORT_SYMBOL(zone_table);

static char *zone_names[MAX_NR_ZONES] = {
	 "DMA",
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem"
#endif
};

int min_free_kbytes = 1024;

unsigned long __meminitdata nr_kernel_pages;
unsigned long __meminitdata nr_all_pages;
static unsigned long __initdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
   * ranges of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() will be merged if possible
   * so the number of times add_active_range() can be called is
   * related to the number of nodes and the number of holes
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif

  struct node_active_region __initdata early_node_map[MAX_ACTIVE_REGIONS];
  int __initdata nr_nodemap_entries;
  unsigned long __initdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  unsigned long __initdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(page)))
		return 0;
#endif
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	printk(KERN_EMERG "Bad page state in process '%s'\n"
		KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
		KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
		KERN_EMERG "Backtrace:\n",
		current->comm, page, (int)(2*sizeof(unsigned long)),
		(unsigned long)page->flags, page->mapping,
		page_mapcount(page), page_count(page));
	dump_stack();
	page->flags &= ~(1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim |
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_buddy );
	set_page_count(page, 0);
	reset_page_mapcount(page);
	page->mapping = NULL;
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */
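
/*
 * Illustration (my example, following prep_compound_page() below): an
 * order-2 compound allocation spans 4 struct pages.  page[0] is the head;
 * page[1].lru.next holds free_compound_page and page[1].lru.prev holds the
 * order (2); page[0..3] all get PG_compound set and ->private pointing back
 * at page[0].
 */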

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, (unsigned long)page[1].lru.prev);
}

static void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	page[1].lru.next = (void *)free_compound_page;	/* set dtor */
	page[1].lru.prev = (void *)order;
	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageCompound(p);
		set_page_private(p, (unsigned long)page);
	}
}

static void destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	if (unlikely((unsigned long)page[1].lru.prev != order))
		bad_page(page);

	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageCompound(p) |
				(page_private(p) != (unsigned long)page)))
			bad_page(page);
		__ClearPageCompound(p);
	}
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

/*
 * function for dealing with page's order in buddy system.
 * zone->lock is already acquired when we use these.
 * So, we don't need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	return page_private(page);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}
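
/*
 * Worked example (illustrative, using the formulas above): merging the
 * order-1 buddies at indexes 8 and 10 gives the order-2 parent at
 * P = 8 & ~(1 << 1) = 8, i.e. the combined block starts at index 8 and
 * covers pages 8..11.
 */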

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(buddy)))
		return 0;
#endif

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PG_buddy. Page's
 * order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */

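/*
 * Sketch of the coalescing walk below (my illustration, not part of the
 * original comment): freeing page_idx 8 at order 0 finds buddy 9 free and
 * merges them into an order-1 block at 8; it then checks buddy 10..11, and
 * if that is also free the blocks merge again into an order-2 block at 8,
 * and so on until a buddy is busy or MAX_ORDER-1 is reached.
 */
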
static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order)
{
	unsigned long page_idx;
	int order_size = 1 << order;

	if (unlikely(PageCompound(page)))
		destroy_compound_page(page, order);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & (order_size - 1));
	VM_BUG_ON(bad_range(zone, page));

	zone->free_pages += order_size;
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct free_area *area;
		struct page *buddy;

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(page, buddy, order))
			break;		/* Move the buddy up one level. */

		list_del(&buddy->lru);
		area = zone->free_area + order;
		area->nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru, &zone->free_area[order].free_list);
	zone->free_area[order].nr_free++;
}

static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_reclaim	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);
	if (PageDirty(page))
		__ClearPageDirty(page);
	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not free the page.  But we shall soon need
	 * to do more, for when the ZERO_PAGE count wraps negative.
	 */
	return PageReserved(page);
}

/*
 * Frees a list of pages.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pages_bulk(struct zone *zone, int count,
					struct list_head *list, int order)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	while (count--) {
		struct page *page;

		VM_BUG_ON(list_empty(list));
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_one_page list manipulates */
		list_del(&page->lru);
		__free_one_page(page, zone, order);
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	__free_one_page(page, zone, order);
	spin_unlock(&zone->lock);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int reserved = 0;

	arch_free_page(page, order);
	if (!PageHighMem(page))
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE<<order);

	for (i = 0 ; i < (1 << order) ; ++i)
		reserved += free_pages_check(page + i);
	if (reserved)
		return;

	kernel_map_pages(page, 1 << order, 0);
	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order);
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}


/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
 	int low, int high, struct free_area *area)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
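
/*
 * Example of the split above (illustrative numbers, not from the code):
 * taking an order-3 block (8 pages) for an order-0 request, the loop puts
 * pages 4..7 back as one order-2 block, then pages 2..3 as an order-1
 * block, then page 1 as an order-0 block, leaving page 0 for the caller.
 */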

/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim	|
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);

	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not allocate the page: as a safety net.
	 */
	if (PageReserved(page))
		return 1;

	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
			1 << PG_referenced | 1 << PG_arch_1 |
			1 << PG_checked | 1 << PG_mappedtodisk);
	set_page_private(page, 0);
	set_page_refcounted(page);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order)
{
	struct free_area * area;
	unsigned int current_order;
	struct page *page;

	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = zone->free_area + current_order;
		if (list_empty(&area->free_list))
			continue;

		page = list_entry(area->free_list.next, struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		zone->free_pages -= 1UL << order;
		expand(zone, page, order, current_order, area);
		return page;
	}

	return NULL;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list)
{
	int i;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order);
		if (unlikely(page == NULL))
			break;
		list_add_tail(&page->lru, list);
	}
	spin_unlock(&zone->lock);
	return i;
}

4ae7c039 663#ifdef CONFIG_NUMA
8fce4d8e
CL
664/*
665 * Called from the slab reaper to drain pagesets on a particular node that
39bbcb8f 666 * belongs to the currently executing processor.
879336c3
CL
667 * Note that this function must be called with the thread pinned to
668 * a single processor.
8fce4d8e
CL
669 */
670void drain_node_pages(int nodeid)
4ae7c039 671{
2f6726e5
CL
672 int i;
673 enum zone_type z;
4ae7c039
CL
674 unsigned long flags;
675
8fce4d8e
CL
676 for (z = 0; z < MAX_NR_ZONES; z++) {
677 struct zone *zone = NODE_DATA(nodeid)->node_zones + z;
4ae7c039
CL
678 struct per_cpu_pageset *pset;
679
39bbcb8f
CL
680 if (!populated_zone(zone))
681 continue;
682
23316bc8 683 pset = zone_pcp(zone, smp_processor_id());
4ae7c039
CL
684 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
685 struct per_cpu_pages *pcp;
686
687 pcp = &pset->pcp[i];
879336c3
CL
688 if (pcp->count) {
689 local_irq_save(flags);
690 free_pages_bulk(zone, pcp->count, &pcp->list, 0);
691 pcp->count = 0;
692 local_irq_restore(flags);
693 }
4ae7c039
CL
694 }
695 }
4ae7c039
CL
696}
697#endif
698
1da177e4
LT
699#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
700static void __drain_pages(unsigned int cpu)
701{
c54ad30c 702 unsigned long flags;
1da177e4
LT
703 struct zone *zone;
704 int i;
705
706 for_each_zone(zone) {
707 struct per_cpu_pageset *pset;
708
e7c8d5c9 709 pset = zone_pcp(zone, cpu);
1da177e4
LT
710 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
711 struct per_cpu_pages *pcp;
712
713 pcp = &pset->pcp[i];
c54ad30c 714 local_irq_save(flags);
48db57f8
NP
715 free_pages_bulk(zone, pcp->count, &pcp->list, 0);
716 pcp->count = 0;
c54ad30c 717 local_irq_restore(flags);
1da177e4
LT
718 }
719 }
720}
721#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */
722
723#ifdef CONFIG_PM
724
725void mark_free_pages(struct zone *zone)
726{
f623f0db
RW
727 unsigned long pfn, max_zone_pfn;
728 unsigned long flags;
1da177e4
LT
729 int order;
730 struct list_head *curr;
731
732 if (!zone->spanned_pages)
733 return;
734
735 spin_lock_irqsave(&zone->lock, flags);
f623f0db
RW
736
737 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
738 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
739 if (pfn_valid(pfn)) {
740 struct page *page = pfn_to_page(pfn);
741
742 if (!PageNosave(page))
743 ClearPageNosaveFree(page);
744 }
1da177e4
LT
745
746 for (order = MAX_ORDER - 1; order >= 0; --order)
747 list_for_each(curr, &zone->free_area[order].free_list) {
f623f0db 748 unsigned long i;
1da177e4 749
f623f0db
RW
750 pfn = page_to_pfn(list_entry(curr, struct page, lru));
751 for (i = 0; i < (1UL << order); i++)
752 SetPageNosaveFree(pfn_to_page(pfn + i));
753 }
1da177e4 754
1da177e4
LT
755 spin_unlock_irqrestore(&zone->lock, flags);
756}
757
758/*
759 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
760 */
761void drain_local_pages(void)
762{
763 unsigned long flags;
764
765 local_irq_save(flags);
766 __drain_pages(smp_processor_id());
767 local_irq_restore(flags);
768}
769#endif /* CONFIG_PM */
770
1da177e4
LT
771/*
772 * Free a 0-order page
773 */
1da177e4
LT
774static void fastcall free_hot_cold_page(struct page *page, int cold)
775{
776 struct zone *zone = page_zone(page);
777 struct per_cpu_pages *pcp;
778 unsigned long flags;
779
780 arch_free_page(page, 0);
781
1da177e4
LT
782 if (PageAnon(page))
783 page->mapping = NULL;
224abf92 784 if (free_pages_check(page))
689bcebf
HD
785 return;
786
689bcebf
HD
787 kernel_map_pages(page, 1, 0);
788
e7c8d5c9 789 pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
1da177e4 790 local_irq_save(flags);
f8891e5e 791 __count_vm_event(PGFREE);
1da177e4
LT
792 list_add(&page->lru, &pcp->list);
793 pcp->count++;
48db57f8
NP
794 if (pcp->count >= pcp->high) {
795 free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
796 pcp->count -= pcp->batch;
797 }
1da177e4
LT
798 local_irq_restore(flags);
799 put_cpu();
800}
801
802void fastcall free_hot_page(struct page *page)
803{
804 free_hot_cold_page(page, 0);
805}
806
807void fastcall free_cold_page(struct page *page)
808{
809 free_hot_cold_page(page, 1);
810}
811
8dfcc9ba
NP
812/*
813 * split_page takes a non-compound higher-order page, and splits it into
814 * n (1<<order) sub-pages: page[0..n]
815 * Each sub-page must be freed individually.
816 *
817 * Note: this is probably too low level an operation for use in drivers.
818 * Please consult with lkml before using this in your driver.
819 */
820void split_page(struct page *page, unsigned int order)
821{
822 int i;
823
725d704e
NP
824 VM_BUG_ON(PageCompound(page));
825 VM_BUG_ON(!page_count(page));
7835e98b
NP
826 for (i = 1; i < (1 << order); i++)
827 set_page_refcounted(page + i);
8dfcc9ba 828}
8dfcc9ba 829
/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static struct page *buffered_rmqueue(struct zonelist *zonelist,
			struct zone *zone, int order, gfp_t gfp_flags)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);
	int cpu;

again:
	cpu  = get_cpu();
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;

		pcp = &zone_pcp(zone, cpu)->pcp[cold];
		local_irq_save(flags);
		if (!pcp->count) {
			pcp->count += rmqueue_bulk(zone, 0,
						pcp->batch, &pcp->list);
			if (unlikely(!pcp->count))
				goto failed;
		}
		page = list_entry(pcp->list.next, struct page, lru);
		list_del(&page->lru);
		pcp->count--;
	} else {
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
	}

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(zonelist, zone);
	local_irq_restore(flags);
	put_cpu();

	VM_BUG_ON(bad_range(zone, page));
	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	put_cpu();
	return NULL;
}

#define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
#define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */
#define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */
#define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */
#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */

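/*
 * For instance (reading from __alloc_pages() below): a GFP_ATOMIC request
 * ends up with ALLOC_WMARK_MIN | ALLOC_HARDER | ALLOC_HIGH, while an
 * ordinary GFP_KERNEL request uses ALLOC_WMARK_MIN | ALLOC_CPUSET,
 * assuming the caller is not a realtime task.
 */
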
/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	long min = mark, free_pages = z->free_pages - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}
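
/*
 * Worked example (illustrative numbers only): for an order-2 GFP_ATOMIC
 * request against a mark of 1024 pages, ALLOC_HIGH halves min to 512 and
 * ALLOC_HARDER trims another quarter to 384; the loop then also demands
 * that, after discounting order-0 free pages, at least 192 pages remain,
 * and after discounting order-0 and order-1 pages, at least 96 remain.
 */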
921
/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
926static struct page *
927get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
928 struct zonelist *zonelist, int alloc_flags)
753ee728 929{
7fb1d9fc
RS
930 struct zone **z = zonelist->zones;
931 struct page *page = NULL;
932 int classzone_idx = zone_idx(*z);
1192d526 933 struct zone *zone;
7fb1d9fc
RS
934
935 /*
936 * Go through the zonelist once, looking for a zone with enough free.
937 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
938 */
939 do {
1192d526 940 zone = *z;
9b819d20 941 if (unlikely((gfp_mask & __GFP_THISNODE) &&
1192d526 942 zone->zone_pgdat != zonelist->zones[0]->zone_pgdat))
9b819d20 943 break;
7fb1d9fc 944 if ((alloc_flags & ALLOC_CPUSET) &&
1192d526 945 !cpuset_zone_allowed(zone, gfp_mask))
7fb1d9fc
RS
946 continue;
947
948 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
3148890b
NP
949 unsigned long mark;
950 if (alloc_flags & ALLOC_WMARK_MIN)
1192d526 951 mark = zone->pages_min;
3148890b 952 else if (alloc_flags & ALLOC_WMARK_LOW)
1192d526 953 mark = zone->pages_low;
3148890b 954 else
1192d526
CL
955 mark = zone->pages_high;
956 if (!zone_watermark_ok(zone , order, mark,
7fb1d9fc 957 classzone_idx, alloc_flags))
9eeff239 958 if (!zone_reclaim_mode ||
1192d526 959 !zone_reclaim(zone, gfp_mask, order))
9eeff239 960 continue;
7fb1d9fc
RS
961 }
962
1192d526 963 page = buffered_rmqueue(zonelist, zone, order, gfp_mask);
7fb1d9fc 964 if (page) {
7fb1d9fc
RS
965 break;
966 }
967 } while (*(++z) != NULL);
968 return page;
753ee728
MH
969}
970
1da177e4
LT
971/*
972 * This is the 'heart' of the zoned buddy allocator.
973 */
974struct page * fastcall
dd0fc66f 975__alloc_pages(gfp_t gfp_mask, unsigned int order,
976 struct zonelist *zonelist)
977{
260b2367 978 const gfp_t wait = gfp_mask & __GFP_WAIT;
7fb1d9fc 979 struct zone **z;
1da177e4
LT
980 struct page *page;
981 struct reclaim_state reclaim_state;
982 struct task_struct *p = current;
1da177e4 983 int do_retry;
7fb1d9fc 984 int alloc_flags;
1da177e4
LT
985 int did_some_progress;
986
987 might_sleep_if(wait);
988
6b1de916 989restart:
7fb1d9fc 990 z = zonelist->zones; /* the list of zones suitable for gfp_mask */
1da177e4 991
7fb1d9fc 992 if (unlikely(*z == NULL)) {
993 /* Should this ever happen?? */
994 return NULL;
995 }
6b1de916 996
7fb1d9fc 997 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
3148890b 998 zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
999 if (page)
1000 goto got_pg;
1da177e4 1001
6b1de916 1002 do {
43b0bc00 1003 wakeup_kswapd(*z, order);
6b1de916 1004 } while (*(++z));
1da177e4 1005
9bf2229f 1006 /*
7fb1d9fc
RS
1007 * OK, we're below the kswapd watermark and have kicked background
1008 * reclaim. Now things get more complex, so set up alloc_flags according
1009 * to how we want to proceed.
1010 *
1011 * The caller may dip into page reserves a bit more if the caller
1012 * cannot run direct reclaim, or if the caller has realtime scheduling
4eac915d
PJ
1013 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
1014 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
9bf2229f 1015 */
3148890b 1016 alloc_flags = ALLOC_WMARK_MIN;
1017 if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
1018 alloc_flags |= ALLOC_HARDER;
1019 if (gfp_mask & __GFP_HIGH)
1020 alloc_flags |= ALLOC_HIGH;
1021 if (wait)
1022 alloc_flags |= ALLOC_CPUSET;
1da177e4
LT
1023
1024 /*
1025 * Go through the zonelist again. Let __GFP_HIGH and allocations
7fb1d9fc 1026 * coming from realtime tasks go deeper into reserves.
1da177e4
LT
1027 *
1028 * This is the last chance, in general, before the goto nopage.
1029 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
9bf2229f 1030 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1da177e4 1031 */
7fb1d9fc
RS
1032 page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
1033 if (page)
1034 goto got_pg;
1da177e4
LT
1035
1036 /* This allocation should allow future memory freeing. */
b84a35be
NP
1037
1038 if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
1039 && !in_interrupt()) {
1040 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
885036d3 1041nofail_alloc:
b84a35be 1042 /* go through the zonelist yet again, ignoring mins */
7fb1d9fc 1043 page = get_page_from_freelist(gfp_mask, order,
47f3a867 1044 zonelist, ALLOC_NO_WATERMARKS);
1045 if (page)
1046 goto got_pg;
885036d3
KK
1047 if (gfp_mask & __GFP_NOFAIL) {
1048 blk_congestion_wait(WRITE, HZ/50);
1049 goto nofail_alloc;
1050 }
1da177e4
LT
1051 }
1052 goto nopage;
1053 }
1054
1055 /* Atomic allocations - we can't balance anything */
1056 if (!wait)
1057 goto nopage;
1058
1059rebalance:
1060 cond_resched();
1061
1062 /* We now go into synchronous reclaim */
3e0d98b9 1063 cpuset_memory_pressure_bump();
1da177e4
LT
1064 p->flags |= PF_MEMALLOC;
1065 reclaim_state.reclaimed_slab = 0;
1066 p->reclaim_state = &reclaim_state;
1067
7fb1d9fc 1068 did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);
1da177e4
LT
1069
1070 p->reclaim_state = NULL;
1071 p->flags &= ~PF_MEMALLOC;
1072
1073 cond_resched();
1074
1075 if (likely(did_some_progress)) {
7fb1d9fc
RS
1076 page = get_page_from_freelist(gfp_mask, order,
1077 zonelist, alloc_flags);
1078 if (page)
1079 goto got_pg;
1da177e4
LT
1080 } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
1081 /*
1082 * Go through the zonelist yet one more time, keep
1083 * very high watermark here, this is only to catch
1084 * a parallel oom killing, we must fail if we're still
1085 * under heavy pressure.
1086 */
7fb1d9fc 1087 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
3148890b 1088 zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
7fb1d9fc
RS
1089 if (page)
1090 goto got_pg;
1da177e4 1091
9b0f8b04 1092 out_of_memory(zonelist, gfp_mask, order);
1da177e4
LT
1093 goto restart;
1094 }
1095
1096 /*
1097 * Don't let big-order allocations loop unless the caller explicitly
1098 * requests that. Wait for some write requests to complete then retry.
1099 *
1100 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
1101 * <= 3, but that may not be true in other implementations.
1102 */
1103 do_retry = 0;
1104 if (!(gfp_mask & __GFP_NORETRY)) {
1105 if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
1106 do_retry = 1;
1107 if (gfp_mask & __GFP_NOFAIL)
1108 do_retry = 1;
1109 }
1110 if (do_retry) {
1111 blk_congestion_wait(WRITE, HZ/50);
1112 goto rebalance;
1113 }
1114
1115nopage:
1116 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
1117 printk(KERN_WARNING "%s: page allocation failure."
1118 " order:%d, mode:0x%x\n",
1119 p->comm, order, gfp_mask);
1120 dump_stack();
578c2fd6 1121 show_mem();
1da177e4 1122 }
1da177e4 1123got_pg:
1da177e4
LT
1124 return page;
1125}
1126
1127EXPORT_SYMBOL(__alloc_pages);
1128
1129/*
1130 * Common helper functions.
1131 */
dd0fc66f 1132fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1da177e4
LT
1133{
1134 struct page * page;
1135 page = alloc_pages(gfp_mask, order);
1136 if (!page)
1137 return 0;
1138 return (unsigned long) page_address(page);
1139}
1140
1141EXPORT_SYMBOL(__get_free_pages);
1142
dd0fc66f 1143fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
1da177e4
LT
1144{
1145 struct page * page;
1146
1147 /*
1148 * get_zeroed_page() returns a 32-bit address, which cannot represent
1149 * a highmem page
1150 */
725d704e 1151 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
1da177e4
LT
1152
1153 page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
1154 if (page)
1155 return (unsigned long) page_address(page);
1156 return 0;
1157}
1158
1159EXPORT_SYMBOL(get_zeroed_page);
1160
1161void __pagevec_free(struct pagevec *pvec)
1162{
1163 int i = pagevec_count(pvec);
1164
1165 while (--i >= 0)
1166 free_hot_cold_page(pvec->pages[i], pvec->cold);
1167}
1168
1169fastcall void __free_pages(struct page *page, unsigned int order)
1170{
b5810039 1171 if (put_page_testzero(page)) {
1da177e4
LT
1172 if (order == 0)
1173 free_hot_page(page);
1174 else
1175 __free_pages_ok(page, order);
1176 }
1177}
1178
1179EXPORT_SYMBOL(__free_pages);
1180
1181fastcall void free_pages(unsigned long addr, unsigned int order)
1182{
1183 if (addr != 0) {
725d704e 1184 VM_BUG_ON(!virt_addr_valid((void *)addr));
1da177e4
LT
1185 __free_pages(virt_to_page((void *)addr), order);
1186 }
1187}
1188
1189EXPORT_SYMBOL(free_pages);
1190
1191/*
1192 * Total amount of free (allocatable) RAM:
1193 */
1194unsigned int nr_free_pages(void)
1195{
1196 unsigned int sum = 0;
1197 struct zone *zone;
1198
1199 for_each_zone(zone)
1200 sum += zone->free_pages;
1201
1202 return sum;
1203}
1204
1205EXPORT_SYMBOL(nr_free_pages);
1206
1207#ifdef CONFIG_NUMA
1208unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
1209{
2f6726e5
CL
1210 unsigned int sum = 0;
1211 enum zone_type i;
1da177e4
LT
1212
1213 for (i = 0; i < MAX_NR_ZONES; i++)
1214 sum += pgdat->node_zones[i].free_pages;
1215
1216 return sum;
1217}
1218#endif
1219
1220static unsigned int nr_free_zone_pages(int offset)
1221{
e310fd43
MB
1222 /* Just pick one node, since fallback list is circular */
1223 pg_data_t *pgdat = NODE_DATA(numa_node_id());
1da177e4
LT
1224 unsigned int sum = 0;
1225
e310fd43
MB
1226 struct zonelist *zonelist = pgdat->node_zonelists + offset;
1227 struct zone **zonep = zonelist->zones;
1228 struct zone *zone;
1da177e4 1229
e310fd43
MB
1230 for (zone = *zonep++; zone; zone = *zonep++) {
1231 unsigned long size = zone->present_pages;
1232 unsigned long high = zone->pages_high;
1233 if (size > high)
1234 sum += size - high;
1da177e4
LT
1235 }
1236
1237 return sum;
1238}
1239
1240/*
1241 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
1242 */
1243unsigned int nr_free_buffer_pages(void)
1244{
af4ca457 1245 return nr_free_zone_pages(gfp_zone(GFP_USER));
1da177e4
LT
1246}
1247
1248/*
1249 * Amount of free RAM allocatable within all zones
1250 */
1251unsigned int nr_free_pagecache_pages(void)
1252{
af4ca457 1253 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER));
1da177e4 1254}
1da177e4
LT
1255#ifdef CONFIG_NUMA
1256static void show_node(struct zone *zone)
1257{
89fa3024 1258 printk("Node %ld ", zone_to_nid(zone));
1da177e4
LT
1259}
1260#else
1261#define show_node(zone) do { } while (0)
1262#endif
1263
1da177e4
LT
1264void si_meminfo(struct sysinfo *val)
1265{
1266 val->totalram = totalram_pages;
1267 val->sharedram = 0;
1268 val->freeram = nr_free_pages();
1269 val->bufferram = nr_blockdev_pages();
1da177e4
LT
1270 val->totalhigh = totalhigh_pages;
1271 val->freehigh = nr_free_highpages();
1da177e4
LT
1272 val->mem_unit = PAGE_SIZE;
1273}
1274
1275EXPORT_SYMBOL(si_meminfo);
1276
1277#ifdef CONFIG_NUMA
1278void si_meminfo_node(struct sysinfo *val, int nid)
1279{
1280 pg_data_t *pgdat = NODE_DATA(nid);
1281
1282 val->totalram = pgdat->node_present_pages;
1283 val->freeram = nr_free_pages_pgdat(pgdat);
98d2b0eb 1284#ifdef CONFIG_HIGHMEM
1da177e4
LT
1285 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
1286 val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
98d2b0eb
CL
1287#else
1288 val->totalhigh = 0;
1289 val->freehigh = 0;
1290#endif
1da177e4
LT
1291 val->mem_unit = PAGE_SIZE;
1292}
1293#endif
1294
1295#define K(x) ((x) << (PAGE_SHIFT-10))
1296
1297/*
1298 * Show free area list (used inside shift_scroll-lock stuff)
1299 * We also calculate the percentage fragmentation. We do this by counting the
1300 * memory on each free list with the exception of the first item on the list.
1301 */
1302void show_free_areas(void)
1303{
1da177e4
LT
1304 int cpu, temperature;
1305 unsigned long active;
1306 unsigned long inactive;
1307 unsigned long free;
1308 struct zone *zone;
1309
1310 for_each_zone(zone) {
1311 show_node(zone);
1312 printk("%s per-cpu:", zone->name);
1313
f3fe6512 1314 if (!populated_zone(zone)) {
1da177e4
LT
1315 printk(" empty\n");
1316 continue;
1317 } else
1318 printk("\n");
1319
6b482c67 1320 for_each_online_cpu(cpu) {
1da177e4
LT
1321 struct per_cpu_pageset *pageset;
1322
e7c8d5c9 1323 pageset = zone_pcp(zone, cpu);
1da177e4
LT
1324
1325 for (temperature = 0; temperature < 2; temperature++)
2d92c5c9 1326 printk("cpu %d %s: high %d, batch %d used:%d\n",
1da177e4
LT
1327 cpu,
1328 temperature ? "cold" : "hot",
1da177e4 1329 pageset->pcp[temperature].high,
4ae7c039
CL
1330 pageset->pcp[temperature].batch,
1331 pageset->pcp[temperature].count);
1da177e4
LT
1332 }
1333 }
1334
1da177e4
LT
1335 get_zone_counts(&active, &inactive, &free);
1336
1da177e4
LT
1337 printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu "
1338 "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
1339 active,
1340 inactive,
b1e7a8fd 1341 global_page_state(NR_FILE_DIRTY),
ce866b34 1342 global_page_state(NR_WRITEBACK),
fd39fc85 1343 global_page_state(NR_UNSTABLE_NFS),
1da177e4 1344 nr_free_pages(),
972d1a7b
CL
1345 global_page_state(NR_SLAB_RECLAIMABLE) +
1346 global_page_state(NR_SLAB_UNRECLAIMABLE),
65ba55f5 1347 global_page_state(NR_FILE_MAPPED),
df849a15 1348 global_page_state(NR_PAGETABLE));
1da177e4
LT
1349
1350 for_each_zone(zone) {
1351 int i;
1352
1353 show_node(zone);
1354 printk("%s"
1355 " free:%lukB"
1356 " min:%lukB"
1357 " low:%lukB"
1358 " high:%lukB"
1359 " active:%lukB"
1360 " inactive:%lukB"
1361 " present:%lukB"
1362 " pages_scanned:%lu"
1363 " all_unreclaimable? %s"
1364 "\n",
1365 zone->name,
1366 K(zone->free_pages),
1367 K(zone->pages_min),
1368 K(zone->pages_low),
1369 K(zone->pages_high),
1370 K(zone->nr_active),
1371 K(zone->nr_inactive),
1372 K(zone->present_pages),
1373 zone->pages_scanned,
1374 (zone->all_unreclaimable ? "yes" : "no")
1375 );
1376 printk("lowmem_reserve[]:");
1377 for (i = 0; i < MAX_NR_ZONES; i++)
1378 printk(" %lu", zone->lowmem_reserve[i]);
1379 printk("\n");
1380 }
1381
1382 for_each_zone(zone) {
8f9de51a 1383 unsigned long nr[MAX_ORDER], flags, order, total = 0;
1da177e4
LT
1384
1385 show_node(zone);
1386 printk("%s: ", zone->name);
f3fe6512 1387 if (!populated_zone(zone)) {
1da177e4
LT
1388 printk("empty\n");
1389 continue;
1390 }
1391
1392 spin_lock_irqsave(&zone->lock, flags);
1393 for (order = 0; order < MAX_ORDER; order++) {
8f9de51a
KK
1394 nr[order] = zone->free_area[order].nr_free;
1395 total += nr[order] << order;
1da177e4
LT
1396 }
1397 spin_unlock_irqrestore(&zone->lock, flags);
8f9de51a
KK
1398 for (order = 0; order < MAX_ORDER; order++)
1399 printk("%lu*%lukB ", nr[order], K(1UL) << order);
1da177e4
LT
1400 printk("= %lukB\n", K(total));
1401 }
1402
1403 show_swap_cache_info();
1404}
1405
1406/*
1407 * Builds allocation fallback zone lists.
1a93205b
CL
1408 *
1409 * Add all populated zones of a node to the zonelist.
1da177e4 1410 */
86356ab1 1411static int __meminit build_zonelists_node(pg_data_t *pgdat,
2f6726e5 1412 struct zonelist *zonelist, int nr_zones, enum zone_type zone_type)
1da177e4 1413{
1a93205b
CL
1414 struct zone *zone;
1415
98d2b0eb 1416 BUG_ON(zone_type >= MAX_NR_ZONES);
2f6726e5 1417 zone_type++;
02a68a5e
CL
1418
1419 do {
2f6726e5 1420 zone_type--;
070f8032 1421 zone = pgdat->node_zones + zone_type;
1a93205b 1422 if (populated_zone(zone)) {
070f8032
CL
1423 zonelist->zones[nr_zones++] = zone;
1424 check_highest_zone(zone_type);
1da177e4 1425 }
02a68a5e 1426
2f6726e5 1427 } while (zone_type);
070f8032 1428 return nr_zones;
1da177e4
LT
1429}
1430
1431#ifdef CONFIG_NUMA
1432#define MAX_NODE_LOAD (num_online_nodes())
86356ab1 1433static int __meminitdata node_load[MAX_NUMNODES];
1da177e4 1434/**
4dc3b16b 1435 * find_next_best_node - find the next node that should appear in a given node's fallback list
1da177e4
LT
1436 * @node: node whose fallback list we're appending
1437 * @used_node_mask: nodemask_t of already used nodes
1438 *
1439 * We use a number of factors to determine which is the next node that should
1440 * appear on a given node's fallback list. The node should not have appeared
1441 * already in @node's fallback list, and it should be the next closest node
1442 * according to the distance array (which contains arbitrary distance values
1443 * from each node to each node in the system), and should also prefer nodes
1444 * with no CPUs, since presumably they'll have very little allocation pressure
1445 * on them otherwise.
1446 * It returns -1 if no node is found.
1447 */
86356ab1 1448static int __meminit find_next_best_node(int node, nodemask_t *used_node_mask)
1da177e4 1449{
4cf808eb 1450 int n, val;
1da177e4
LT
1451 int min_val = INT_MAX;
1452 int best_node = -1;
1453
4cf808eb
LT
1454 /* Use the local node if we haven't already */
1455 if (!node_isset(node, *used_node_mask)) {
1456 node_set(node, *used_node_mask);
1457 return node;
1458 }
1da177e4 1459
4cf808eb
LT
1460 for_each_online_node(n) {
1461 cpumask_t tmp;
1da177e4
LT
1462
1463 /* Don't want a node to appear more than once */
1464 if (node_isset(n, *used_node_mask))
1465 continue;
1466
1da177e4
LT
1467 /* Use the distance array to find the distance */
1468 val = node_distance(node, n);
1469
4cf808eb
LT
1470 /* Penalize nodes under us ("prefer the next node") */
1471 val += (n < node);
1472
1da177e4
LT
1473 /* Give preference to headless and unused nodes */
1474 tmp = node_to_cpumask(n);
1475 if (!cpus_empty(tmp))
1476 val += PENALTY_FOR_NODE_WITH_CPUS;
1477
1478 /* Slight preference for less loaded node */
1479 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
1480 val += node_load[n];
1481
1482 if (val < min_val) {
1483 min_val = val;
1484 best_node = n;
1485 }
1486 }
1487
1488 if (best_node >= 0)
1489 node_set(best_node, *used_node_mask);
1490
1491 return best_node;
1492}
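
/*
 * Rough feel for the scoring above (my illustration, not from the code):
 * a candidate node's base cost is node_distance(), plus 1 if it is
 * numerically below @node, plus PENALTY_FOR_NODE_WITH_CPUS if it has CPUs;
 * that sum is scaled by MAX_NODE_LOAD * MAX_NUMNODES before node_load[] is
 * added, so distance dominates and accumulated load only breaks ties.
 */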
1493
86356ab1 1494static void __meminit build_zonelists(pg_data_t *pgdat)
1da177e4 1495{
19655d34
CL
1496 int j, node, local_node;
1497 enum zone_type i;
1da177e4
LT
1498 int prev_node, load;
1499 struct zonelist *zonelist;
1500 nodemask_t used_mask;
1501
1502 /* initialize zonelists */
19655d34 1503 for (i = 0; i < MAX_NR_ZONES; i++) {
1da177e4
LT
1504 zonelist = pgdat->node_zonelists + i;
1505 zonelist->zones[0] = NULL;
1506 }
1507
1508 /* NUMA-aware ordering of nodes */
1509 local_node = pgdat->node_id;
1510 load = num_online_nodes();
1511 prev_node = local_node;
1512 nodes_clear(used_mask);
1513 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
9eeff239
CL
1514 int distance = node_distance(local_node, node);
1515
1516 /*
1517 * If another node is sufficiently far away then it is better
1518 * to reclaim pages in a zone before going off node.
1519 */
1520 if (distance > RECLAIM_DISTANCE)
1521 zone_reclaim_mode = 1;
1522
1da177e4
LT
1523 /*
1524 * We don't want to pressure a particular node.
1525 * So adding penalty to the first node in same
1526 * distance group to make it round-robin.
1527 */
9eeff239
CL
1528
1529 if (distance != node_distance(local_node, prev_node))
1da177e4
LT
1530 node_load[node] += load;
1531 prev_node = node;
1532 load--;
19655d34 1533 for (i = 0; i < MAX_NR_ZONES; i++) {
1da177e4
LT
1534 zonelist = pgdat->node_zonelists + i;
1535 for (j = 0; zonelist->zones[j] != NULL; j++);
1536
19655d34 1537 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
1da177e4
LT
1538 zonelist->zones[j] = NULL;
1539 }
1540 }
1541}
1542
1543#else /* CONFIG_NUMA */
1544
86356ab1 1545static void __meminit build_zonelists(pg_data_t *pgdat)
1da177e4 1546{
19655d34
CL
1547 int node, local_node;
1548 enum zone_type i,j;
1da177e4
LT
1549
1550 local_node = pgdat->node_id;
19655d34 1551 for (i = 0; i < MAX_NR_ZONES; i++) {
1da177e4
LT
1552 struct zonelist *zonelist;
1553
1554 zonelist = pgdat->node_zonelists + i;
1555
19655d34 1556 j = build_zonelists_node(pgdat, zonelist, 0, i);
1da177e4
LT
1557 /*
1558 * Now we build the zonelist so that it contains the zones
1559 * of all the other nodes.
1560 * We don't want to pressure a particular node, so when
1561 * building the zones for node N, we make sure that the
1562 * zones coming right after the local ones are those from
1563 * node N+1 (modulo N)
1564 */
1565 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
1566 if (!node_online(node))
1567 continue;
19655d34 1568 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
1da177e4
LT
1569 }
1570 for (node = 0; node < local_node; node++) {
1571 if (!node_online(node))
1572 continue;
19655d34 1573 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
1da177e4
LT
1574 }
1575
1576 zonelist->zones[j] = NULL;
1577 }
1578}
1579
1580#endif /* CONFIG_NUMA */
1581
6811378e
YG
1582/* return values int ....just for stop_machine_run() */
1583static int __meminit __build_all_zonelists(void *dummy)
1da177e4 1584{
6811378e
YG
1585 int nid;
1586 for_each_online_node(nid)
1587 build_zonelists(NODE_DATA(nid));
1588 return 0;
1589}
1590
1591void __meminit build_all_zonelists(void)
1592{
1593 if (system_state == SYSTEM_BOOTING) {
1594 __build_all_zonelists(0);
1595 cpuset_init_current_mems_allowed();
1596 } else {
		/* we have to stop all cpus to guarantee there is no user
		   of zonelist */
1599 stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
1600 /* cpuset refresh routine should be here */
1601 }
bd1e22b8
AM
1602 vm_total_pages = nr_free_pagecache_pages();
1603 printk("Built %i zonelists. Total pages: %ld\n",
1604 num_online_nodes(), vm_total_pages);
1da177e4
LT
1605}
1606
1607/*
1608 * Helper functions to size the waitqueue hash table.
1609 * Essentially these want to choose hash table sizes sufficiently
1610 * large so that collisions trying to wait on pages are rare.
1611 * But in fact, the number of active page waitqueues on typical
1612 * systems is ridiculously low, less than 200. So this is even
1613 * conservative, even though it seems large.
1614 *
1615 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
1616 * waitqueues, i.e. the size of the waitq table given the number of pages.
1617 */
1618#define PAGES_PER_WAITQUEUE 256
1619
cca448fe 1620#ifndef CONFIG_MEMORY_HOTPLUG
02b694de 1621static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
1da177e4
LT
1622{
1623 unsigned long size = 1;
1624
1625 pages /= PAGES_PER_WAITQUEUE;
1626
1627 while (size < pages)
1628 size <<= 1;
1629
1630 /*
1631 * Once we have dozens or even hundreds of threads sleeping
1632 * on IO we've got bigger problems than wait queue collision.
1633 * Limit the size of the wait table to a reasonable size.
1634 */
1635 size = min(size, 4096UL);
1636
1637 return max(size, 4UL);
1638}
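
/*
 * Example (my arithmetic, not from the code above): a 512MB zone with 4KB
 * pages spans 131072 pages; 131072 / PAGES_PER_WAITQUEUE = 512, already a
 * power of two, so the zone gets a 512-entry wait table, comfortably inside
 * the 4..4096 clamp.
 */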
cca448fe
YG
1639#else
1640/*
1641 * A zone's size might be changed by hot-add, so it is not possible to determine
1642 * a suitable size for its wait_table. So we use the maximum size now.
1643 *
1644 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
1645 *
1646 * i386 (preemption config) : 4096 x 16 = 64Kbyte.
1647 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
1648 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
1649 *
1650 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
1651 * or more by the traditional way. (See above). It equals:
1652 *
1653 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
1654 * ia64(16K page size) : = ( 8G + 4M)byte.
1655 * powerpc (64K page size) : = (32G +16M)byte.
1656 */
1657static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
1658{
1659 return 4096UL;
1660}
1661#endif
1da177e4
LT
1662
1663/*
1664 * This is an integer logarithm so that shifts can be used later
1665 * to extract the more random high bits from the multiplicative
1666 * hash function before the remainder is taken.
1667 */
1668static inline unsigned long wait_table_bits(unsigned long size)
1669{
1670 return ffz(~size);
1671}
1672
1673#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
1674
1da177e4
LT
1675/*
1676 * Initially all pages are reserved - free ones are freed
1677 * up by free_all_bootmem() once the early boot process is
1678 * done. Non-atomic initialization, single-pass.
1679 */
c09b4240 1680void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
1da177e4
LT
1681 unsigned long start_pfn)
1682{
1da177e4 1683 struct page *page;
29751f69
AW
1684 unsigned long end_pfn = start_pfn + size;
1685 unsigned long pfn;
1da177e4 1686
cbe8dd4a 1687 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
d41dee36
AW
1688 if (!early_pfn_valid(pfn))
1689 continue;
1690 page = pfn_to_page(pfn);
1691 set_page_links(page, zone, nid, pfn);
7835e98b 1692 init_page_count(page);
1da177e4
LT
1693 reset_page_mapcount(page);
1694 SetPageReserved(page);
1695 INIT_LIST_HEAD(&page->lru);
1696#ifdef WANT_PAGE_VIRTUAL
1697 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
1698 if (!is_highmem_idx(zone))
3212c6be 1699 set_page_address(page, __va(pfn << PAGE_SHIFT));
1da177e4 1700#endif
1da177e4
LT
1701 }
1702}
1703
1704void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
1705 unsigned long size)
1706{
1707 int order;
1708 for (order = 0; order < MAX_ORDER ; order++) {
1709 INIT_LIST_HEAD(&zone->free_area[order].free_list);
1710 zone->free_area[order].nr_free = 0;
1711 }
1712}
1713
d41dee36 1714#define ZONETABLE_INDEX(x, zone_nr) ((x << ZONES_SHIFT) | zone_nr)
2f1b6248
CL
1715void zonetable_add(struct zone *zone, int nid, enum zone_type zid,
1716 unsigned long pfn, unsigned long size)
d41dee36
AW
1717{
1718 unsigned long snum = pfn_to_section_nr(pfn);
1719 unsigned long end = pfn_to_section_nr(pfn + size);
1720
1721 if (FLAGS_HAS_NODE)
1722 zone_table[ZONETABLE_INDEX(nid, zid)] = zone;
1723 else
1724 for (; snum <= end; snum++)
1725 zone_table[ZONETABLE_INDEX(snum, zid)] = zone;
1726}
1727
1da177e4
LT
1728#ifndef __HAVE_ARCH_MEMMAP_INIT
1729#define memmap_init(size, nid, zone, start_pfn) \
1730 memmap_init_zone((size), (nid), (zone), (start_pfn))
1731#endif
1732
6292d9aa 1733static int __cpuinit zone_batchsize(struct zone *zone)
e7c8d5c9
CL
1734{
1735 int batch;
1736
1737 /*
1738 * The per-cpu-pages pools are set to around 1000th of the
ba56e91c 1739 * size of the zone. But no more than 1/2 of a meg.
e7c8d5c9
CL
1740 *
1741 * OK, so we don't know how big the cache is. So guess.
1742 */
1743 batch = zone->present_pages / 1024;
ba56e91c
SR
1744 if (batch * PAGE_SIZE > 512 * 1024)
1745 batch = (512 * 1024) / PAGE_SIZE;
e7c8d5c9
CL
1746 batch /= 4; /* We effectively *= 4 below */
1747 if (batch < 1)
1748 batch = 1;
1749
1750 /*
0ceaacc9
NP
1751 * Clamp the batch to a 2^n - 1 value. Having a power
1752 * of 2 value was found to be more likely to have
1753 * suboptimal cache aliasing properties in some cases.
e7c8d5c9 1754 *
0ceaacc9
NP
1755 * For example if 2 tasks are alternately allocating
1756 * batches of pages, one task can end up with a lot
1757 * of pages of one half of the possible page colors
1758 * and the other with pages of the other colors.
e7c8d5c9 1759 */
0ceaacc9 1760 batch = (1 << (fls(batch + batch/2)-1)) - 1;
ba56e91c 1761
e7c8d5c9
CL
1762 return batch;
1763}
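/*
 * Worked example, assuming 4K pages: a 1GB zone has present_pages == 262144,
 * so batch starts at 262144/1024 == 256, is capped to half a megabyte's worth
 * of pages (128), drops to 32 after the /4, and the final clamp yields
 * (1 << (fls(32 + 16) - 1)) - 1 == 31.
 */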
1764
2caaad41
CL
1765inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
1766{
1767 struct per_cpu_pages *pcp;
1768
1c6fe946
MD
1769 memset(p, 0, sizeof(*p));
1770
2caaad41
CL
1771 pcp = &p->pcp[0]; /* hot */
1772 pcp->count = 0;
2caaad41
CL
1773 pcp->high = 6 * batch;
1774 pcp->batch = max(1UL, 1 * batch);
1775 INIT_LIST_HEAD(&pcp->list);
1776
1777 pcp = &p->pcp[1]; /* cold*/
1778 pcp->count = 0;
2caaad41 1779 pcp->high = 2 * batch;
e46a5e28 1780 pcp->batch = max(1UL, batch/2);
2caaad41
CL
1781 INIT_LIST_HEAD(&pcp->list);
1782}
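/*
 * Example: with batch == 31 (see zone_batchsize() above), the hot list may
 * grow to high == 186 pages and moves pages in batches of 31, while the cold
 * list is trimmed at high == 62 and uses a batch of 15.
 */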
1783
8ad4b1fb
RS
1784/*
1785 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
1786 * to the value high for the pageset p.
1787 */
1788
1789static void setup_pagelist_highmark(struct per_cpu_pageset *p,
1790 unsigned long high)
1791{
1792 struct per_cpu_pages *pcp;
1793
1794 pcp = &p->pcp[0]; /* hot list */
1795 pcp->high = high;
1796 pcp->batch = max(1UL, high/4);
1797 if ((high/4) > (PAGE_SHIFT * 8))
1798 pcp->batch = PAGE_SHIFT * 8;
1799}
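/*
 * Worked example, assuming 4K pages: percpu_pagelist_fraction == 8 on a
 * 262144-page zone gives high == 32768; high/4 == 8192 exceeds
 * PAGE_SHIFT * 8 == 96, so the batch is capped at 96 pages.
 */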
1800
1801
e7c8d5c9
CL
1802#ifdef CONFIG_NUMA
1803/*
2caaad41
CL
1804 * Boot pageset table. One per cpu which is going to be used for all
1805 * zones and all nodes. The parameters will be set in such a way
1806 * that an item put on a list will immediately be handed over to
1807 * the buddy list. This is safe since pageset manipulation is done
1808 * with interrupts disabled.
1809 *
1810 * Some NUMA counter updates may also be caught by the boot pagesets.
b7c84c6a
CL
1811 *
1812 * The boot_pagesets must be kept even after bootup is complete for
1813 * unused processors and/or zones. They do play a role for bootstrapping
1814 * hotplugged processors.
1815 *
1816 * zoneinfo_show() and maybe other functions do
1817 * not check if the processor is online before following the pageset pointer.
1818 * Other parts of the kernel may not check if the zone is available.
2caaad41 1819 */
88a2a4ac 1820static struct per_cpu_pageset boot_pageset[NR_CPUS];
2caaad41
CL
1821
1822/*
1823 * Dynamically allocate memory for the
e7c8d5c9
CL
1824 * per cpu pageset array in struct zone.
1825 */
6292d9aa 1826static int __cpuinit process_zones(int cpu)
e7c8d5c9
CL
1827{
1828 struct zone *zone, *dzone;
e7c8d5c9
CL
1829
1830 for_each_zone(zone) {
e7c8d5c9 1831
23316bc8 1832 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
e7c8d5c9 1833 GFP_KERNEL, cpu_to_node(cpu));
23316bc8 1834 if (!zone_pcp(zone, cpu))
e7c8d5c9 1835 goto bad;
e7c8d5c9 1836
23316bc8 1837 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
8ad4b1fb
RS
1838
1839 if (percpu_pagelist_fraction)
1840 setup_pagelist_highmark(zone_pcp(zone, cpu),
1841 (zone->present_pages / percpu_pagelist_fraction));
e7c8d5c9
CL
1842 }
1843
1844 return 0;
1845bad:
1846 for_each_zone(dzone) {
1847 if (dzone == zone)
1848 break;
23316bc8
NP
1849 kfree(zone_pcp(dzone, cpu));
1850 zone_pcp(dzone, cpu) = NULL;
e7c8d5c9
CL
1851 }
1852 return -ENOMEM;
1853}
1854
1855static inline void free_zone_pagesets(int cpu)
1856{
e7c8d5c9
CL
1857 struct zone *zone;
1858
1859 for_each_zone(zone) {
1860 struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
1861
f3ef9ead
DR
1862 /* Free per_cpu_pageset if it is slab allocated */
1863 if (pset != &boot_pageset[cpu])
1864 kfree(pset);
e7c8d5c9 1865 zone_pcp(zone, cpu) = NULL;
e7c8d5c9 1866 }
e7c8d5c9
CL
1867}
1868
9c7b216d 1869static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
e7c8d5c9
CL
1870 unsigned long action,
1871 void *hcpu)
1872{
1873 int cpu = (long)hcpu;
1874 int ret = NOTIFY_OK;
1875
1876 switch (action) {
1877 case CPU_UP_PREPARE:
1878 if (process_zones(cpu))
1879 ret = NOTIFY_BAD;
1880 break;
b0d41693 1881 case CPU_UP_CANCELED:
e7c8d5c9
CL
1882 case CPU_DEAD:
1883 free_zone_pagesets(cpu);
1884 break;
e7c8d5c9
CL
1885 default:
1886 break;
1887 }
1888 return ret;
1889}
1890
74b85f37 1891static struct notifier_block __cpuinitdata pageset_notifier =
e7c8d5c9
CL
1892 { &pageset_cpuup_callback, NULL, 0 };
1893
78d9955b 1894void __init setup_per_cpu_pageset(void)
e7c8d5c9
CL
1895{
1896 int err;
1897
1898 /* Initialize per_cpu_pageset for cpu 0.
1899 * A cpuup callback will do this for every cpu
1900 * as it comes online
1901 */
1902 err = process_zones(smp_processor_id());
1903 BUG_ON(err);
1904 register_cpu_notifier(&pageset_notifier);
1905}
1906
1907#endif
1908
c09b4240 1909static __meminit
cca448fe 1910int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
ed8ece2e
DH
1911{
1912 int i;
1913 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe 1914 size_t alloc_size;
ed8ece2e
DH
1915
1916 /*
1917 * The per-page waitqueue mechanism uses hashed waitqueues
1918 * per zone.
1919 */
02b694de
YG
1920 zone->wait_table_hash_nr_entries =
1921 wait_table_hash_nr_entries(zone_size_pages);
1922 zone->wait_table_bits =
1923 wait_table_bits(zone->wait_table_hash_nr_entries);
cca448fe
YG
1924 alloc_size = zone->wait_table_hash_nr_entries
1925 * sizeof(wait_queue_head_t);
1926
1927 if (system_state == SYSTEM_BOOTING) {
1928 zone->wait_table = (wait_queue_head_t *)
1929 alloc_bootmem_node(pgdat, alloc_size);
1930 } else {
1931 /*
 1932		 * This case means that a zone whose size was 0 gets new memory
 1933		 * via memory hot-add.
 1934		 * But it may be that a whole new node was hot-added.  In that
 1935		 * case vmalloc() cannot yet use the new node's memory - ideally
 1936		 * this wait_table would be allocated on the new node
 1937		 * itself.
 1938		 * Making use of the new node's memory here will require
 1939		 * further work.
1940 */
1941 zone->wait_table = (wait_queue_head_t *)vmalloc(alloc_size);
1942 }
1943 if (!zone->wait_table)
1944 return -ENOMEM;
ed8ece2e 1945
02b694de 1946 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
ed8ece2e 1947 init_waitqueue_head(zone->wait_table + i);
cca448fe
YG
1948
1949 return 0;
ed8ece2e
DH
1950}
1951
c09b4240 1952static __meminit void zone_pcp_init(struct zone *zone)
ed8ece2e
DH
1953{
1954 int cpu;
1955 unsigned long batch = zone_batchsize(zone);
1956
1957 for (cpu = 0; cpu < NR_CPUS; cpu++) {
1958#ifdef CONFIG_NUMA
1959 /* Early boot. Slab allocator not functional yet */
23316bc8 1960 zone_pcp(zone, cpu) = &boot_pageset[cpu];
ed8ece2e
DH
1961 setup_pageset(&boot_pageset[cpu],0);
1962#else
1963 setup_pageset(zone_pcp(zone,cpu), batch);
1964#endif
1965 }
f5335c0f
AB
1966 if (zone->present_pages)
1967 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
1968 zone->name, zone->present_pages, batch);
ed8ece2e
DH
1969}
1970
718127cc
YG
1971__meminit int init_currently_empty_zone(struct zone *zone,
1972 unsigned long zone_start_pfn,
1973 unsigned long size)
ed8ece2e
DH
1974{
1975 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe
YG
1976 int ret;
1977 ret = zone_wait_table_init(zone, size);
1978 if (ret)
1979 return ret;
ed8ece2e
DH
1980 pgdat->nr_zones = zone_idx(zone) + 1;
1981
ed8ece2e
DH
1982 zone->zone_start_pfn = zone_start_pfn;
1983
1984 memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
1985
1986 zone_init_free_lists(pgdat, zone, zone->spanned_pages);
718127cc
YG
1987
1988 return 0;
ed8ece2e
DH
1989}
1990
c713216d
MG
1991#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
1992/*
1993 * Basic iterator support. Return the first range of PFNs for a node
1994 * Note: nid == MAX_NUMNODES returns first region regardless of node
1995 */
1996static int __init first_active_region_index_in_nid(int nid)
1997{
1998 int i;
1999
2000 for (i = 0; i < nr_nodemap_entries; i++)
2001 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
2002 return i;
2003
2004 return -1;
2005}
2006
2007/*
2008 * Basic iterator support. Return the next active range of PFNs for a node
 2009 * Note: nid == MAX_NUMNODES returns next region regardless of node
2010 */
2011static int __init next_active_region_index_in_nid(int index, int nid)
2012{
2013 for (index = index + 1; index < nr_nodemap_entries; index++)
2014 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
2015 return index;
2016
2017 return -1;
2018}
2019
2020#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
2021/*
2022 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
2023 * Architectures may implement their own version but if add_active_range()
2024 * was used and there are no special requirements, this is a convenient
2025 * alternative
2026 */
2027int __init early_pfn_to_nid(unsigned long pfn)
2028{
2029 int i;
2030
2031 for (i = 0; i < nr_nodemap_entries; i++) {
2032 unsigned long start_pfn = early_node_map[i].start_pfn;
2033 unsigned long end_pfn = early_node_map[i].end_pfn;
2034
2035 if (start_pfn <= pfn && pfn < end_pfn)
2036 return early_node_map[i].nid;
2037 }
2038
2039 return 0;
2040}
2041#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
2042
2043/* Basic iterator support to walk early_node_map[] */
2044#define for_each_active_range_index_in_nid(i, nid) \
2045 for (i = first_active_region_index_in_nid(nid); i != -1; \
2046 i = next_active_region_index_in_nid(i, nid))
2047
2048/**
2049 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
2050 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed
 2051 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
2052 *
2053 * If an architecture guarantees that all ranges registered with
 2054 * add_active_range() contain no holes and may be freed, then this
 2055 * function may be used instead of calling free_bootmem() manually.
2056 */
2057void __init free_bootmem_with_active_regions(int nid,
2058 unsigned long max_low_pfn)
2059{
2060 int i;
2061
2062 for_each_active_range_index_in_nid(i, nid) {
2063 unsigned long size_pages = 0;
2064 unsigned long end_pfn = early_node_map[i].end_pfn;
2065
2066 if (early_node_map[i].start_pfn >= max_low_pfn)
2067 continue;
2068
2069 if (end_pfn > max_low_pfn)
2070 end_pfn = max_low_pfn;
2071
2072 size_pages = end_pfn - early_node_map[i].start_pfn;
2073 free_bootmem_node(NODE_DATA(early_node_map[i].nid),
2074 PFN_PHYS(early_node_map[i].start_pfn),
2075 size_pages << PAGE_SHIFT);
2076 }
2077}
2078
2079/**
2080 * sparse_memory_present_with_active_regions - Call memory_present for each active range
2081 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used
2082 *
2083 * If an architecture guarantees that all ranges registered with
 2084 * add_active_range() contain no holes and may be freed, then this
 2085 * function may be used instead of calling memory_present() manually.
2086 */
2087void __init sparse_memory_present_with_active_regions(int nid)
2088{
2089 int i;
2090
2091 for_each_active_range_index_in_nid(i, nid)
2092 memory_present(early_node_map[i].nid,
2093 early_node_map[i].start_pfn,
2094 early_node_map[i].end_pfn);
2095}
2096
2097/**
2098 * get_pfn_range_for_nid - Return the start and end page frames for a node
2099 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned
2100 * @start_pfn: Passed by reference. On return, it will have the node start_pfn
2101 * @end_pfn: Passed by reference. On return, it will have the node end_pfn
2102 *
2103 * It returns the start and end page frame of a node based on information
2104 * provided by an arch calling add_active_range(). If called for a node
2105 * with no available memory, a warning is printed and the start and end
2106 * PFNs will be 0
2107 */
2108void __init get_pfn_range_for_nid(unsigned int nid,
2109 unsigned long *start_pfn, unsigned long *end_pfn)
2110{
2111 int i;
2112 *start_pfn = -1UL;
2113 *end_pfn = 0;
2114
2115 for_each_active_range_index_in_nid(i, nid) {
2116 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
2117 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
2118 }
2119
2120 if (*start_pfn == -1UL) {
2121 printk(KERN_WARNING "Node %u active with no memory\n", nid);
2122 *start_pfn = 0;
2123 }
2124}
2125
2126/*
2127 * Return the number of pages a zone spans in a node, including holes
2128 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
2129 */
2130unsigned long __init zone_spanned_pages_in_node(int nid,
2131 unsigned long zone_type,
2132 unsigned long *ignored)
2133{
2134 unsigned long node_start_pfn, node_end_pfn;
2135 unsigned long zone_start_pfn, zone_end_pfn;
2136
2137 /* Get the start and end of the node and zone */
2138 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
2139 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
2140 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
2141
2142 /* Check that this node has pages within the zone's required range */
2143 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
2144 return 0;
2145
2146 /* Move the zone boundaries inside the node if necessary */
2147 zone_end_pfn = min(zone_end_pfn, node_end_pfn);
2148 zone_start_pfn = max(zone_start_pfn, node_start_pfn);
2149
2150 /* Return the spanned pages */
2151 return zone_end_pfn - zone_start_pfn;
2152}
2153
2154/*
 2155 * Return the number of page frames in holes in a range on a node. If nid is
 2156 * MAX_NUMNODES, then all holes in the requested range will be accounted for
2156 * then all holes in the requested range will be accounted for
2157 */
2158unsigned long __init __absent_pages_in_range(int nid,
2159 unsigned long range_start_pfn,
2160 unsigned long range_end_pfn)
2161{
2162 int i = 0;
2163 unsigned long prev_end_pfn = 0, hole_pages = 0;
2164 unsigned long start_pfn;
2165
2166 /* Find the end_pfn of the first active range of pfns in the node */
2167 i = first_active_region_index_in_nid(nid);
2168 if (i == -1)
2169 return 0;
2170
2171 prev_end_pfn = early_node_map[i].start_pfn;
2172
2173 /* Find all holes for the zone within the node */
2174 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
2175
2176 /* No need to continue if prev_end_pfn is outside the zone */
2177 if (prev_end_pfn >= range_end_pfn)
2178 break;
2179
2180 /* Make sure the end of the zone is not within the hole */
2181 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
2182 prev_end_pfn = max(prev_end_pfn, range_start_pfn);
2183
 2184		/* Update the hole size count and move on */
2185 if (start_pfn > range_start_pfn) {
2186 BUG_ON(prev_end_pfn > start_pfn);
2187 hole_pages += start_pfn - prev_end_pfn;
2188 }
2189 prev_end_pfn = early_node_map[i].end_pfn;
2190 }
2191
2192 return hole_pages;
2193}
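/*
 * Worked example: if a node registered the active ranges [0, 256) and
 * [512, 1024), then __absent_pages_in_range(nid, 0, 1024) finds the gap
 * between PFN 256 and PFN 512 and returns hole_pages == 256.
 */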
2194
2195/**
2196 * absent_pages_in_range - Return number of page frames in holes within a range
2197 * @start_pfn: The start PFN to start searching for holes
2198 * @end_pfn: The end PFN to stop searching for holes
2199 *
 2200 * It returns the number of page frames in memory holes within a range
2201 */
2202unsigned long __init absent_pages_in_range(unsigned long start_pfn,
2203 unsigned long end_pfn)
2204{
2205 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
2206}
2207
2208/* Return the number of page frames in holes in a zone on a node */
2209unsigned long __init zone_absent_pages_in_node(int nid,
2210 unsigned long zone_type,
2211 unsigned long *ignored)
2212{
2213 return __absent_pages_in_range(nid,
2214 arch_zone_lowest_possible_pfn[zone_type],
2215 arch_zone_highest_possible_pfn[zone_type]);
2216}
0e0b864e
MG
2217
 2218/* Return the zone index that the given memmap page falls in */
2219int memmap_zone_idx(struct page *lmem_map)
2220{
2221 int i;
2222 unsigned long phys_addr = virt_to_phys(lmem_map);
2223 unsigned long pfn = phys_addr >> PAGE_SHIFT;
2224
2225 for (i = 0; i < MAX_NR_ZONES; i++)
2226 if (pfn < arch_zone_highest_possible_pfn[i])
2227 break;
2228
2229 return i;
2230}
c713216d
MG
2231#else
2232static inline unsigned long zone_spanned_pages_in_node(int nid,
2233 unsigned long zone_type,
2234 unsigned long *zones_size)
2235{
2236 return zones_size[zone_type];
2237}
2238
2239static inline unsigned long zone_absent_pages_in_node(int nid,
2240 unsigned long zone_type,
2241 unsigned long *zholes_size)
2242{
2243 if (!zholes_size)
2244 return 0;
2245
2246 return zholes_size[zone_type];
2247}
0e0b864e
MG
2248
2249static inline int memmap_zone_idx(struct page *lmem_map)
2250{
2251 return MAX_NR_ZONES;
2252}
c713216d
MG
2253#endif
2254
2255static void __init calculate_node_totalpages(struct pglist_data *pgdat,
2256 unsigned long *zones_size, unsigned long *zholes_size)
2257{
2258 unsigned long realtotalpages, totalpages = 0;
2259 enum zone_type i;
2260
2261 for (i = 0; i < MAX_NR_ZONES; i++)
2262 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
2263 zones_size);
2264 pgdat->node_spanned_pages = totalpages;
2265
2266 realtotalpages = totalpages;
2267 for (i = 0; i < MAX_NR_ZONES; i++)
2268 realtotalpages -=
2269 zone_absent_pages_in_node(pgdat->node_id, i,
2270 zholes_size);
2271 pgdat->node_present_pages = realtotalpages;
2272 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
2273 realtotalpages);
2274}
2275
1da177e4
LT
2276/*
2277 * Set up the zone data structures:
2278 * - mark all pages reserved
2279 * - mark all memory queues empty
2280 * - clear the memory bitmaps
2281 */
86356ab1 2282static void __meminit free_area_init_core(struct pglist_data *pgdat,
1da177e4
LT
2283 unsigned long *zones_size, unsigned long *zholes_size)
2284{
2f1b6248 2285 enum zone_type j;
ed8ece2e 2286 int nid = pgdat->node_id;
1da177e4 2287 unsigned long zone_start_pfn = pgdat->node_start_pfn;
718127cc 2288 int ret;
1da177e4 2289
208d54e5 2290 pgdat_resize_init(pgdat);
1da177e4
LT
2291 pgdat->nr_zones = 0;
2292 init_waitqueue_head(&pgdat->kswapd_wait);
2293 pgdat->kswapd_max_order = 0;
2294
2295 for (j = 0; j < MAX_NR_ZONES; j++) {
2296 struct zone *zone = pgdat->node_zones + j;
0e0b864e 2297 unsigned long size, realsize, memmap_pages;
1da177e4 2298
c713216d
MG
2299 size = zone_spanned_pages_in_node(nid, j, zones_size);
2300 realsize = size - zone_absent_pages_in_node(nid, j,
2301 zholes_size);
1da177e4 2302
0e0b864e
MG
2303 /*
2304 * Adjust realsize so that it accounts for how much memory
2305 * is used by this zone for memmap. This affects the watermark
2306 * and per-cpu initialisations
2307 */
2308 memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT;
2309 if (realsize >= memmap_pages) {
2310 realsize -= memmap_pages;
2311 printk(KERN_DEBUG
2312 " %s zone: %lu pages used for memmap\n",
2313 zone_names[j], memmap_pages);
2314 } else
2315 printk(KERN_WARNING
2316 " %s zone: %lu pages exceeds realsize %lu\n",
2317 zone_names[j], memmap_pages, realsize);
2318
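		/*
		 * Worked example (illustrative only; sizeof(struct page) is
		 * config dependent): with 4K pages and a 56-byte struct page,
		 * a 262144-page zone spends (262144 * 56) >> 12 == 3584 pages,
		 * roughly 14MB, on its memmap.
		 */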
2319 /* Account for reserved DMA pages */
2320 if (j == ZONE_DMA && realsize > dma_reserve) {
2321 realsize -= dma_reserve;
2322 printk(KERN_DEBUG " DMA zone: %lu pages reserved\n",
2323 dma_reserve);
2324 }
2325
98d2b0eb 2326 if (!is_highmem_idx(j))
1da177e4
LT
2327 nr_kernel_pages += realsize;
2328 nr_all_pages += realsize;
2329
2330 zone->spanned_pages = size;
2331 zone->present_pages = realsize;
9614634f 2332#ifdef CONFIG_NUMA
8417bba4 2333 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
9614634f 2334 / 100;
0ff38490 2335 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
9614634f 2336#endif
1da177e4
LT
2337 zone->name = zone_names[j];
2338 spin_lock_init(&zone->lock);
2339 spin_lock_init(&zone->lru_lock);
bdc8cb98 2340 zone_seqlock_init(zone);
1da177e4
LT
2341 zone->zone_pgdat = pgdat;
2342 zone->free_pages = 0;
2343
2344 zone->temp_priority = zone->prev_priority = DEF_PRIORITY;
2345
ed8ece2e 2346 zone_pcp_init(zone);
1da177e4
LT
2347 INIT_LIST_HEAD(&zone->active_list);
2348 INIT_LIST_HEAD(&zone->inactive_list);
2349 zone->nr_scan_active = 0;
2350 zone->nr_scan_inactive = 0;
2351 zone->nr_active = 0;
2352 zone->nr_inactive = 0;
2244b95a 2353 zap_zone_vm_stats(zone);
53e9a615 2354 atomic_set(&zone->reclaim_in_progress, 0);
1da177e4
LT
2355 if (!size)
2356 continue;
2357
d41dee36 2358 zonetable_add(zone, nid, j, zone_start_pfn, size);
718127cc
YG
2359 ret = init_currently_empty_zone(zone, zone_start_pfn, size);
2360 BUG_ON(ret);
1da177e4 2361 zone_start_pfn += size;
1da177e4
LT
2362 }
2363}
2364
2365static void __init alloc_node_mem_map(struct pglist_data *pgdat)
2366{
1da177e4
LT
2367 /* Skip empty nodes */
2368 if (!pgdat->node_spanned_pages)
2369 return;
2370
d41dee36 2371#ifdef CONFIG_FLAT_NODE_MEM_MAP
1da177e4
LT
2372 /* ia64 gets its own node_mem_map, before this, without bootmem */
2373 if (!pgdat->node_mem_map) {
e984bb43 2374 unsigned long size, start, end;
d41dee36
AW
2375 struct page *map;
2376
e984bb43
BP
2377 /*
2378 * The zone's endpoints aren't required to be MAX_ORDER
 2379		 * aligned, but the node_mem_map endpoints must be, in order
2380 * for the buddy allocator to function correctly.
2381 */
2382 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
2383 end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
2384 end = ALIGN(end, MAX_ORDER_NR_PAGES);
2385 size = (end - start) * sizeof(struct page);
6f167ec7
DH
2386 map = alloc_remap(pgdat->node_id, size);
2387 if (!map)
2388 map = alloc_bootmem_node(pgdat, size);
e984bb43 2389 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
1da177e4 2390 }
d41dee36 2391#ifdef CONFIG_FLATMEM
1da177e4
LT
2392 /*
2393 * With no DISCONTIG, the global mem_map is just set as node 0's
2394 */
c713216d 2395 if (pgdat == NODE_DATA(0)) {
1da177e4 2396 mem_map = NODE_DATA(0)->node_mem_map;
c713216d
MG
2397#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
2398 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
2399 mem_map -= pgdat->node_start_pfn;
2400#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
2401 }
1da177e4 2402#endif
d41dee36 2403#endif /* CONFIG_FLAT_NODE_MEM_MAP */
1da177e4
LT
2404}
2405
86356ab1 2406void __meminit free_area_init_node(int nid, struct pglist_data *pgdat,
1da177e4
LT
2407 unsigned long *zones_size, unsigned long node_start_pfn,
2408 unsigned long *zholes_size)
2409{
2410 pgdat->node_id = nid;
2411 pgdat->node_start_pfn = node_start_pfn;
c713216d 2412 calculate_node_totalpages(pgdat, zones_size, zholes_size);
1da177e4
LT
2413
2414 alloc_node_mem_map(pgdat);
2415
2416 free_area_init_core(pgdat, zones_size, zholes_size);
2417}
2418
c713216d
MG
2419#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
2420/**
2421 * add_active_range - Register a range of PFNs backed by physical memory
2422 * @nid: The node ID the range resides on
2423 * @start_pfn: The start PFN of the available physical memory
2424 * @end_pfn: The end PFN of the available physical memory
2425 *
2426 * These ranges are stored in an early_node_map[] and later used by
2427 * free_area_init_nodes() to calculate zone sizes and holes. If the
2428 * range spans a memory hole, it is up to the architecture to ensure
2429 * the memory is not freed by the bootmem allocator. If possible
2430 * the range being registered will be merged with existing ranges.
2431 */
2432void __init add_active_range(unsigned int nid, unsigned long start_pfn,
2433 unsigned long end_pfn)
2434{
2435 int i;
2436
2437 printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) "
2438 "%d entries of %d used\n",
2439 nid, start_pfn, end_pfn,
2440 nr_nodemap_entries, MAX_ACTIVE_REGIONS);
2441
2442 /* Merge with existing active regions if possible */
2443 for (i = 0; i < nr_nodemap_entries; i++) {
2444 if (early_node_map[i].nid != nid)
2445 continue;
2446
2447 /* Skip if an existing region covers this new one */
2448 if (start_pfn >= early_node_map[i].start_pfn &&
2449 end_pfn <= early_node_map[i].end_pfn)
2450 return;
2451
2452 /* Merge forward if suitable */
2453 if (start_pfn <= early_node_map[i].end_pfn &&
2454 end_pfn > early_node_map[i].end_pfn) {
2455 early_node_map[i].end_pfn = end_pfn;
2456 return;
2457 }
2458
2459 /* Merge backward if suitable */
2460 if (start_pfn < early_node_map[i].end_pfn &&
2461 end_pfn >= early_node_map[i].start_pfn) {
2462 early_node_map[i].start_pfn = start_pfn;
2463 return;
2464 }
2465 }
2466
2467 /* Check that early_node_map is large enough */
2468 if (i >= MAX_ACTIVE_REGIONS) {
2469 printk(KERN_CRIT "More than %d memory regions, truncating\n",
2470 MAX_ACTIVE_REGIONS);
2471 return;
2472 }
2473
2474 early_node_map[i].nid = nid;
2475 early_node_map[i].start_pfn = start_pfn;
2476 early_node_map[i].end_pfn = end_pfn;
2477 nr_nodemap_entries = i + 1;
2478}
2479
2480/**
2481 * shrink_active_range - Shrink an existing registered range of PFNs
2482 * @nid: The node id the range is on that should be shrunk
2483 * @old_end_pfn: The old end PFN of the range
 2484 * @new_end_pfn: The new end PFN of the range
 2485 *
 2486 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
 2487 * The map is kept at the end of the physical page range that has already been
2488 * registered with add_active_range(). This function allows an arch to shrink
2489 * an existing registered range.
2490 */
2491void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
2492 unsigned long new_end_pfn)
2493{
2494 int i;
2495
2496 /* Find the old active region end and shrink */
2497 for_each_active_range_index_in_nid(i, nid)
2498 if (early_node_map[i].end_pfn == old_end_pfn) {
2499 early_node_map[i].end_pfn = new_end_pfn;
2500 break;
2501 }
2502}
2503
2504/**
2505 * remove_all_active_ranges - Remove all currently registered regions
2506 * During discovery, it may be found that a table like SRAT is invalid
2507 * and an alternative discovery method must be used. This function removes
2508 * all currently registered regions.
2509 */
 2510void __init remove_all_active_ranges(void)
2511{
2512 memset(early_node_map, 0, sizeof(early_node_map));
2513 nr_nodemap_entries = 0;
2514}
2515
2516/* Compare two active node_active_regions */
2517static int __init cmp_node_active_region(const void *a, const void *b)
2518{
2519 struct node_active_region *arange = (struct node_active_region *)a;
2520 struct node_active_region *brange = (struct node_active_region *)b;
2521
2522 /* Done this way to avoid overflows */
2523 if (arange->start_pfn > brange->start_pfn)
2524 return 1;
2525 if (arange->start_pfn < brange->start_pfn)
2526 return -1;
2527
2528 return 0;
2529}
2530
2531/* sort the node_map by start_pfn */
2532static void __init sort_node_map(void)
2533{
2534 sort(early_node_map, (size_t)nr_nodemap_entries,
2535 sizeof(struct node_active_region),
2536 cmp_node_active_region, NULL);
2537}
2538
2539/* Find the lowest pfn for a node. This depends on a sorted early_node_map */
2540unsigned long __init find_min_pfn_for_node(unsigned long nid)
2541{
2542 int i;
2543
2544 /* Assuming a sorted map, the first range found has the starting pfn */
2545 for_each_active_range_index_in_nid(i, nid)
2546 return early_node_map[i].start_pfn;
2547
2548 printk(KERN_WARNING "Could not find start_pfn for node %lu\n", nid);
2549 return 0;
2550}
2551
2552/**
2553 * find_min_pfn_with_active_regions - Find the minimum PFN registered
2554 *
2555 * It returns the minimum PFN based on information provided via
2556 * add_active_range()
2557 */
2558unsigned long __init find_min_pfn_with_active_regions(void)
2559{
2560 return find_min_pfn_for_node(MAX_NUMNODES);
2561}
2562
2563/**
2564 * find_max_pfn_with_active_regions - Find the maximum PFN registered
2565 *
2566 * It returns the maximum PFN based on information provided via
2567 * add_active_range()
2568 */
2569unsigned long __init find_max_pfn_with_active_regions(void)
2570{
2571 int i;
2572 unsigned long max_pfn = 0;
2573
2574 for (i = 0; i < nr_nodemap_entries; i++)
2575 max_pfn = max(max_pfn, early_node_map[i].end_pfn);
2576
2577 return max_pfn;
2578}
2579
2580/**
2581 * free_area_init_nodes - Initialise all pg_data_t and zone data
 2582 * @max_zone_pfn: an array holding, for each zone index, the highest PFN
 2583 * that zone may contain. For example, max_zone_pfn[ZONE_DMA] gives
 2584 * the maximum PFN usable for ZONE_DMA and max_zone_pfn[ZONE_HIGHMEM]
 2585 * the maximum PFN usable for ZONE_HIGHMEM.
2586 *
2587 * This will call free_area_init_node() for each active node in the system.
2588 * Using the page ranges provided by add_active_range(), the size of each
 2589 * zone in each node and their holes are calculated. If the maximum PFNs of
 2590 * two adjacent zones match, it is assumed that the higher zone is empty.
 2591 * For example, if max_zone_pfn[ZONE_DMA] == max_zone_pfn[ZONE_DMA32], it
 2592 * is assumed that ZONE_DMA32 has no pages. It is also assumed that a zone
 2593 * starts where the previous one ended. For example, ZONE_DMA32 starts
 2594 * at max_zone_pfn[ZONE_DMA].
2595 */
2596void __init free_area_init_nodes(unsigned long *max_zone_pfn)
2597{
2598 unsigned long nid;
2599 enum zone_type i;
2600
2601 /* Record where the zone boundaries are */
2602 memset(arch_zone_lowest_possible_pfn, 0,
2603 sizeof(arch_zone_lowest_possible_pfn));
2604 memset(arch_zone_highest_possible_pfn, 0,
2605 sizeof(arch_zone_highest_possible_pfn));
2606 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
2607 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
2608 for (i = 1; i < MAX_NR_ZONES; i++) {
2609 arch_zone_lowest_possible_pfn[i] =
2610 arch_zone_highest_possible_pfn[i-1];
2611 arch_zone_highest_possible_pfn[i] =
2612 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
2613 }
2614
2615 /* Regions in the early_node_map can be in any order */
2616 sort_node_map();
2617
2618 /* Print out the zone ranges */
2619 printk("Zone PFN ranges:\n");
2620 for (i = 0; i < MAX_NR_ZONES; i++)
2621 printk(" %-8s %8lu -> %8lu\n",
2622 zone_names[i],
2623 arch_zone_lowest_possible_pfn[i],
2624 arch_zone_highest_possible_pfn[i]);
2625
2626 /* Print out the early_node_map[] */
2627 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
2628 for (i = 0; i < nr_nodemap_entries; i++)
2629 printk(" %3d: %8lu -> %8lu\n", early_node_map[i].nid,
2630 early_node_map[i].start_pfn,
2631 early_node_map[i].end_pfn);
2632
2633 /* Initialise every node */
2634 for_each_online_node(nid) {
2635 pg_data_t *pgdat = NODE_DATA(nid);
2636 free_area_init_node(nid, pgdat, NULL,
2637 find_min_pfn_for_node(nid), NULL);
2638 }
2639}
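/*
 * Illustrative sketch only (not part of this file): roughly how an
 * architecture selecting CONFIG_ARCH_POPULATES_NODE_MAP might drive the
 * interfaces above. The function name setup_arch_zones() and the values
 * max_dma_pfn/max_ram_pfn are hypothetical; a real port derives them from
 * its firmware memory map (e820, SRAT or similar), per node.
 */
#if 0	/* example only */
static void __init setup_arch_zones(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	/* Register every block of present RAM with its node id */
	add_active_range(0, 0, max_ram_pfn);

	/* Describe the highest PFN each zone may reach */
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = max_dma_pfn;
	max_zone_pfns[ZONE_NORMAL] = max_ram_pfn;

	/* Zone sizes and holes for every node are now derived automatically */
	free_area_init_nodes(max_zone_pfns);
}
#endif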
2640#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
2641
0e0b864e
MG
2642/**
2643 * set_dma_reserve - Account the specified number of pages reserved in ZONE_DMA
 2644 * @new_dma_reserve: The number of pages to mark reserved
2645 *
2646 * The per-cpu batchsize and zone watermarks are determined by present_pages.
2647 * In the DMA zone, a significant percentage may be consumed by kernel image
2648 * and other unfreeable allocations which can skew the watermarks badly. This
2649 * function may optionally be used to account for unfreeable pages in
2650 * ZONE_DMA. The effect will be lower watermarks and smaller per-cpu batchsize
2651 */
2652void __init set_dma_reserve(unsigned long new_dma_reserve)
2653{
2654 dma_reserve = new_dma_reserve;
2655}
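/*
 * Illustrative sketch only: an architecture whose kernel image and other
 * unfreeable data live in ZONE_DMA could account for them like this before
 * the zones are initialised. kernel_unfreeable_bytes is a hypothetical value
 * the arch would compute from its own memory layout.
 */
#if 0	/* example only */
	set_dma_reserve(PFN_UP(kernel_unfreeable_bytes));
#endif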
2656
93b7504e 2657#ifndef CONFIG_NEED_MULTIPLE_NODES
1da177e4
LT
2658static bootmem_data_t contig_bootmem_data;
2659struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
2660
2661EXPORT_SYMBOL(contig_page_data);
93b7504e 2662#endif
1da177e4
LT
2663
2664void __init free_area_init(unsigned long *zones_size)
2665{
93b7504e 2666 free_area_init_node(0, NODE_DATA(0), zones_size,
1da177e4
LT
2667 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
2668}
1da177e4 2669
1da177e4
LT
2670#ifdef CONFIG_HOTPLUG_CPU
2671static int page_alloc_cpu_notify(struct notifier_block *self,
2672 unsigned long action, void *hcpu)
2673{
2674 int cpu = (unsigned long)hcpu;
1da177e4
LT
2675
2676 if (action == CPU_DEAD) {
1da177e4
LT
2677 local_irq_disable();
2678 __drain_pages(cpu);
f8891e5e 2679 vm_events_fold_cpu(cpu);
1da177e4 2680 local_irq_enable();
2244b95a 2681 refresh_cpu_vm_stats(cpu);
1da177e4
LT
2682 }
2683 return NOTIFY_OK;
2684}
2685#endif /* CONFIG_HOTPLUG_CPU */
2686
2687void __init page_alloc_init(void)
2688{
2689 hotcpu_notifier(page_alloc_cpu_notify, 0);
2690}
2691
cb45b0e9
HA
2692/*
 2693 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
2694 * or min_free_kbytes changes.
2695 */
2696static void calculate_totalreserve_pages(void)
2697{
2698 struct pglist_data *pgdat;
2699 unsigned long reserve_pages = 0;
2f6726e5 2700 enum zone_type i, j;
cb45b0e9
HA
2701
2702 for_each_online_pgdat(pgdat) {
2703 for (i = 0; i < MAX_NR_ZONES; i++) {
2704 struct zone *zone = pgdat->node_zones + i;
2705 unsigned long max = 0;
2706
2707 /* Find valid and maximum lowmem_reserve in the zone */
2708 for (j = i; j < MAX_NR_ZONES; j++) {
2709 if (zone->lowmem_reserve[j] > max)
2710 max = zone->lowmem_reserve[j];
2711 }
2712
2713 /* we treat pages_high as reserved pages. */
2714 max += zone->pages_high;
2715
2716 if (max > zone->present_pages)
2717 max = zone->present_pages;
2718 reserve_pages += max;
2719 }
2720 }
2721 totalreserve_pages = reserve_pages;
2722}
2723
1da177e4
LT
2724/*
2725 * setup_per_zone_lowmem_reserve - called whenever
 2726 *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
2727 * has a correct pages reserved value, so an adequate number of
2728 * pages are left in the zone after a successful __alloc_pages().
2729 */
2730static void setup_per_zone_lowmem_reserve(void)
2731{
2732 struct pglist_data *pgdat;
2f6726e5 2733 enum zone_type j, idx;
1da177e4 2734
ec936fc5 2735 for_each_online_pgdat(pgdat) {
1da177e4
LT
2736 for (j = 0; j < MAX_NR_ZONES; j++) {
2737 struct zone *zone = pgdat->node_zones + j;
2738 unsigned long present_pages = zone->present_pages;
2739
2740 zone->lowmem_reserve[j] = 0;
2741
2f6726e5
CL
2742 idx = j;
2743 while (idx) {
1da177e4
LT
2744 struct zone *lower_zone;
2745
2f6726e5
CL
2746 idx--;
2747
1da177e4
LT
2748 if (sysctl_lowmem_reserve_ratio[idx] < 1)
2749 sysctl_lowmem_reserve_ratio[idx] = 1;
2750
2751 lower_zone = pgdat->node_zones + idx;
2752 lower_zone->lowmem_reserve[j] = present_pages /
2753 sysctl_lowmem_reserve_ratio[idx];
2754 present_pages += lower_zone->present_pages;
2755 }
2756 }
2757 }
cb45b0e9
HA
2758
2759 /* update totalreserve_pages */
2760 calculate_totalreserve_pages();
1da177e4
LT
2761}
2762
2763/*
2764 * setup_per_zone_pages_min - called when min_free_kbytes changes. Ensures
2765 * that the pages_{min,low,high} values for each zone are set correctly
2766 * with respect to min_free_kbytes.
2767 */
3947be19 2768void setup_per_zone_pages_min(void)
1da177e4
LT
2769{
2770 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
2771 unsigned long lowmem_pages = 0;
2772 struct zone *zone;
2773 unsigned long flags;
2774
2775 /* Calculate total number of !ZONE_HIGHMEM pages */
2776 for_each_zone(zone) {
2777 if (!is_highmem(zone))
2778 lowmem_pages += zone->present_pages;
2779 }
2780
2781 for_each_zone(zone) {
ac924c60
AM
2782 u64 tmp;
2783
1da177e4 2784 spin_lock_irqsave(&zone->lru_lock, flags);
ac924c60
AM
2785 tmp = (u64)pages_min * zone->present_pages;
2786 do_div(tmp, lowmem_pages);
1da177e4
LT
2787 if (is_highmem(zone)) {
2788 /*
669ed175
NP
2789 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
2790 * need highmem pages, so cap pages_min to a small
2791 * value here.
2792 *
2793 * The (pages_high-pages_low) and (pages_low-pages_min)
 2794			 * deltas control asynch page reclaim, and so should
2795 * not be capped for highmem.
1da177e4
LT
2796 */
2797 int min_pages;
2798
2799 min_pages = zone->present_pages / 1024;
2800 if (min_pages < SWAP_CLUSTER_MAX)
2801 min_pages = SWAP_CLUSTER_MAX;
2802 if (min_pages > 128)
2803 min_pages = 128;
2804 zone->pages_min = min_pages;
2805 } else {
669ed175
NP
2806 /*
2807 * If it's a lowmem zone, reserve a number of pages
1da177e4
LT
2808 * proportionate to the zone's size.
2809 */
669ed175 2810 zone->pages_min = tmp;
1da177e4
LT
2811 }
2812
ac924c60
AM
2813 zone->pages_low = zone->pages_min + (tmp >> 2);
2814 zone->pages_high = zone->pages_min + (tmp >> 1);
1da177e4
LT
2815 spin_unlock_irqrestore(&zone->lru_lock, flags);
2816 }
cb45b0e9
HA
2817
2818 /* update totalreserve_pages */
2819 calculate_totalreserve_pages();
1da177e4
LT
2820}
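/*
 * Worked example, assuming 4K pages: min_free_kbytes == 4096 gives
 * pages_min == 1024 in total. A lowmem zone holding all of lowmem receives
 * the whole share, so pages_min == 1024, pages_low == 1280 and
 * pages_high == 1536, while a highmem zone is clamped to between
 * SWAP_CLUSTER_MAX and 128 pages.
 */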
2821
2822/*
2823 * Initialise min_free_kbytes.
2824 *
2825 * For small machines we want it small (128k min). For large machines
2826 * we want it large (64MB max). But it is not linear, because network
2827 * bandwidth does not increase linearly with machine size. We use
2828 *
2829 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
2830 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
2831 *
2832 * which yields
2833 *
2834 * 16MB: 512k
2835 * 32MB: 724k
2836 * 64MB: 1024k
2837 * 128MB: 1448k
2838 * 256MB: 2048k
2839 * 512MB: 2896k
2840 * 1024MB: 4096k
2841 * 2048MB: 5792k
2842 * 4096MB: 8192k
2843 * 8192MB: 11584k
2844 * 16384MB: 16384k
2845 */
2846static int __init init_per_zone_pages_min(void)
2847{
2848 unsigned long lowmem_kbytes;
2849
2850 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
2851
2852 min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
2853 if (min_free_kbytes < 128)
2854 min_free_kbytes = 128;
2855 if (min_free_kbytes > 65536)
2856 min_free_kbytes = 65536;
2857 setup_per_zone_pages_min();
2858 setup_per_zone_lowmem_reserve();
2859 return 0;
2860}
2861module_init(init_per_zone_pages_min)
2862
2863/*
2864 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
2865 * that we can call two helper functions whenever min_free_kbytes
2866 * changes.
2867 */
2868int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
2869 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
2870{
2871 proc_dointvec(table, write, file, buffer, length, ppos);
2872 setup_per_zone_pages_min();
2873 return 0;
2874}
2875
9614634f
CL
2876#ifdef CONFIG_NUMA
2877int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
2878 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
2879{
2880 struct zone *zone;
2881 int rc;
2882
2883 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
2884 if (rc)
2885 return rc;
2886
2887 for_each_zone(zone)
8417bba4 2888 zone->min_unmapped_pages = (zone->present_pages *
9614634f
CL
2889 sysctl_min_unmapped_ratio) / 100;
2890 return 0;
2891}
0ff38490
CL
2892
2893int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
2894 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
2895{
2896 struct zone *zone;
2897 int rc;
2898
2899 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
2900 if (rc)
2901 return rc;
2902
2903 for_each_zone(zone)
2904 zone->min_slab_pages = (zone->present_pages *
2905 sysctl_min_slab_ratio) / 100;
2906 return 0;
2907}
9614634f
CL
2908#endif
2909
1da177e4
LT
2910/*
2911 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
2912 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
2913 * whenever sysctl_lowmem_reserve_ratio changes.
2914 *
2915 * The reserve ratio obviously has absolutely no relation with the
 2916 * pages_min watermarks. The lowmem reserve ratio is only meaningful
 2917 * as a function of the boot-time zone sizes.
2918 */
2919int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
2920 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
2921{
2922 proc_dointvec_minmax(table, write, file, buffer, length, ppos);
2923 setup_per_zone_lowmem_reserve();
2924 return 0;
2925}
2926
8ad4b1fb
RS
2927/*
 2928 * percpu_pagelist_fraction - changes pcp->high for each zone on each cpu.
 2929 * It is the fraction of a zone's total pages that a hot per-cpu pagelist
 2930 * may hold before it is flushed back to the buddy allocator.
2931 */
2932
2933int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
2934 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
2935{
2936 struct zone *zone;
2937 unsigned int cpu;
2938 int ret;
2939
2940 ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
2941 if (!write || (ret == -EINVAL))
2942 return ret;
2943 for_each_zone(zone) {
2944 for_each_online_cpu(cpu) {
2945 unsigned long high;
2946 high = zone->present_pages / percpu_pagelist_fraction;
2947 setup_pagelist_highmark(zone_pcp(zone, cpu), high);
2948 }
2949 }
2950 return 0;
2951}
2952
f034b5d4 2953int hashdist = HASHDIST_DEFAULT;
1da177e4
LT
2954
2955#ifdef CONFIG_NUMA
2956static int __init set_hashdist(char *str)
2957{
2958 if (!str)
2959 return 0;
2960 hashdist = simple_strtoul(str, &str, 0);
2961 return 1;
2962}
2963__setup("hashdist=", set_hashdist);
2964#endif
2965
2966/*
2967 * allocate a large system hash table from bootmem
2968 * - it is assumed that the hash table must contain an exact power-of-2
2969 * quantity of entries
2970 * - limit is the number of hash buckets, not the total allocation size
2971 */
2972void *__init alloc_large_system_hash(const char *tablename,
2973 unsigned long bucketsize,
2974 unsigned long numentries,
2975 int scale,
2976 int flags,
2977 unsigned int *_hash_shift,
2978 unsigned int *_hash_mask,
2979 unsigned long limit)
2980{
2981 unsigned long long max = limit;
2982 unsigned long log2qty, size;
2983 void *table = NULL;
2984
2985 /* allow the kernel cmdline to have a say */
2986 if (!numentries) {
2987 /* round applicable memory size up to nearest megabyte */
2988 numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages;
2989 numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
2990 numentries >>= 20 - PAGE_SHIFT;
2991 numentries <<= 20 - PAGE_SHIFT;
2992
2993 /* limit to 1 bucket per 2^scale bytes of low memory */
2994 if (scale > PAGE_SHIFT)
2995 numentries >>= (scale - PAGE_SHIFT);
2996 else
2997 numentries <<= (PAGE_SHIFT - scale);
2998 }
6e692ed3 2999 numentries = roundup_pow_of_two(numentries);
1da177e4
LT
3000
3001 /* limit allocation size to 1/16 total memory by default */
3002 if (max == 0) {
3003 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
3004 do_div(max, bucketsize);
3005 }
3006
3007 if (numentries > max)
3008 numentries = max;
3009
3010 log2qty = long_log2(numentries);
3011
3012 do {
3013 size = bucketsize << log2qty;
3014 if (flags & HASH_EARLY)
3015 table = alloc_bootmem(size);
3016 else if (hashdist)
3017 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
3018 else {
3019 unsigned long order;
3020 for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
3021 ;
3022 table = (void*) __get_free_pages(GFP_ATOMIC, order);
3023 }
3024 } while (!table && size > PAGE_SIZE && --log2qty);
3025
3026 if (!table)
3027 panic("Failed to allocate %s hash table\n", tablename);
3028
3029 printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
3030 tablename,
3031 (1U << log2qty),
3032 long_log2(size) - PAGE_SHIFT,
3033 size);
3034
3035 if (_hash_shift)
3036 *_hash_shift = log2qty;
3037 if (_hash_mask)
3038 *_hash_mask = (1 << log2qty) - 1;
3039
3040 return table;
3041}
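/*
 * Illustrative sketch only: a typical early-boot caller lets memory size pick
 * the number of entries and scales it to one bucket per 16KB of low memory.
 * The my_table_hash/my_hash_shift/my_hash_mask names are hypothetical.
 */
#if 0	/* example only */
	my_table_hash = alloc_large_system_hash("My-table",
						sizeof(struct hlist_head),
						0,	/* size from memory */
						14,	/* 1 bucket per 16KB */
						HASH_EARLY,
						&my_hash_shift,
						&my_hash_mask,
						0);	/* default max */
#endif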
a117e66e
KH
3042
3043#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
a117e66e
KH
3044struct page *pfn_to_page(unsigned long pfn)
3045{
67de6482 3046 return __pfn_to_page(pfn);
a117e66e
KH
3047}
3048unsigned long page_to_pfn(struct page *page)
3049{
67de6482 3050 return __page_to_pfn(page);
a117e66e 3051}
a117e66e
KH
3052EXPORT_SYMBOL(pfn_to_page);
3053EXPORT_SYMBOL(page_to_pfn);
3054#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */