/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section - memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
        ____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
        ____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
        return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
        section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
        struct mem_section *section = NULL;
        unsigned long array_size = SECTIONS_PER_ROOT *
                                   sizeof(struct mem_section);

        if (slab_is_available()) {
                if (node_state(nid, N_HIGH_MEMORY))
                        section = kmalloc_node(array_size, GFP_KERNEL, nid);
                else
                        section = kmalloc(array_size, GFP_KERNEL);
        } else
                section = alloc_bootmem_node(NODE_DATA(nid), array_size);

        if (section)
                memset(section, 0, array_size);

        return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
        static DEFINE_SPINLOCK(index_init_lock);
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;
        int ret = 0;

        if (mem_section[root])
                return -EEXIST;

        section = sparse_index_alloc(nid);
        if (!section)
                return -ENOMEM;
        /*
         * This lock keeps two different sections from
         * reallocating for the same index
         */
        spin_lock(&index_init_lock);

        if (mem_section[root]) {
                ret = -EEXIST;
                goto out;
        }

        mem_section[root] = section;
out:
        spin_unlock(&index_init_lock);
        return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
        return 0;
}
#endif
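
/*
 * Illustrative sketch (hypothetical helper, not used by this file): how a
 * section number resolves to a struct mem_section.  This mirrors
 * __nr_to_section() in mmzone.h; under CONFIG_SPARSEMEM_EXTREME the lookup
 * is two-level (root pointer allocated by sparse_index_init() above, then
 * an offset within that root), otherwise the array is flat.
 */
static struct mem_section * __maybe_unused
example_nr_to_section(unsigned long nr)
{
        if (!mem_section[SECTION_NR_TO_ROOT(nr)])
                return NULL;            /* root not allocated yet */
        return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}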

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section *ms)
{
        unsigned long root_nr;
        struct mem_section *root;

        for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
                root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
                if (!root)
                        continue;

                if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
                        break;
        }

        return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
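
/*
 * Illustrative only (hypothetical helper, never called): __section_nr() is
 * the inverse of __nr_to_section(), so for any section whose root has been
 * allocated the round trip below holds.
 */
static void __maybe_unused example_section_nr_roundtrip(unsigned long pnum)
{
        struct mem_section *ms = __nr_to_section(pnum);

        WARN_ON(ms && __section_nr(ms) != pnum);
}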

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
        return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
        return (section->section_mem_map >> SECTION_NID_SHIFT);
}
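
/*
 * Illustrative only (hypothetical helper, never called): the early-boot
 * node-id round trip.  Encoding then decoding recovers the nid because
 * nothing else touches the bits above SECTION_NID_SHIFT until the real
 * mem_map replaces this encoding.
 */
static void __maybe_unused example_early_nid_roundtrip(int nid)
{
        struct mem_section tmp = {
                .section_mem_map = sparse_encode_early_nid(nid),
        };

        WARN_ON(sparse_early_nid(&tmp) != nid);
}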

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
                                                unsigned long *end_pfn)
{
        unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

        /*
         * Sanity checks - do not allow an architecture to pass
         * in larger pfns than the maximum scope of sparsemem:
         */
        if (*start_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *start_pfn = max_sparsemem_pfn;
                *end_pfn = max_sparsemem_pfn;
        } else if (*end_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *end_pfn = max_sparsemem_pfn;
        }
}
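
/*
 * Worked example (illustrative; the values are just one possible
 * configuration): with MAX_PHYSMEM_BITS == 44 and PAGE_SHIFT == 12,
 * max_sparsemem_pfn is 1UL << 32, i.e. sparsemem can describe at most
 * 16TB of physical address space, and any pfn range beyond that is
 * clamped by the checks above.
 */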

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        start &= PAGE_SECTION_MASK;
        mminit_validate_memmodel_limits(&start, &end);
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                sparse_index_init(section, nid);
                set_section_nid(section, nid);

                ms = __nr_to_section(section);
                if (!ms->section_mem_map)
                        ms->section_mem_map = sparse_encode_early_nid(nid) |
                                                        SECTION_MARKED_PRESENT;
        }
}
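
/*
 * Illustrative usage (a sketch; real callers are per-architecture setup
 * code, and the pfn range below is made up): each early memory range is
 * registered against its node before sparse_init() runs.
 */
static void __init __maybe_unused example_register_ranges(void)
{
        /* Hypothetical range: node 0 owns pfns [0, 0x80000). */
        memory_present(0, 0, 0x80000);
}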

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
                                                unsigned long end_pfn)
{
        unsigned long pfn;
        unsigned long nr_pages = 0;

        mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                if (nid != early_pfn_to_nid(pfn))
                        continue;

                if (pfn_present(pfn))
                        nr_pages += PAGES_PER_SECTION;
        }

        return nr_pages * sizeof(struct page);
}

/*
 * Subtle: we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
        return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
        /* mask off the extra low bits of information */
        coded_mem_map &= SECTION_MAP_MASK;
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
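
/*
 * Illustrative only (hypothetical helper, never called): the encode/decode
 * pair is built so that decode(encode(map, pnum), pnum) == map, which is
 * the "identity" property the comment above alludes to.
 */
static void __maybe_unused example_mem_map_roundtrip(struct page *map,
                                                     unsigned long pnum)
{
        unsigned long coded = sparse_encode_mem_map(map, pnum);

        WARN_ON(sparse_decode_mem_map(coded, pnum) != map);
}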

static int __meminit sparse_init_one_section(struct mem_section *ms,
                unsigned long pnum, struct page *mem_map,
                unsigned long *pageblock_bitmap)
{
        if (!present_section(ms))
                return -EINVAL;

        ms->section_mem_map &= ~SECTION_MAP_MASK;
        ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
                                                        SECTION_HAS_MEM_MAP;
        ms->pageblock_flags = pageblock_bitmap;

        return 1;
}

unsigned long usemap_size(void)
{
        unsigned long size_bytes;
        size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
        size_bytes = roundup(size_bytes, sizeof(unsigned long));
        return size_bytes;
}
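
/*
 * Worked example (illustrative; the bit count depends on the section and
 * pageblock sizes of the configuration): usemap_size() converts the
 * per-section pageblock-flag bit count into bytes, then rounds up to a
 * whole number of longs.  E.g. 256 flag bits -> 32 bytes, already a
 * multiple of sizeof(unsigned long) on 64-bit.  Either way the result is
 * a tiny fraction of a page, which is why sparse_init() below batches
 * usemap allocations.
 */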

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
        return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long count)
{
        unsigned long section_nr;

        /*
         * A page may contain usemaps for other sections, preventing the
         * page from being freed and making a section unremovable while
         * other sections referencing the usemap remain active. Similarly,
         * a pgdat can prevent a section being removed. If section A
         * contains a pgdat and section B contains the usemap, both
         * sections become inter-dependent. This allocates usemaps
         * from the same section as the pgdat where possible to avoid
         * this problem.
         */
        section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
        return alloc_bootmem_section(usemap_size() * count, section_nr);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
        unsigned long usemap_snr, pgdat_snr;
        static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
        static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
        struct pglist_data *pgdat = NODE_DATA(nid);
        int usemap_nid;

        usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
        pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
        if (usemap_snr == pgdat_snr)
                return;

        if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
                /* skip redundant message */
                return;

        old_usemap_snr = usemap_snr;
        old_pgdat_snr = pgdat_snr;

        usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
        if (usemap_nid != nid) {
                printk(KERN_INFO
                       "node %d must be removed before removing section %ld\n",
                       nid, usemap_snr);
                return;
        }
        /*
         * There is a circular dependency.
         * Some platforms allow un-removable sections because they simply
         * gather other removable sections for dynamic partitioning.
         * Just report the un-removable section's number here.
         */
        printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
               pgdat_snr, nid);
        printk(KERN_CONT
               " have a circular dependency on usemap and pgdat allocations\n");
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long count)
{
        return NULL;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static void __init sparse_early_usemaps_alloc_node(unsigned long **usemap_map,
                                unsigned long pnum_begin,
                                unsigned long pnum_end,
                                unsigned long usemap_count, int nodeid)
{
        void *usemap;
        unsigned long pnum;
        int size = usemap_size();

        usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
                                                          usemap_count);
        if (usemap) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        usemap_map[pnum] = usemap;
                        usemap += size;
                }
                return;
        }

        usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
        if (usemap) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        usemap_map[pnum] = usemap;
                        usemap += size;
                        check_usemap_section_nr(nodeid, usemap_map[pnum]);
                }
                return;
        }

        printk(KERN_WARNING "%s: allocation failed\n", __func__);
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
        struct page *map;

        map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;

        map = alloc_bootmem_pages_node(NODE_DATA(nid),
                       PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
        return map;
}

void __init sparse_mem_maps_populate_node(struct page **map_map,
                                          unsigned long pnum_begin,
                                          unsigned long pnum_end,
                                          unsigned long map_count, int nodeid)
{
        void *map;
        unsigned long pnum;
        unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

        map = alloc_remap(nodeid, size * map_count);
        if (map) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        map_map[pnum] = map;
                        map += size;
                }
                return;
        }

        size = PAGE_ALIGN(size);
        map = alloc_bootmem_pages_node(NODE_DATA(nodeid), size * map_count);
        if (map) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        map_map[pnum] = map;
                        map += size;
                }
                return;
        }

        /* fallback */
        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;
                map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
                if (map_map[pnum])
                        continue;
                ms = __nr_to_section(pnum);
                printk(KERN_ERR "%s: sparsemem memory map backing failed, "
                       "some memory will not be available.\n", __func__);
                ms->section_mem_map = 0;
        }
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
                                unsigned long pnum_begin,
                                unsigned long pnum_end,
                                unsigned long map_count, int nodeid)
{
        sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
                                      map_count, nodeid);
}

#ifndef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
        struct page *map;
        struct mem_section *ms = __nr_to_section(pnum);
        int nid = sparse_early_nid(ms);

        map = sparse_mem_map_populate(pnum, nid);
        if (map)
                return map;

        printk(KERN_ERR "%s: sparsemem memory map backing failed, "
               "some memory will not be available.\n", __func__);
        ms->section_mem_map = 0;
        return NULL;
}
#endif

void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
        unsigned long pnum;
        struct page *map;
        struct page **map_map;
        unsigned long *usemap;
        unsigned long **usemap_map;
        int size, size2;
        int nodeid_begin = 0;
        unsigned long pnum_begin = 0;
        unsigned long usemap_count;
        unsigned long map_count;

        /*
         * Each mem_map is backed by a big page (2M on 64-bit x86), while
         * each usemap is much smaller than a page (about 24 bytes).
         * Allocating 2M (2M-aligned) and 24 bytes alternately would push
         * every following 2M allocation into the next aligned slot, so a
         * big system would end up with a lot of holes; instead, try to
         * allocate the 2M pages contiguously.
         *
         * powerpc needs to call sparse_init_one_section() right after each
         * sparse_early_mem_map_alloc(), so allocate usemap_map first.
         */
        size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
        usemap_map = alloc_bootmem(size);
        if (!usemap_map)
                panic("can not allocate usemap_map\n");

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;
                ms = __nr_to_section(pnum);
                nodeid_begin = sparse_early_nid(ms);
                pnum_begin = pnum;
                break;
        }
        usemap_count = 1;
        for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
                struct mem_section *ms;
                int nodeid;

                if (!present_section_nr(pnum))
                        continue;
                ms = __nr_to_section(pnum);
                nodeid = sparse_early_nid(ms);
                if (nodeid == nodeid_begin) {
                        usemap_count++;
                        continue;
                }
                /* ok, we need to take care of from pnum_begin to pnum - 1 */
                sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
                                                usemap_count, nodeid_begin);
                /* new start, update count etc. */
                nodeid_begin = nodeid;
                pnum_begin = pnum;
                usemap_count = 1;
        }
        /* ok, last chunk */
        sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
                                        usemap_count, nodeid_begin);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
        map_map = alloc_bootmem(size2);
        if (!map_map)
                panic("can not allocate map_map\n");

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;
                ms = __nr_to_section(pnum);
                nodeid_begin = sparse_early_nid(ms);
                pnum_begin = pnum;
                break;
        }
        map_count = 1;
        for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
                struct mem_section *ms;
                int nodeid;

                if (!present_section_nr(pnum))
                        continue;
                ms = __nr_to_section(pnum);
                nodeid = sparse_early_nid(ms);
                if (nodeid == nodeid_begin) {
                        map_count++;
                        continue;
                }
                /* ok, we need to take care of from pnum_begin to pnum - 1 */
                sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
                                                 map_count, nodeid_begin);
                /* new start, update count etc. */
                nodeid_begin = nodeid;
                pnum_begin = pnum;
                map_count = 1;
        }
        /* ok, last chunk */
        sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
                                         map_count, nodeid_begin);
#endif

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                if (!present_section_nr(pnum))
                        continue;

                usemap = usemap_map[pnum];
                if (!usemap)
                        continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
                map = map_map[pnum];
#else
                map = sparse_early_mem_map_alloc(pnum);
#endif
                if (!map)
                        continue;

                sparse_init_one_section(__nr_to_section(pnum), pnum, map,
                                        usemap);
        }

        vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        free_bootmem(__pa(map_map), size2);
#endif
        free_bootmem(__pa(usemap_map), size);
}
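
/*
 * Illustrative boot ordering (a sketch; the exact call sites are
 * architecture specific, and the range iterator below is hypothetical):
 *
 *      for_each_early_memory_range(nid, start_pfn, end_pfn)
 *              memory_present(nid, start_pfn, end_pfn);
 *      sparse_init();
 *
 * Every present section must be registered via memory_present() before
 * sparse_init() scans NR_MEM_SECTIONS above.
 */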

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
                                                  unsigned long nr_pages)
{
        /* This will make the necessary allocations eventually. */
        return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
        return; /* XXX: Not implemented yet */
}
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
        struct page *page, *ret;
        unsigned long memmap_size = sizeof(struct page) * nr_pages;

        page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
        if (page)
                goto got_map_page;

        ret = vmalloc(memmap_size);
        if (ret)
                goto got_map_ptr;

        return NULL;
got_map_page:
        ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
        memset(ret, 0, memmap_size);

        return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
                                                  unsigned long nr_pages)
{
        return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
        if (is_vmalloc_addr(memmap))
                vfree(memmap);
        else
                free_pages((unsigned long)memmap,
                           get_order(sizeof(struct page) * nr_pages));
}
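
/*
 * Illustrative only (hypothetical helper, never called): the allocator
 * above returns either page-backed memory or a vmalloc'ed region, and
 * __kfree_section_memmap() tells them apart with is_vmalloc_addr(), so
 * the pair can be used symmetrically:
 */
static void __maybe_unused example_memmap_alloc_free(unsigned long nr_pages)
{
        struct page *memmap = __kmalloc_section_memmap(nr_pages);

        if (memmap)
                __kfree_section_memmap(memmap, nr_pages);
}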

static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
        unsigned long maps_section_nr, removing_section_nr, i;
        int magic;

        for (i = 0; i < nr_pages; i++, page++) {
                magic = atomic_read(&page->_mapcount);

                BUG_ON(magic == NODE_INFO);

                maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
                removing_section_nr = page->private;

                /*
                 * When this function is called, the removing section is
                 * in a logically offlined state: all of its pages have
                 * been isolated from the page allocator. If the removing
                 * section's memmap is placed on that same section, it must
                 * not be freed; if it were, the page allocator could hand
                 * it out just before the memory is removed physically.
                 */
                if (maps_section_nr != removing_section_nr)
                        put_page_bootmem(page);
        }
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
        struct page *usemap_page;
        unsigned long nr_pages;

        if (!usemap)
                return;

        usemap_page = virt_to_page(usemap);
        /*
         * Check to see if allocation came from hot-plug-add
         */
        if (PageSlab(usemap_page)) {
                kfree(usemap);
                if (memmap)
                        __kfree_section_memmap(memmap, PAGES_PER_SECTION);
                return;
        }

        /*
         * The usemap came from bootmem. At boot it was packed with other
         * usemaps on the section holding the pgdat, so just leave it as is.
         */

        if (memmap) {
                struct page *memmap_page;
                memmap_page = virt_to_page(memmap);

                nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
                        >> PAGE_SHIFT;

                free_map_bootmem(memmap_page, nr_pages);
        }
}

/*
 * Returns the number of sections whose mem_maps were properly set.
 * If this is <= 0, the passed-in map was not consumed and must be
 * freed.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
                                     int nr_pages)
{
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct pglist_data *pgdat = zone->zone_pgdat;
        struct mem_section *ms;
        struct page *memmap;
        unsigned long *usemap;
        unsigned long flags;
        int ret;

        /*
         * No locking needed here: sparse_index_init() does its own
         * locking, and it may sleep (it kmallocs).
         */
        ret = sparse_index_init(section_nr, pgdat->node_id);
        if (ret < 0 && ret != -EEXIST)
                return ret;
        memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
        if (!memmap)
                return -ENOMEM;
        usemap = __kmalloc_section_usemap();
        if (!usemap) {
                __kfree_section_memmap(memmap, nr_pages);
                return -ENOMEM;
        }

        pgdat_resize_lock(pgdat, &flags);

        ms = __pfn_to_section(start_pfn);
        if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
                ret = -EEXIST;
                goto out;
        }

        ms->section_mem_map |= SECTION_MARKED_PRESENT;

        ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
        pgdat_resize_unlock(pgdat, &flags);
        if (ret <= 0) {
                kfree(usemap);
                __kfree_section_memmap(memmap, nr_pages);
        }
        return ret;
}
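
/*
 * Illustrative caller (a sketch: the real caller is the memory hotplug
 * core; the helper below is hypothetical and never invoked here):
 */
static int __meminit __maybe_unused example_add_section(struct zone *zone,
                                                unsigned long start_pfn)
{
        int ret = sparse_add_one_section(zone, start_pfn, PAGES_PER_SECTION);

        /* -EEXIST just means the section was already present. */
        return (ret < 0 && ret != -EEXIST) ? ret : 0;
}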

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
        struct page *memmap = NULL;
        unsigned long *usemap = NULL;

        if (ms->section_mem_map) {
                usemap = ms->pageblock_flags;
                memmap = sparse_decode_mem_map(ms->section_mem_map,
                                                __section_nr(ms));
                ms->section_mem_map = 0;
                ms->pageblock_flags = NULL;
        }

        free_section_usemap(memmap, usemap);
}
#endif