2 * mm/percpu.c - percpu memory allocator
4 * Copyright (C) 2009 SUSE Linux Products GmbH
5 * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
7 * This file is released under the GPLv2.
9 * This is the percpu allocator which can handle both static and dynamic
10 * areas. Percpu areas are allocated in chunks. Each chunk consists
11 * of a boot-time determined number of units and the first
12 * chunk is used for static percpu variables in the kernel image
13 * (special boot time alloc/init handling necessary as these areas
14 * need to be brought up before allocation services are running).
15 * Units grow as necessary and all units grow or shrink in unison.
16 * When a chunk is filled up, another chunk is allocated.
19 * ------------------- ------------------- ------------
20 * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u
21 * ------------------- ...... ------------------- .... ------------
23 * Allocation is done in offset-size areas of a single unit's space. I.e.,
24 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
25 * c1:u1, c1:u2 and c1:u3. On UMA, units correspond directly to
26 * cpus. On NUMA, the mapping can be non-linear and even sparse.
27 * Percpu access can be done by configuring percpu base registers
28 * according to cpu to unit mapping and pcpu_unit_size.
30 * There are usually many small percpu allocations, many of them
31 * as small as 4 bytes. The allocator organizes chunks into lists
32 * according to free size and tries to allocate from the fullest one.
33 * Each chunk keeps the maximum contiguous area size hint which is
34 * guaranteed to be equal to or larger than the maximum contiguous
35 * area in the chunk. This helps the allocator avoid iterating over
36 * the chunk maps unnecessarily.
38 * Allocation state in each chunk is kept using an array of integers
39 * on chunk->map. A positive value in the map represents a free
40 * region and negative allocated. Allocation inside a chunk is done
41 * by scanning this map sequentially and serving the first matching
42 * entry. This is mostly copied from the percpu_modalloc() allocator.
43 * Chunks can be determined from the address using the index field
44 * in the page struct. The index field contains a pointer to the chunk.
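 *
 * For example (values purely illustrative), a chunk->map of
 * { -768, 512, -64, 2048 } describes, from offset 0: a 768 byte
 * allocated area, a 512 byte free area, a 64 byte allocated area and a
 * 2048 byte free area; the offset of an area is the sum of the
 * absolute values of the entries preceding it.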
46 * To use this allocator, arch code should do the following:
48 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
49 * regular address to percpu pointer and back if they need to be
50 * different from the default
52 * - use pcpu_setup_first_chunk() during percpu area initialization to
53 * setup the first chunk containing the kernel static percpu area
56 #include <linux/bitmap.h>
57 #include <linux/bootmem.h>
58 #include <linux/err.h>
59 #include <linux/list.h>
60 #include <linux/log2.h>
62 #include <linux/module.h>
63 #include <linux/mutex.h>
64 #include <linux/percpu.h>
65 #include <linux/pfn.h>
66 #include <linux/slab.h>
67 #include <linux/spinlock.h>
68 #include <linux/vmalloc.h>
69 #include <linux/workqueue.h>
71 #include <asm/cacheflush.h>
72 #include <asm/sections.h>
73 #include <asm/tlbflush.h>
76 #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 share the same slot */
77 #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
79 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
80 #ifndef __addr_to_pcpu_ptr
81 #define __addr_to_pcpu_ptr(addr) \
82 (void __percpu *)((unsigned long)(addr) - \
83 (unsigned long)pcpu_base_addr + \
84 (unsigned long)__per_cpu_start)
86 #ifndef __pcpu_ptr_to_addr
87 #define __pcpu_ptr_to_addr(ptr) \
88 (void __force *)((unsigned long)(ptr) + \
89 (unsigned long)pcpu_base_addr - \
90 (unsigned long)__per_cpu_start)
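/*
 * Note (illustrative): the two default translations above are inverses
 * of each other; they simply shift an address by the constant
 * (pcpu_base_addr - __per_cpu_start) in opposite directions, so
 * __addr_to_pcpu_ptr(__pcpu_ptr_to_addr(ptr)) == ptr for any percpu
 * pointer returned by this allocator.
 */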
94 struct list_head list; /* linked to pcpu_slot lists */
95 int free_size; /* free bytes in the chunk */
96 int contig_hint; /* max contiguous size hint */
97 void *base_addr; /* base address of this chunk */
98 int map_used; /* # of map entries used */
99 int map_alloc; /* # of map entries allocated */
100 int *map; /* allocation map */
101 void *data; /* chunk data */
102 bool immutable; /* no [de]population allowed */
103 unsigned long populated[]; /* populated bitmap */
106 static int pcpu_unit_pages __read_mostly;
107 static int pcpu_unit_size __read_mostly;
108 static int pcpu_nr_units __read_mostly;
109 static int pcpu_atom_size __read_mostly;
110 static int pcpu_nr_slots __read_mostly;
111 static size_t pcpu_chunk_struct_size __read_mostly;
113 /* cpus with the lowest and highest unit numbers */
114 static unsigned int pcpu_first_unit_cpu __read_mostly;
115 static unsigned int pcpu_last_unit_cpu __read_mostly;
117 /* the address of the first chunk which starts with the kernel static area */
118 void *pcpu_base_addr __read_mostly;
119 EXPORT_SYMBOL_GPL(pcpu_base_addr);
121 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
122 const unsigned long *pcpu_unit_offsets __read_mostly; /* cpu -> unit offset */
124 /* group information, used for vm allocation */
125 static int pcpu_nr_groups __read_mostly;
126 static const unsigned long *pcpu_group_offsets __read_mostly;
127 static const size_t *pcpu_group_sizes __read_mostly;
130 * The first chunk which always exists. Note that unlike other
131 * chunks, this one can be allocated and mapped in several different
132 * ways and thus often doesn't live in the vmalloc area.
134 static struct pcpu_chunk *pcpu_first_chunk;
137 * Optional reserved chunk. This chunk reserves part of the first
138 * chunk and serves it for reserved allocations. The offset up to
139 * which the reserved area extends is kept in pcpu_reserved_chunk_limit.
140 * When the reserved area doesn't exist, the following variables contain NULL and 0
143 static struct pcpu_chunk *pcpu_reserved_chunk;
144 static int pcpu_reserved_chunk_limit;
147 * Synchronization rules.
149 * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former
150 * protects allocation/reclaim paths, chunks, populated bitmap and
151 * vmalloc mapping. The latter is a spinlock and protects the index
152 * data structures - chunk slots, chunks and area maps in chunks.
154 * During allocation, pcpu_alloc_mutex is kept locked all the time and
155 * pcpu_lock is grabbed and released as necessary. All actual memory
156 * allocations are done using GFP_KERNEL with pcpu_lock released. In
157 * general, percpu memory can't be allocated with irq off but
158 * irqsave/restore are still used in alloc path so that it can be used
159 * from early init path - sched_init() specifically.
161 * Free path accesses and alters only the index data structures, so it
162 * can be safely called from atomic context. When memory needs to be
163 * returned to the system, free path schedules reclaim_work which
164 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
165 * reclaimed, releases both locks and frees the chunks. Note that it's
166 * necessary to grab both locks to remove a chunk from circulation as
167 * allocation path might be referencing the chunk with only
168 * pcpu_alloc_mutex locked.
170 static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */
171 static DEFINE_SPINLOCK(pcpu_lock); /* protects index data structures */
173 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
175 /* reclaim work to release fully free chunks, scheduled from free path */
176 static void pcpu_reclaim(struct work_struct *work);
177 static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
179 static bool pcpu_addr_in_first_chunk(void *addr)
181 void *first_start = pcpu_first_chunk->base_addr;
183 return addr >= first_start && addr < first_start + pcpu_unit_size;
186 static bool pcpu_addr_in_reserved_chunk(void *addr)
188 void *first_start = pcpu_first_chunk->base_addr;
190 return addr >= first_start &&
191 addr < first_start + pcpu_reserved_chunk_limit;
194 static int __pcpu_size_to_slot(int size)
196 int highbit = fls(size); /* size is in bytes */
197 return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
200 static int pcpu_size_to_slot(int size)
202 if (size == pcpu_unit_size)
203 return pcpu_nr_slots - 1;
204 return __pcpu_size_to_slot(size);
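/*
 * Worked example: for size == 1024, fls(1024) == 11, so the slot is
 * max(11 - 5 + 2, 1) == 8; every free size in [1024, 2047] maps to the
 * same slot. A chunk whose free size equals pcpu_unit_size is special
 * cased into the last slot so fully free chunks are easy to find.
 */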
207 static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
209 if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
212 return pcpu_size_to_slot(chunk->free_size);
215 /* set the pointer to a chunk in a page struct */
216 static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
218 page->index = (unsigned long)pcpu;
221 /* obtain pointer to a chunk from a page struct */
222 static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
224 return (struct pcpu_chunk *)page->index;
227 static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
229 return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
232 static unsigned long __maybe_unused pcpu_chunk_addr(struct pcpu_chunk *chunk,
233 unsigned int cpu, int page_idx)
235 return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
236 (page_idx << PAGE_SHIFT);
239 static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
240 unsigned int cpu, int page_idx)
242 /* must not be used on pre-mapped chunk */
243 WARN_ON(chunk->immutable);
245 return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
248 static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
249 int *rs, int *re, int end)
251 *rs = find_next_zero_bit(chunk->populated, end, *rs);
252 *re = find_next_bit(chunk->populated, end, *rs + 1);
255 static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
256 int *rs, int *re, int end)
258 *rs = find_next_bit(chunk->populated, end, *rs);
259 *re = find_next_zero_bit(chunk->populated, end, *rs + 1);
263 * (Un)populated page region iterators. Iterate over (un)populated
264 * page regions between @start and @end in @chunk. @rs and @re should
265 * be integer variables and will be set to start and end page index of
266 * the current region.
268 #define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \
269 for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
271 (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
273 #define pcpu_for_each_pop_region(chunk, rs, re, start, end) \
274 for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \
276 (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
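/*
 * Usage sketch (hypothetical, for illustration only): walk every
 * populated page region of a chunk.
 *
 *	int rs, re;
 *
 *	pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages)
 *		pr_debug("pages [%d, %d) are populated\n", rs, re);
 */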
279 * pcpu_mem_alloc - allocate memory
280 * @size: bytes to allocate
282 * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
283 * kzalloc() is used; otherwise, vmalloc() is used. The returned
284 * memory is always zeroed.
287 * Does GFP_KERNEL allocation.
290 * Pointer to the allocated area on success, NULL on failure.
292 static void *pcpu_mem_alloc(size_t size)
294 if (size <= PAGE_SIZE)
295 return kzalloc(size, GFP_KERNEL);
297 void *ptr = vmalloc(size);
299 memset(ptr, 0, size);
305 * pcpu_mem_free - free memory
306 * @ptr: memory to free
307 * @size: size of the area
309 * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc().
311 static void pcpu_mem_free(void *ptr, size_t size)
313 if (size <= PAGE_SIZE)
320 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
321 * @chunk: chunk of interest
322 * @oslot: the previous slot it was on
324 * This function is called after an allocation or free changed @chunk.
325 * New slot according to the changed state is determined and @chunk is
326 * moved to the slot. Note that the reserved chunk is never put on
332 static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
334 int nslot = pcpu_chunk_slot(chunk);
336 if (chunk != pcpu_reserved_chunk && oslot != nslot) {
338 list_move(&chunk->list, &pcpu_slot[nslot]);
340 list_move_tail(&chunk->list, &pcpu_slot[nslot]);
345 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
346 * @chunk: chunk of interest
348 * Determine whether area map of @chunk needs to be extended to
349 * accommodate a new allocation.
355 * New target map allocation length if extension is necessary, 0
358 static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
362 if (chunk->map_alloc >= chunk->map_used + 2)
365 new_alloc = PCPU_DFL_MAP_ALLOC;
366 while (new_alloc < chunk->map_used + 2)
373 * pcpu_extend_area_map - extend area map of a chunk
374 * @chunk: chunk of interest
375 * @new_alloc: new target allocation length of the area map
377 * Extend area map of @chunk to have @new_alloc entries.
380 * Does GFP_KERNEL allocation. Grabs and releases pcpu_lock.
383 * 0 on success, -errno on failure.
385 static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
387 int *old = NULL, *new = NULL;
388 size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
391 new = pcpu_mem_alloc(new_size);
395 /* acquire pcpu_lock and switch to new area map */
396 spin_lock_irqsave(&pcpu_lock, flags);
398 if (new_alloc <= chunk->map_alloc)
401 old_size = chunk->map_alloc * sizeof(chunk->map[0]);
402 memcpy(new, chunk->map, old_size);
405 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
406 * one of the first chunks and still using static map.
408 if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
411 chunk->map_alloc = new_alloc;
416 spin_unlock_irqrestore(&pcpu_lock, flags);
419 * pcpu_mem_free() might end up calling vfree() which uses
420 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
422 pcpu_mem_free(old, old_size);
423 pcpu_mem_free(new, new_size);
429 * pcpu_split_block - split a map block
430 * @chunk: chunk of interest
431 * @i: index of map block to split
432 * @head: head size in bytes (can be 0)
433 * @tail: tail size in bytes (can be 0)
435 * Split the @i'th map block into two or three blocks. If @head is
436 * non-zero, @head bytes block is inserted before block @i moving it
437 * to @i+1 and reducing its size by @head bytes.
439 * If @tail is non-zero, the target block, which can be @i or @i+1
440 * depending on @head, is reduced by @tail bytes and @tail byte block
441 * is inserted after the target block.
443 * @chunk->map must have enough free slots to accommodate the split.
448 static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
451 int nr_extra = !!head + !!tail;
453 BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
455 /* insert new subblocks */
456 memmove(&chunk->map[i + nr_extra], &chunk->map[i],
457 sizeof(chunk->map[0]) * (chunk->map_used - i));
458 chunk->map_used += nr_extra;
461 chunk->map[i + 1] = chunk->map[i] - head;
462 chunk->map[i++] = head;
465 chunk->map[i++] -= tail;
466 chunk->map[i] = tail;
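/*
 * Worked example: splitting a 1024 byte free block at map[i] with
 * head == 64 and tail == 448 shifts the following entries right by two
 * and leaves map[i] == 64, map[i + 1] == 512 (the area the caller will
 * mark allocated) and map[i + 2] == 448.
 */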
471 * pcpu_alloc_area - allocate area from a pcpu_chunk
472 * @chunk: chunk of interest
473 * @size: wanted size in bytes
474 * @align: wanted align
476 * Try to allocate @size bytes area aligned at @align from @chunk.
477 * Note that this function only allocates the offset. It doesn't
478 * populate or map the area.
480 * @chunk->map must have at least two free slots.
486 * Allocated offset in @chunk on success, -1 if no matching area is
489 static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
491 int oslot = pcpu_chunk_slot(chunk);
495 for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
496 bool is_last = i + 1 == chunk->map_used;
499 /* extra for alignment requirement */
500 head = ALIGN(off, align) - off;
501 BUG_ON(i == 0 && head != 0);
503 if (chunk->map[i] < 0)
505 if (chunk->map[i] < head + size) {
506 max_contig = max(chunk->map[i], max_contig);
511 * If head is small or the previous block is free,
512 * merge'em. Note that 'small' is defined as smaller
513 * than sizeof(int), which is very small but isn't too
514 * uncommon for percpu allocations.
516 if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
517 if (chunk->map[i - 1] > 0)
518 chunk->map[i - 1] += head;
520 chunk->map[i - 1] -= head;
521 chunk->free_size -= head;
523 chunk->map[i] -= head;
528 /* if tail is small, just keep it around */
529 tail = chunk->map[i] - head - size;
530 if (tail < sizeof(int))
533 /* split if warranted */
535 pcpu_split_block(chunk, i, head, tail);
539 max_contig = max(chunk->map[i - 1], max_contig);
542 max_contig = max(chunk->map[i + 1], max_contig);
545 /* update hint and mark allocated */
547 chunk->contig_hint = max_contig; /* fully scanned */
549 chunk->contig_hint = max(chunk->contig_hint,
552 chunk->free_size -= chunk->map[i];
553 chunk->map[i] = -chunk->map[i];
555 pcpu_chunk_relocate(chunk, oslot);
559 chunk->contig_hint = max_contig; /* fully scanned */
560 pcpu_chunk_relocate(chunk, oslot);
562 /* tell the upper layer that this chunk has no matching area */
567 * pcpu_free_area - free area to a pcpu_chunk
568 * @chunk: chunk of interest
569 * @freeme: offset of area to free
571 * Free the area starting at @freeme in @chunk. Note that this function
572 * only modifies the allocation map. It doesn't depopulate or unmap
578 static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
580 int oslot = pcpu_chunk_slot(chunk);
583 for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
586 BUG_ON(off != freeme);
587 BUG_ON(chunk->map[i] > 0);
589 chunk->map[i] = -chunk->map[i];
590 chunk->free_size += chunk->map[i];
592 /* merge with previous? */
593 if (i > 0 && chunk->map[i - 1] >= 0) {
594 chunk->map[i - 1] += chunk->map[i];
596 memmove(&chunk->map[i], &chunk->map[i + 1],
597 (chunk->map_used - i) * sizeof(chunk->map[0]));
600 /* merge with next? */
601 if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
602 chunk->map[i] += chunk->map[i + 1];
604 memmove(&chunk->map[i + 1], &chunk->map[i + 2],
605 (chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
608 chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
609 pcpu_chunk_relocate(chunk, oslot);
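/*
 * Worked example: with map == { -256, -512, 128 }, freeing the area at
 * offset 256 flips the -512 entry to 512 and then merges it with the
 * following free 128 byte entry, leaving map == { -256, 640 }.
 */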
612 static struct pcpu_chunk *pcpu_alloc_chunk(void)
614 struct pcpu_chunk *chunk;
616 chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
620 chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
626 chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
627 chunk->map[chunk->map_used++] = pcpu_unit_size;
629 INIT_LIST_HEAD(&chunk->list);
630 chunk->free_size = pcpu_unit_size;
631 chunk->contig_hint = pcpu_unit_size;
636 static void pcpu_free_chunk(struct pcpu_chunk *chunk)
640 pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
645 * pcpu_get_pages_and_bitmap - get temp pages array and bitmap
646 * @chunk: chunk of interest
647 * @bitmapp: output parameter for bitmap
648 * @may_alloc: may allocate the array
650 * Returns pointer to array of pointers to struct page and bitmap,
651 * both of which can be indexed with pcpu_page_idx(). The returned
652 * array is cleared to zero and *@bitmapp is copied from
653 * @chunk->populated. Note that there is only one array and bitmap
654 * and access exclusion is the caller's responsibility.
657 * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc.
658 * Otherwise, don't care.
661 * Pointer to temp pages array on success, NULL on failure.
663 static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
664 unsigned long **bitmapp,
667 static struct page **pages;
668 static unsigned long *bitmap;
669 size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
670 size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) *
671 sizeof(unsigned long);
673 if (!pages || !bitmap) {
674 if (may_alloc && !pages)
675 pages = pcpu_mem_alloc(pages_size);
676 if (may_alloc && !bitmap)
677 bitmap = pcpu_mem_alloc(bitmap_size);
678 if (!pages || !bitmap)
682 memset(pages, 0, pages_size);
683 bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);
690 * pcpu_free_pages - free pages which were allocated for @chunk
691 * @chunk: chunk pages were allocated for
692 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
693 * @populated: populated bitmap
694 * @page_start: page index of the first page to be freed
695 * @page_end: page index of the last page to be freed + 1
697 * Free pages [@page_start, @page_end) in @pages for all units.
698 * The pages were allocated for @chunk.
700 static void pcpu_free_pages(struct pcpu_chunk *chunk,
701 struct page **pages, unsigned long *populated,
702 int page_start, int page_end)
707 for_each_possible_cpu(cpu) {
708 for (i = page_start; i < page_end; i++) {
709 struct page *page = pages[pcpu_page_idx(cpu, i)];
718 * pcpu_alloc_pages - allocates pages for @chunk
719 * @chunk: target chunk
720 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
721 * @populated: populated bitmap
722 * @page_start: page index of the first page to be allocated
723 * @page_end: page index of the last page to be allocated + 1
725 * Allocate pages [@page_start,@page_end) into @pages for all units.
726 * The allocation is for @chunk. Percpu core doesn't care about the
727 * content of @pages and will pass it verbatim to pcpu_map_pages().
729 static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
730 struct page **pages, unsigned long *populated,
731 int page_start, int page_end)
733 const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
737 for_each_possible_cpu(cpu) {
738 for (i = page_start; i < page_end; i++) {
739 struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
741 *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
743 pcpu_free_pages(chunk, pages, populated,
744 page_start, page_end);
753 * pcpu_pre_unmap_flush - flush cache prior to unmapping
754 * @chunk: chunk the regions to be flushed belong to
755 * @page_start: page index of the first page to be flushed
756 * @page_end: page index of the last page to be flushed + 1
758 * Pages in [@page_start,@page_end) of @chunk are about to be
759 * unmapped. Flush cache. As each flushing trial can be very
760 * expensive, issue flush on the whole region at once rather than
761 * doing it for each cpu. This could be an overkill but is more
764 static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
765 int page_start, int page_end)
768 pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
769 pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
772 static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
774 unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
778 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
779 * @chunk: chunk of interest
780 * @pages: pages array which can be used to pass information to free
781 * @populated: populated bitmap
782 * @page_start: page index of the first page to unmap
783 * @page_end: page index of the last page to unmap + 1
785 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
786 * Corresponding elements in @pages were cleared by the caller and can
787 * be used to carry information to pcpu_free_pages() which will be
788 * called after all unmaps are finished. The caller should call
789 * proper pre/post flush functions.
791 static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
792 struct page **pages, unsigned long *populated,
793 int page_start, int page_end)
798 for_each_possible_cpu(cpu) {
799 for (i = page_start; i < page_end; i++) {
802 page = pcpu_chunk_page(chunk, cpu, i);
804 pages[pcpu_page_idx(cpu, i)] = page;
806 __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
807 page_end - page_start);
810 for (i = page_start; i < page_end; i++)
811 __clear_bit(i, populated);
815 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
816 * @chunk: pcpu_chunk the regions to be flushed belong to
817 * @page_start: page index of the first page to be flushed
818 * @page_end: page index of the last page to be flushed + 1
820 * Pages [@page_start,@page_end) of @chunk have been unmapped. Flush
821 * TLB for the regions. This can be skipped if the area is to be
822 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
824 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
825 * for the whole region.
827 static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
828 int page_start, int page_end)
830 flush_tlb_kernel_range(
831 pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
832 pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
835 static int __pcpu_map_pages(unsigned long addr, struct page **pages,
838 return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
843 * pcpu_map_pages - map pages into a pcpu_chunk
844 * @chunk: chunk of interest
845 * @pages: pages array containing pages to be mapped
846 * @populated: populated bitmap
847 * @page_start: page index of the first page to map
848 * @page_end: page index of the last page to map + 1
850 * For each cpu, map pages [@page_start,@page_end) into @chunk. The
851 * caller is responsible for calling pcpu_post_map_flush() after all
852 * mappings are complete.
854 * This function is responsible for setting corresponding bits in
855 * @chunk->populated bitmap and whatever is necessary for reverse
856 * lookup (addr -> chunk).
858 static int pcpu_map_pages(struct pcpu_chunk *chunk,
859 struct page **pages, unsigned long *populated,
860 int page_start, int page_end)
862 unsigned int cpu, tcpu;
865 for_each_possible_cpu(cpu) {
866 err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
867 &pages[pcpu_page_idx(cpu, page_start)],
868 page_end - page_start);
873 /* mapping successful, link chunk and mark populated */
874 for (i = page_start; i < page_end; i++) {
875 for_each_possible_cpu(cpu)
876 pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
878 __set_bit(i, populated);
884 for_each_possible_cpu(tcpu) {
887 __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
888 page_end - page_start);
894 * pcpu_post_map_flush - flush cache after mapping
895 * @chunk: pcpu_chunk the regions to be flushed belong to
896 * @page_start: page index of the first page to be flushed
897 * @page_end: page index of the last page to be flushed + 1
899 * Pages [@page_start,@page_end) of @chunk have been mapped. Flush
902 * As with pcpu_pre_unmap_flush(), cache flushing is also done at once
903 * for the whole region.
905 static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
906 int page_start, int page_end)
909 pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
910 pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
914 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
915 * @chunk: chunk to depopulate
916 * @off: offset to the area to depopulate
917 * @size: size of the area to depopulate in bytes
920 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
921 * from @chunk.
927 static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
929 int page_start = PFN_DOWN(off);
930 int page_end = PFN_UP(off + size);
932 unsigned long *populated;
935 /* quick path, check whether it's empty already */
937 pcpu_next_unpop(chunk, &rs, &re, page_end);
938 if (rs == page_start && re == page_end)
941 /* immutable chunks can't be depopulated */
942 WARN_ON(chunk->immutable);
945 * If control reaches here, there must have been at least one
946 * successful population attempt so the temp pages array must
949 pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
953 pcpu_pre_unmap_flush(chunk, page_start, page_end);
955 pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
956 pcpu_unmap_pages(chunk, pages, populated, rs, re);
958 /* no need to flush tlb, vmalloc will handle it lazily */
960 pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
961 pcpu_free_pages(chunk, pages, populated, rs, re);
963 /* commit new bitmap */
964 bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
968 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
969 * @chunk: chunk of interest
970 * @off: offset to the area to populate
971 * @size: size of the area to populate in bytes
973 * For each cpu, populate and map pages [@page_start,@page_end) into
974 * @chunk. The area is cleared on return.
977 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
979 static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
981 int page_start = PFN_DOWN(off);
982 int page_end = PFN_UP(off + size);
983 int free_end = page_start, unmap_end = page_start;
985 unsigned long *populated;
989 /* quick path, check whether all pages are already there */
991 pcpu_next_pop(chunk, &rs, &re, page_end);
992 if (rs == page_start && re == page_end)
995 /* need to allocate and map pages, this chunk can't be immutable */
996 WARN_ON(chunk->immutable);
998 pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
1003 pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
1004 rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
1010 pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
1011 rc = pcpu_map_pages(chunk, pages, populated, rs, re);
1016 pcpu_post_map_flush(chunk, page_start, page_end);
1018 /* commit new bitmap */
1019 bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
1021 for_each_possible_cpu(cpu)
1022 memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1026 pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
1027 pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
1028 pcpu_unmap_pages(chunk, pages, populated, rs, re);
1029 pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
1031 pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
1032 pcpu_free_pages(chunk, pages, populated, rs, re);
1036 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
1038 if (chunk && chunk->data)
1039 pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
1040 pcpu_free_chunk(chunk);
1043 static struct pcpu_chunk *pcpu_create_chunk(void)
1045 struct pcpu_chunk *chunk;
1046 struct vm_struct **vms;
1048 chunk = pcpu_alloc_chunk();
1052 vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
1053 pcpu_nr_groups, pcpu_atom_size, GFP_KERNEL);
1055 pcpu_free_chunk(chunk);
1060 chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];
1065 * pcpu_chunk_addr_search - determine chunk containing specified address
1066 * @addr: address for which the chunk needs to be determined.
1069 * The address of the found chunk.
1071 static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
1073 /* is it in the first chunk? */
1074 if (pcpu_addr_in_first_chunk(addr)) {
1075 /* is it in the reserved area? */
1076 if (pcpu_addr_in_reserved_chunk(addr))
1077 return pcpu_reserved_chunk;
1078 return pcpu_first_chunk;
1082 * The address is relative to unit0 which might be unused and
1083 * thus unmapped. Offset the address to the unit space of the
1084 * current processor before looking it up in the vmalloc
1085 * space. Note that any possible cpu id can be used here, so
1086 * there's no need to worry about preemption or cpu hotplug.
1088 addr += pcpu_unit_offsets[raw_smp_processor_id()];
1089 return pcpu_get_page_chunk(vmalloc_to_page(addr));
1093 * pcpu_alloc - the percpu allocator
1094 * @size: size of area to allocate in bytes
1095 * @align: alignment of area (max PAGE_SIZE)
1096 * @reserved: allocate from the reserved chunk if available
1098 * Allocate percpu area of @size bytes aligned at @align.
1101 * Does GFP_KERNEL allocation.
1104 * Percpu pointer to the allocated area on success, NULL on failure.
1106 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
1108 static int warn_limit = 10;
1109 struct pcpu_chunk *chunk;
1111 int slot, off, new_alloc;
1112 unsigned long flags;
1114 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
1115 WARN(true, "illegal size (%zu) or align (%zu) for "
1116 "percpu allocation\n", size, align);
1120 mutex_lock(&pcpu_alloc_mutex);
1121 spin_lock_irqsave(&pcpu_lock, flags);
1123 /* serve reserved allocations from the reserved chunk if available */
1124 if (reserved && pcpu_reserved_chunk) {
1125 chunk = pcpu_reserved_chunk;
1127 if (size > chunk->contig_hint) {
1128 err = "alloc from reserved chunk failed";
1132 while ((new_alloc = pcpu_need_to_extend(chunk))) {
1133 spin_unlock_irqrestore(&pcpu_lock, flags);
1134 if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
1135 err = "failed to extend area map of reserved chunk";
1136 goto fail_unlock_mutex;
1138 spin_lock_irqsave(&pcpu_lock, flags);
1141 off = pcpu_alloc_area(chunk, size, align);
1145 err = "alloc from reserved chunk failed";
1150 /* search through normal chunks */
1151 for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
1152 list_for_each_entry(chunk, &pcpu_slot[slot], list) {
1153 if (size > chunk->contig_hint)
1156 new_alloc = pcpu_need_to_extend(chunk);
1158 spin_unlock_irqrestore(&pcpu_lock, flags);
1159 if (pcpu_extend_area_map(chunk,
1161 err = "failed to extend area map";
1162 goto fail_unlock_mutex;
1164 spin_lock_irqsave(&pcpu_lock, flags);
1166 * pcpu_lock has been dropped, need to
1167 * restart pcpu_slot list walking.
1172 off = pcpu_alloc_area(chunk, size, align);
1178 /* hmmm... no space left, create a new chunk */
1179 spin_unlock_irqrestore(&pcpu_lock, flags);
1181 chunk = pcpu_create_chunk();
1183 err = "failed to allocate new chunk";
1184 goto fail_unlock_mutex;
1187 spin_lock_irqsave(&pcpu_lock, flags);
1188 pcpu_chunk_relocate(chunk, -1);
1192 spin_unlock_irqrestore(&pcpu_lock, flags);
1194 /* populate, map and clear the area */
1195 if (pcpu_populate_chunk(chunk, off, size)) {
1196 spin_lock_irqsave(&pcpu_lock, flags);
1197 pcpu_free_area(chunk, off);
1198 err = "failed to populate";
1202 mutex_unlock(&pcpu_alloc_mutex);
1204 /* return address relative to base address */
1205 return __addr_to_pcpu_ptr(chunk->base_addr + off);
1208 spin_unlock_irqrestore(&pcpu_lock, flags);
1210 mutex_unlock(&pcpu_alloc_mutex);
1212 pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
1213 "%s\n", size, align, err);
1216 pr_info("PERCPU: limit reached, disable warning\n");
1222 * __alloc_percpu - allocate dynamic percpu area
1223 * @size: size of area to allocate in bytes
1224 * @align: alignment of area (max PAGE_SIZE)
1226 * Allocate percpu area of @size bytes aligned at @align. Might
1227 * sleep. Might trigger writeouts.
1230 * Does GFP_KERNEL allocation.
1233 * Percpu pointer to the allocated area on success, NULL on failure.
1235 void __percpu *__alloc_percpu(size_t size, size_t align)
1237 return pcpu_alloc(size, align, false);
1239 EXPORT_SYMBOL_GPL(__alloc_percpu);
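/*
 * Usage sketch (hypothetical caller, for illustration only):
 *
 *	int __percpu *counter;
 *	unsigned int cpu;
 *	int total = 0;
 *
 *	counter = __alloc_percpu(sizeof(int), __alignof__(int));
 *	if (!counter)
 *		return -ENOMEM;
 *	for_each_possible_cpu(cpu)
 *		total += *per_cpu_ptr(counter, cpu);
 *	free_percpu(counter);
 *
 * The returned memory is zeroed, so total stays 0 here; a real caller
 * would update the per-cpu counters before summing them.
 */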
1242 * __alloc_reserved_percpu - allocate reserved percpu area
1243 * @size: size of area to allocate in bytes
1244 * @align: alignment of area (max PAGE_SIZE)
1246 * Allocate percpu area of @size bytes aligned at @align from reserved
1247 * percpu area if arch has set it up; otherwise, allocation is served
1248 * from the same dynamic area. Might sleep. Might trigger writeouts.
1251 * Does GFP_KERNEL allocation.
1254 * Percpu pointer to the allocated area on success, NULL on failure.
1256 void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
1258 return pcpu_alloc(size, align, true);
1262 * pcpu_reclaim - reclaim fully free chunks, workqueue function
1265 * Reclaim all fully free chunks except for the first one.
1268 * workqueue context.
1270 static void pcpu_reclaim(struct work_struct *work)
1273 struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
1274 struct pcpu_chunk *chunk, *next;
1276 mutex_lock(&pcpu_alloc_mutex);
1277 spin_lock_irq(&pcpu_lock);
1279 list_for_each_entry_safe(chunk, next, head, list) {
1280 WARN_ON(chunk->immutable);
1282 /* spare the first one */
1283 if (chunk == list_first_entry(head, struct pcpu_chunk, list))
1286 list_move(&chunk->list, &todo);
1289 spin_unlock_irq(&pcpu_lock);
1291 list_for_each_entry_safe(chunk, next, &todo, list) {
1292 pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
1293 pcpu_destroy_chunk(chunk);
1296 mutex_unlock(&pcpu_alloc_mutex);
1300 * free_percpu - free percpu area
1301 * @ptr: pointer to area to free
1303 * Free percpu area @ptr.
1306 * Can be called from atomic context.
1308 void free_percpu(void __percpu *ptr)
1311 struct pcpu_chunk *chunk;
1312 unsigned long flags;
1318 addr = __pcpu_ptr_to_addr(ptr);
1320 spin_lock_irqsave(&pcpu_lock, flags);
1322 chunk = pcpu_chunk_addr_search(addr);
1323 off = addr - chunk->base_addr;
1325 pcpu_free_area(chunk, off);
1327 /* if there is more than one fully free chunk, wake up the grim reaper */
1328 if (chunk->free_size == pcpu_unit_size) {
1329 struct pcpu_chunk *pos;
1331 list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
1333 schedule_work(&pcpu_reclaim_work);
1338 spin_unlock_irqrestore(&pcpu_lock, flags);
1340 EXPORT_SYMBOL_GPL(free_percpu);
1343 * is_kernel_percpu_address - test whether address is from static percpu area
1344 * @addr: address to test
1346 * Test whether @addr belongs to in-kernel static percpu area. Module
1347 * static percpu areas are not considered. For those, use
1348 * is_module_percpu_address().
1351 * %true if @addr is from in-kernel static percpu area, %false otherwise.
1353 bool is_kernel_percpu_address(unsigned long addr)
1355 const size_t static_size = __per_cpu_end - __per_cpu_start;
1356 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1359 for_each_possible_cpu(cpu) {
1360 void *start = per_cpu_ptr(base, cpu);
1362 if ((void *)addr >= start && (void *)addr < start + static_size)
1369 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
1370 * @addr: the address to be converted to physical address
1372 * Given @addr which is a dereferenceable address obtained via one of
1373 * percpu access macros, this function translates it into its physical
1374 * address. The caller is responsible for ensuring @addr stays valid
1375 * until this function finishes.
1378 * The physical address for @addr.
1380 phys_addr_t per_cpu_ptr_to_phys(void *addr)
1382 if (pcpu_addr_in_first_chunk(addr)) {
1383 if ((unsigned long)addr < VMALLOC_START ||
1384 (unsigned long)addr >= VMALLOC_END)
1387 return page_to_phys(vmalloc_to_page(addr));
1389 return page_to_phys(vmalloc_to_page(addr));
1392 static inline size_t pcpu_calc_fc_sizes(size_t static_size,
1393 size_t reserved_size,
1398 size_sum = PFN_ALIGN(static_size + reserved_size +
1399 (*dyn_sizep >= 0 ? *dyn_sizep : 0));
1400 if (*dyn_sizep != 0)
1401 *dyn_sizep = size_sum - static_size - reserved_size;
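/*
 * Worked example (assuming 4k pages): static_size == 41000,
 * reserved_size == 8192 and *dyn_sizep == -1 (auto) gives
 * size_sum == PFN_ALIGN(49192) == 53248, and *dyn_sizep is then set to
 * 53248 - 41000 - 8192 == 4056, i.e. the dynamic area soaks up the
 * page alignment padding.
 */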
1407 * pcpu_alloc_alloc_info - allocate percpu allocation info
1408 * @nr_groups: the number of groups
1409 * @nr_units: the number of units
1411 * Allocate ai which is large enough for @nr_groups groups containing
1412 * @nr_units units. The returned ai's groups[0].cpu_map points to the
1413 * cpu_map array which is long enough for @nr_units and filled with
1414 * NR_CPUS. It's the caller's responsibility to initialize the cpu_map
1415 * pointers of the other groups.
1418 * Pointer to the allocated pcpu_alloc_info on success, NULL on
1421 struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1424 struct pcpu_alloc_info *ai;
1425 size_t base_size, ai_size;
1429 base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1430 __alignof__(ai->groups[0].cpu_map[0]));
1431 ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1433 ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
1439 ai->groups[0].cpu_map = ptr;
1441 for (unit = 0; unit < nr_units; unit++)
1442 ai->groups[0].cpu_map[unit] = NR_CPUS;
1444 ai->nr_groups = nr_groups;
1445 ai->__ai_size = PFN_ALIGN(ai_size);
1451 * pcpu_free_alloc_info - free percpu allocation info
1452 * @ai: pcpu_alloc_info to free
1454 * Free @ai which was allocated by pcpu_alloc_alloc_info().
1456 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1458 free_bootmem(__pa(ai), ai->__ai_size);
1462 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1463 * @reserved_size: the size of reserved percpu area in bytes
1464 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
1465 * @atom_size: allocation atom size
1466 * @cpu_distance_fn: callback to determine distance between cpus, optional
1468 * This function determines grouping of units, their mappings to cpus
1469 * and other parameters considering needed percpu size, allocation
1470 * atom size and distances between CPUs.
1472 * Groups are always multiples of atom size and CPUs which are of
1473 * LOCAL_DISTANCE both ways are grouped together and share space for
1474 * units in the same group. The returned configuration is guaranteed
1475 * to have CPUs on different nodes on different groups and >=75% usage
1476 * of allocated virtual address space.
1479 * On success, pointer to the new allocation_info is returned. On
1480 * failure, ERR_PTR value is returned.
1482 struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1483 size_t reserved_size, ssize_t dyn_size,
1485 pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1487 static int group_map[NR_CPUS] __initdata;
1488 static int group_cnt[NR_CPUS] __initdata;
1489 const size_t static_size = __per_cpu_end - __per_cpu_start;
1490 int group_cnt_max = 0, nr_groups = 1, nr_units = 0;
1491 size_t size_sum, min_unit_size, alloc_size;
1492 int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
1493 int last_allocs, group, unit;
1494 unsigned int cpu, tcpu;
1495 struct pcpu_alloc_info *ai;
1496 unsigned int *cpu_map;
1498 /* this function may be called multiple times */
1499 memset(group_map, 0, sizeof(group_map));
1500 memset(group_cnt, 0, sizeof(group_cnt));
1503 * Determine min_unit_size, alloc_size and max_upa such that
1504 * alloc_size is a multiple of atom_size and is the smallest
1505 * which can accommodate 4k aligned segments which are equal to
1506 * or larger than min_unit_size.
1508 size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
1509 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1511 alloc_size = roundup(min_unit_size, atom_size);
1512 upa = alloc_size / min_unit_size;
1513 while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1517 /* group cpus according to their proximity */
1518 for_each_possible_cpu(cpu) {
1521 for_each_possible_cpu(tcpu) {
1524 if (group_map[tcpu] == group && cpu_distance_fn &&
1525 (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1526 cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1528 nr_groups = max(nr_groups, group + 1);
1532 group_map[cpu] = group;
1534 group_cnt_max = max(group_cnt_max, group_cnt[group]);
1538 * Expand unit size until address space usage goes over 75%
1539 * and then as much as possible without using more address
1542 last_allocs = INT_MAX;
1543 for (upa = max_upa; upa; upa--) {
1544 int allocs = 0, wasted = 0;
1546 if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1549 for (group = 0; group < nr_groups; group++) {
1550 int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1551 allocs += this_allocs;
1552 wasted += this_allocs * upa - group_cnt[group];
1556 * Don't accept if wastage is over 25%. The
1557 * greater-than comparison ensures upa==1 always
1558 * passes the following check.
1560 if (wasted > num_possible_cpus() / 3)
1563 /* and then don't consume more memory */
1564 if (allocs > last_allocs)
1566 last_allocs = allocs;
1571 /* allocate and fill alloc_info */
1572 for (group = 0; group < nr_groups; group++)
1573 nr_units += roundup(group_cnt[group], upa);
1575 ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1577 return ERR_PTR(-ENOMEM);
1578 cpu_map = ai->groups[0].cpu_map;
1580 for (group = 0; group < nr_groups; group++) {
1581 ai->groups[group].cpu_map = cpu_map;
1582 cpu_map += roundup(group_cnt[group], upa);
1585 ai->static_size = static_size;
1586 ai->reserved_size = reserved_size;
1587 ai->dyn_size = dyn_size;
1588 ai->unit_size = alloc_size / upa;
1589 ai->atom_size = atom_size;
1590 ai->alloc_size = alloc_size;
1592 for (group = 0, unit = 0; group_cnt[group]; group++) {
1593 struct pcpu_group_info *gi = &ai->groups[group];
1596 * Initialize base_offset as if all groups are located
1597 * back-to-back. The caller should update this to
1598 * reflect actual allocation.
1600 gi->base_offset = unit * ai->unit_size;
1602 for_each_possible_cpu(cpu)
1603 if (group_map[cpu] == group)
1604 gi->cpu_map[gi->nr_units++] = cpu;
1605 gi->nr_units = roundup(gi->nr_units, upa);
1606 unit += gi->nr_units;
1608 BUG_ON(unit != nr_units);
1614 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1616 * @ai: allocation info to dump
1618 * Print out information about @ai using loglevel @lvl.
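 *
 * The output looks roughly like this (values purely illustrative):
 *
 *	pcpu-alloc: s45056 r8192 d20480 u73728 alloc=18*4096
 *	pcpu-alloc: [0] 0 1 2 3
 *
 * i.e. the static/reserved/dynamic/unit sizes followed by the cpu
 * mapped to each unit, grouped per allocation (dashes mark unused
 * units).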
1620 static void pcpu_dump_alloc_info(const char *lvl,
1621 const struct pcpu_alloc_info *ai)
1623 int group_width = 1, cpu_width = 1, width;
1624 char empty_str[] = "--------";
1625 int alloc = 0, alloc_end = 0;
1627 int upa, apl; /* units per alloc, allocs per line */
1633 v = num_possible_cpus();
1636 empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1638 upa = ai->alloc_size / ai->unit_size;
1639 width = upa * (cpu_width + 1) + group_width + 3;
1640 apl = rounddown_pow_of_two(max(60 / width, 1));
1642 printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1643 lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1644 ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1646 for (group = 0; group < ai->nr_groups; group++) {
1647 const struct pcpu_group_info *gi = &ai->groups[group];
1648 int unit = 0, unit_end = 0;
1650 BUG_ON(gi->nr_units % upa);
1651 for (alloc_end += gi->nr_units / upa;
1652 alloc < alloc_end; alloc++) {
1653 if (!(alloc % apl)) {
1655 printk("%spcpu-alloc: ", lvl);
1657 printk("[%0*d] ", group_width, group);
1659 for (unit_end += upa; unit < unit_end; unit++)
1660 if (gi->cpu_map[unit] != NR_CPUS)
1661 printk("%0*d ", cpu_width,
1664 printk("%s ", empty_str);
1671 * pcpu_setup_first_chunk - initialize the first percpu chunk
1672 * @ai: pcpu_alloc_info describing how the percpu area is shaped
1673 * @base_addr: mapped address
1675 * Initialize the first percpu chunk which contains the kernel static
1676 * percpu area. This function is to be called from arch percpu area
1679 * @ai contains all information necessary to initialize the first
1680 * chunk and prime the dynamic percpu allocator.
1682 * @ai->static_size is the size of static percpu area.
1684 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1685 * reserve after the static area in the first chunk. This reserves
1686 * the first chunk such that it's available only through reserved
1687 * percpu allocation. This is primarily used to serve module percpu
1688 * static areas on architectures where the addressing model has
1689 * limited offset range for symbol relocations to guarantee module
1690 * percpu symbols fall inside the relocatable range.
1692 * @ai->dyn_size determines the number of bytes available for dynamic
1693 * allocation in the first chunk. The area between @ai->static_size +
1694 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1696 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1697 * and equal to or larger than @ai->static_size + @ai->reserved_size +
1700 * @ai->atom_size is the allocation atom size and used as alignment
1703 * @ai->alloc_size is the allocation size and always multiple of
1704 * @ai->atom_size. This is larger than @ai->atom_size if
1705 * @ai->unit_size is larger than @ai->atom_size.
1707 * @ai->nr_groups and @ai->groups describe virtual memory layout of
1708 * percpu areas. Units which should be colocated are put into the
1709 * same group. Dynamic VM areas will be allocated according to these
1710 * groupings. If @ai->nr_groups is zero, a single group containing
1711 * all units is assumed.
1713 * The caller should have mapped the first chunk at @base_addr and
1714 * copied static data to each unit.
1716 * If the first chunk ends up with both reserved and dynamic areas, it
1717 * is served by two chunks - one to serve the core static and reserved
1718 * areas and the other for the dynamic area. They share the same vm
1719 * and page map but use different area allocation maps to stay away
1720 * from each other. The latter chunk is circulated in the chunk slots
1721 * and available for dynamic allocation like any other chunk.
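 *
 * The resulting per-unit layout of the first chunk is roughly:
 *
 *	| static | [reserved] | dynamic | unused up to unit_size |
 *
 * with the static (+ reserved) part served by the reserved chunk, if
 * one exists, and the dynamic part by pcpu_first_chunk.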
1724 * 0 on success, -errno on failure.
1726 int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1729 static char cpus_buf[4096] __initdata;
1730 static int smap[2], dmap[2];
1731 size_t dyn_size = ai->dyn_size;
1732 size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1733 struct pcpu_chunk *schunk, *dchunk = NULL;
1734 unsigned long *group_offsets;
1735 size_t *group_sizes;
1736 unsigned long *unit_off;
1741 cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
1743 #define PCPU_SETUP_BUG_ON(cond) do { \
1744 if (unlikely(cond)) { \
1745 pr_emerg("PERCPU: failed to initialize, %s", #cond); \
1746 pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \
1747 pcpu_dump_alloc_info(KERN_EMERG, ai); \
1753 BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
1754 ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
1755 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1756 PCPU_SETUP_BUG_ON(!ai->static_size);
1757 PCPU_SETUP_BUG_ON(!base_addr);
1758 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1759 PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
1760 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1762 /* process group information and build config tables accordingly */
1763 group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
1764 group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
1765 unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
1766 unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
1768 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1769 unit_map[cpu] = UINT_MAX;
1770 pcpu_first_unit_cpu = NR_CPUS;
1772 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1773 const struct pcpu_group_info *gi = &ai->groups[group];
1775 group_offsets[group] = gi->base_offset;
1776 group_sizes[group] = gi->nr_units * ai->unit_size;
1778 for (i = 0; i < gi->nr_units; i++) {
1779 cpu = gi->cpu_map[i];
1783 PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
1784 PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1785 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1787 unit_map[cpu] = unit + i;
1788 unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1790 if (pcpu_first_unit_cpu == NR_CPUS)
1791 pcpu_first_unit_cpu = cpu;
1794 pcpu_last_unit_cpu = cpu;
1795 pcpu_nr_units = unit;
1797 for_each_possible_cpu(cpu)
1798 PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1800 /* we're done parsing the input, undefine BUG macro and dump config */
1801 #undef PCPU_SETUP_BUG_ON
1802 pcpu_dump_alloc_info(KERN_INFO, ai);
1804 pcpu_nr_groups = ai->nr_groups;
1805 pcpu_group_offsets = group_offsets;
1806 pcpu_group_sizes = group_sizes;
1807 pcpu_unit_map = unit_map;
1808 pcpu_unit_offsets = unit_off;
1810 /* determine basic parameters */
1811 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1812 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1813 pcpu_atom_size = ai->atom_size;
1814 pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1815 BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
1818 * Allocate chunk slots. The additional last slot is for
1821 pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1822 pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
1823 for (i = 0; i < pcpu_nr_slots; i++)
1824 INIT_LIST_HEAD(&pcpu_slot[i]);
1827 * Initialize static chunk. If reserved_size is zero, the
1828 * static chunk covers static area + dynamic allocation area
1829 * in the first chunk. If reserved_size is not zero, it
1830 * covers static area + reserved area (mostly used for module
1831 * static percpu allocation).
1833 schunk = alloc_bootmem(pcpu_chunk_struct_size);
1834 INIT_LIST_HEAD(&schunk->list);
1835 schunk->base_addr = base_addr;
1837 schunk->map_alloc = ARRAY_SIZE(smap);
1838 schunk->immutable = true;
1839 bitmap_fill(schunk->populated, pcpu_unit_pages);
1841 if (ai->reserved_size) {
1842 schunk->free_size = ai->reserved_size;
1843 pcpu_reserved_chunk = schunk;
1844 pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1846 schunk->free_size = dyn_size;
1847 dyn_size = 0; /* dynamic area covered */
1849 schunk->contig_hint = schunk->free_size;
1851 schunk->map[schunk->map_used++] = -ai->static_size;
1852 if (schunk->free_size)
1853 schunk->map[schunk->map_used++] = schunk->free_size;
1855 /* init dynamic chunk if necessary */
1857 dchunk = alloc_bootmem(pcpu_chunk_struct_size);
1858 INIT_LIST_HEAD(&dchunk->list);
1859 dchunk->base_addr = base_addr;
1861 dchunk->map_alloc = ARRAY_SIZE(dmap);
1862 dchunk->immutable = true;
1863 bitmap_fill(dchunk->populated, pcpu_unit_pages);
1865 dchunk->contig_hint = dchunk->free_size = dyn_size;
1866 dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
1867 dchunk->map[dchunk->map_used++] = dchunk->free_size;
1870 /* link the first chunk in */
1871 pcpu_first_chunk = dchunk ?: schunk;
1872 pcpu_chunk_relocate(pcpu_first_chunk, -1);
1875 pcpu_base_addr = base_addr;
1879 const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
1880 [PCPU_FC_AUTO] = "auto",
1881 [PCPU_FC_EMBED] = "embed",
1882 [PCPU_FC_PAGE] = "page",
1885 enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1887 static int __init percpu_alloc_setup(char *str)
1891 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1892 else if (!strcmp(str, "embed"))
1893 pcpu_chosen_fc = PCPU_FC_EMBED;
1895 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1896 else if (!strcmp(str, "page"))
1897 pcpu_chosen_fc = PCPU_FC_PAGE;
1900 pr_warning("PERCPU: unknown allocator %s specified\n", str);
1904 early_param("percpu_alloc", percpu_alloc_setup);
1906 #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1907 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
1909 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1910 * @reserved_size: the size of reserved percpu area in bytes
1911 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
1912 * @atom_size: allocation atom size
1913 * @cpu_distance_fn: callback to determine distance between cpus, optional
1914 * @alloc_fn: function to allocate percpu page
1915 * @free_fn: function to free percpu page
1917 * This is a helper to ease setting up embedded first percpu chunk and
1918 * can be called where pcpu_setup_first_chunk() is expected.
1920 * If this function is used to setup the first chunk, it is allocated
1921 * by calling @alloc_fn and used as-is without being mapped into
1922 * vmalloc area. Allocations are always whole multiples of @atom_size
1923 * aligned to @atom_size.
1925 * This enables the first chunk to piggy back on the linear physical
1926 * mapping which often uses larger page size. Please note that this
1927 * can result in very sparse cpu->unit mapping on NUMA machines thus
1928 * requiring large vmalloc address space. Don't use this allocator if
1929 * vmalloc space is not orders of magnitude larger than distances
1930 * between node memory addresses (ie. 32bit NUMA machines).
1932 * When @dyn_size is positive, dynamic area might be larger than
1933 * specified to fill page alignment. When @dyn_size is auto,
1934 * @dyn_size is just big enough to fill page alignment after static
1935 * and reserved areas.
1937 * If the needed size is smaller than the minimum or specified unit
1938 * size, the leftover is returned using @free_fn.
1941 * 0 on success, -errno on failure.
1943 int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
1945 pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1946 pcpu_fc_alloc_fn_t alloc_fn,
1947 pcpu_fc_free_fn_t free_fn)
1949 void *base = (void *)ULONG_MAX;
1950 void **areas = NULL;
1951 struct pcpu_alloc_info *ai;
1952 size_t size_sum, areas_size, max_distance;
1955 ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1960 size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1961 areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
1963 areas = alloc_bootmem_nopanic(areas_size);
1969 /* allocate, copy and determine base address */
1970 for (group = 0; group < ai->nr_groups; group++) {
1971 struct pcpu_group_info *gi = &ai->groups[group];
1972 unsigned int cpu = NR_CPUS;
1975 for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
1976 cpu = gi->cpu_map[i];
1977 BUG_ON(cpu == NR_CPUS);
1979 /* allocate space for the whole group */
1980 ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
1983 goto out_free_areas;
1987 base = min(ptr, base);
1989 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
1990 if (gi->cpu_map[i] == NR_CPUS) {
1991 /* unused unit, free whole */
1992 free_fn(ptr, ai->unit_size);
1995 /* copy and return the unused part */
1996 memcpy(ptr, __per_cpu_load, ai->static_size);
1997 free_fn(ptr + size_sum, ai->unit_size - size_sum);
2001 /* base address is now known, determine group base offsets */
2003 for (group = 0; group < ai->nr_groups; group++) {
2004 ai->groups[group].base_offset = areas[group] - base;
2005 max_distance = max_t(size_t, max_distance,
2006 ai->groups[group].base_offset);
2008 max_distance += ai->unit_size;
2010 /* warn if maximum distance is further than 75% of vmalloc space */
2011 if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
2012 pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
2014 max_distance, VMALLOC_END - VMALLOC_START);
2015 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2016 /* and fail if we have fallback */
2022 pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
2023 PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
2024 ai->dyn_size, ai->unit_size);
2026 rc = pcpu_setup_first_chunk(ai, base);
2030 for (group = 0; group < ai->nr_groups; group++)
2031 free_fn(areas[group],
2032 ai->groups[group].nr_units * ai->unit_size);
2034 pcpu_free_alloc_info(ai);
2036 free_bootmem(__pa(areas), areas_size);
2039 #endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
2040 !CONFIG_HAVE_SETUP_PER_CPU_AREA */
2042 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2044 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
2045 * @reserved_size: the size of reserved percpu area in bytes
2046 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
2047 * @free_fn: function to free percpu page, always called with PAGE_SIZE
2048 * @populate_pte_fn: function to populate pte
2050 * This is a helper to ease setting up page-remapped first percpu
2051 * chunk and can be called where pcpu_setup_first_chunk() is expected.
2053 * This is the basic allocator. Static percpu area is allocated
2054 * page-by-page into vmalloc area.
2057 * 0 on success, -errno on failure.
2059 int __init pcpu_page_first_chunk(size_t reserved_size,
2060 pcpu_fc_alloc_fn_t alloc_fn,
2061 pcpu_fc_free_fn_t free_fn,
2062 pcpu_fc_populate_pte_fn_t populate_pte_fn)
2064 static struct vm_struct vm;
2065 struct pcpu_alloc_info *ai;
2069 struct page **pages;
2072 snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
2074 ai = pcpu_build_alloc_info(reserved_size, -1, PAGE_SIZE, NULL);
2077 BUG_ON(ai->nr_groups != 1);
2078 BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
2080 unit_pages = ai->unit_size >> PAGE_SHIFT;
2082 /* unaligned allocations can't be freed, round up to page size */
2083 pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
2085 pages = alloc_bootmem(pages_size);
2087 /* allocate pages */
2089 for (unit = 0; unit < num_possible_cpus(); unit++)
2090 for (i = 0; i < unit_pages; i++) {
2091 unsigned int cpu = ai->groups[0].cpu_map[unit];
2094 ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
2096 pr_warning("PERCPU: failed to allocate %s page "
2097 "for cpu%u\n", psize_str, cpu);
2100 pages[j++] = virt_to_page(ptr);
2103 /* allocate vm area, map the pages and copy static data */
2104 vm.flags = VM_ALLOC;
2105 vm.size = num_possible_cpus() * ai->unit_size;
2106 vm_area_register_early(&vm, PAGE_SIZE);
2108 for (unit = 0; unit < num_possible_cpus(); unit++) {
2109 unsigned long unit_addr =
2110 (unsigned long)vm.addr + unit * ai->unit_size;
2112 for (i = 0; i < unit_pages; i++)
2113 populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
2115 /* pte already populated, the following shouldn't fail */
2116 rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
2119 panic("failed to map percpu area, err=%d\n", rc);
2122 * FIXME: Archs with virtual cache should flush local
2123 * cache for the linear mapping here - something
2124 * equivalent to flush_cache_vmap() on the local cpu.
2125 * flush_cache_vmap() can't be used as most supporting
2126 * data structures are not set up yet.
2129 /* copy static data */
2130 memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
2133 /* we're ready, commit */
2134 pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
2135 unit_pages, psize_str, vm.addr, ai->static_size,
2136 ai->reserved_size, ai->dyn_size);
2138 rc = pcpu_setup_first_chunk(ai, vm.addr);
2143 free_fn(page_address(pages[j]), PAGE_SIZE);
2146 free_bootmem(__pa(pages), pages_size);
2147 pcpu_free_alloc_info(ai);
2150 #endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */
2153 * Generic percpu area setup.
2155 * The embedding helper is used because its behavior closely resembles
2156 * the original non-dynamic generic percpu area setup. This is
2157 * important because many archs have addressing restrictions and might
2158 * fail if the percpu area is located far away from the previous
2159 * location. As an added bonus, in non-NUMA cases, embedding is
2160 * generally a good idea TLB-wise because the percpu area can piggyback
2161 * on the physical linear memory mapping which uses large page
2162 * mappings on applicable archs.
2164 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
2165 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
2166 EXPORT_SYMBOL(__per_cpu_offset);
2168 static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
2171 return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
2174 static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
2176 free_bootmem(__pa(ptr), size);
2179 void __init setup_per_cpu_areas(void)
2181 unsigned long delta;
2186 * Always reserve area for module percpu variables. That's
2187 * what the legacy allocator did.
2189 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
2190 PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
2191 pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
2193 panic("Failed to initialized percpu areas.");
2195 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
2196 for_each_possible_cpu(cpu)
2197 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
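/*
 * From here on, per_cpu(var, cpu) style accessors effectively resolve
 * to &var + __per_cpu_offset[cpu], i.e. the static image address of
 * the variable shifted into @cpu's unit inside the first chunk.
 */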
2199 #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */